author    | Florian Fainelli <florian@openwrt.org> | 2012-06-06 13:31:30 +0000
committer | Florian Fainelli <florian@openwrt.org> | 2012-06-06 13:31:30 +0000
commit    | 7b12e9e49c0665f23f16b0d0d11cb0227d7ce70d (patch)
tree      | 70d43a297f8e8c949f38ae4413c0a12b533569ee /target/linux/generic
parent    | 4112094dbbd780d32be7450f9d938d2e64250c16 (diff)
backport spi message queue infrastructure
SVN-Revision: 32077
Diffstat (limited to 'target/linux/generic')
-rw-r--r-- | target/linux/generic/patches-3.3/047-spi_message_queue.patch | 603
1 file changed, 603 insertions, 0 deletions
diff --git a/target/linux/generic/patches-3.3/047-spi_message_queue.patch b/target/linux/generic/patches-3.3/047-spi_message_queue.patch new file mode 100644 index 0000000..9aff0be --- /dev/null +++ b/target/linux/generic/patches-3.3/047-spi_message_queue.patch @@ -0,0 +1,603 @@ +commit ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0 +Author: Linus Walleij <linus.walleij@linaro.org> +Date: Wed Feb 22 10:05:38 2012 +0100 + + spi: create a message queueing infrastructure + + This rips the message queue in the PL022 driver out and pushes + it into (optional) common infrastructure. Drivers that want to + use the message pumping thread will need to define the new + per-messags transfer methods and leave the deprecated transfer() + method as NULL. + + Most of the design is described in the documentation changes that + are included in this patch. + + Since there is a queue that need to be stopped when the system + is suspending/resuming, two new calls are implemented for the + device drivers to call in their suspend()/resume() functions: + spi_master_suspend() and spi_master_resume(). + + ChangeLog v1->v2: + - Remove Kconfig entry and do not make the queue support optional + at all, instead be more agressive and have it as part of the + compulsory infrastructure. + - If the .transfer() method is implemented, delete print a small + deprecation notice and do not start the transfer pump. + - Fix a bitrotted comment. + ChangeLog v2->v3: + - Fix up a problematic sequence courtesy of Chris Blair. + - Stop rather than destroy the queue on suspend() courtesy of + Chris Blair. + + Signed-off-by: Chris Blair <chris.blair@stericsson.com> + Signed-off-by: Linus Walleij <linus.walleij@linaro.org> + Tested-by: Mark Brown <broonie@opensource.wolfsonmicro.com> + Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com> + Signed-off-by: Grant Likely <grant.likely@secretlab.ca> + +[Florian: dropped the changes on drivers/spi/spi-pl022.c, removed +the dev_info() about unqueued drivers still using the master function] + +--- a/Documentation/spi/spi-summary ++++ b/Documentation/spi/spi-summary +@@ -1,7 +1,7 @@ + Overview of Linux kernel SPI support + ==================================== + +-21-May-2007 ++02-Feb-2012 + + What is SPI? + ------------ +@@ -483,9 +483,9 @@ also initialize its own internal state. + and those methods.) + + After you initialize the spi_master, then use spi_register_master() to +-publish it to the rest of the system. At that time, device nodes for +-the controller and any predeclared spi devices will be made available, +-and the driver model core will take care of binding them to drivers. ++publish it to the rest of the system. At that time, device nodes for the ++controller and any predeclared spi devices will be made available, and ++the driver model core will take care of binding them to drivers. + + If you need to remove your SPI controller driver, spi_unregister_master() + will reverse the effect of spi_register_master(). +@@ -521,21 +521,53 @@ SPI MASTER METHODS + ** When you code setup(), ASSUME that the controller + ** is actively processing transfers for another device. + +- master->transfer(struct spi_device *spi, struct spi_message *message) +- This must not sleep. Its responsibility is arrange that the +- transfer happens and its complete() callback is issued. The two +- will normally happen later, after other transfers complete, and +- if the controller is idle it will need to be kickstarted. 
+- + master->cleanup(struct spi_device *spi) + Your controller driver may use spi_device.controller_state to hold + state it dynamically associates with that device. If you do that, + be sure to provide the cleanup() method to free that state. + ++ master->prepare_transfer_hardware(struct spi_master *master) ++ This will be called by the queue mechanism to signal to the driver ++ that a message is coming in soon, so the subsystem requests the ++ driver to prepare the transfer hardware by issuing this call. ++ This may sleep. ++ ++ master->unprepare_transfer_hardware(struct spi_master *master) ++ This will be called by the queue mechanism to signal to the driver ++ that there are no more messages pending in the queue and it may ++ relax the hardware (e.g. by power management calls). This may sleep. ++ ++ master->transfer_one_message(struct spi_master *master, ++ struct spi_message *mesg) ++ The subsystem calls the driver to transfer a single message while ++ queuing transfers that arrive in the meantime. When the driver is ++ finished with this message, it must call ++ spi_finalize_current_message() so the subsystem can issue the next ++ transfer. This may sleep. ++ ++ DEPRECATED METHODS ++ ++ master->transfer(struct spi_device *spi, struct spi_message *message) ++ This must not sleep. Its responsibility is arrange that the ++ transfer happens and its complete() callback is issued. The two ++ will normally happen later, after other transfers complete, and ++ if the controller is idle it will need to be kickstarted. This ++ method is not used on queued controllers and must be NULL if ++ transfer_one_message() and (un)prepare_transfer_hardware() are ++ implemented. ++ + + SPI MESSAGE QUEUE + +-The bulk of the driver will be managing the I/O queue fed by transfer(). ++If you are happy with the standard queueing mechanism provided by the ++SPI subsystem, just implement the queued methods specified above. Using ++the message queue has the upside of centralizing a lot of code and ++providing pure process-context execution of methods. The message queue ++can also be elevated to realtime priority on high-priority SPI traffic. ++ ++Unless the queueing mechanism in the SPI subsystem is selected, the bulk ++of the driver will be managing the I/O queue fed by the now deprecated ++function transfer(). + + That queue could be purely conceptual. For example, a driver used only + for low-frequency sensor access might be fine using synchronous PIO. +@@ -561,4 +593,6 @@ Stephen Street + Mark Underwood + Andrew Victor + Vitaly Wool +- ++Grant Likely ++Mark Brown ++Linus Walleij +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -30,6 +30,9 @@ + #include <linux/of_spi.h> + #include <linux/pm_runtime.h> + #include <linux/export.h> ++#include <linux/sched.h> ++#include <linux/delay.h> ++#include <linux/kthread.h> + + static void spidev_release(struct device *dev) + { +@@ -507,6 +510,293 @@ spi_register_board_info(struct spi_board + + /*-------------------------------------------------------------------------*/ + ++/** ++ * spi_pump_messages - kthread work function which processes spi message queue ++ * @work: pointer to kthread work struct contained in the master struct ++ * ++ * This function checks if there is any spi message in the queue that ++ * needs processing and if so call out to the driver to initialize hardware ++ * and transfer each message. 
++ * ++ */ ++static void spi_pump_messages(struct kthread_work *work) ++{ ++ struct spi_master *master = ++ container_of(work, struct spi_master, pump_messages); ++ unsigned long flags; ++ bool was_busy = false; ++ int ret; ++ ++ /* Lock queue and check for queue work */ ++ spin_lock_irqsave(&master->queue_lock, flags); ++ if (list_empty(&master->queue) || !master->running) { ++ if (master->busy) { ++ ret = master->unprepare_transfer_hardware(master); ++ if (ret) { ++ dev_err(&master->dev, ++ "failed to unprepare transfer hardware\n"); ++ return; ++ } ++ } ++ master->busy = false; ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ return; ++ } ++ ++ /* Make sure we are not already running a message */ ++ if (master->cur_msg) { ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ return; ++ } ++ /* Extract head of queue */ ++ master->cur_msg = ++ list_entry(master->queue.next, struct spi_message, queue); ++ ++ list_del_init(&master->cur_msg->queue); ++ if (master->busy) ++ was_busy = true; ++ else ++ master->busy = true; ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ ++ if (!was_busy) { ++ ret = master->prepare_transfer_hardware(master); ++ if (ret) { ++ dev_err(&master->dev, ++ "failed to prepare transfer hardware\n"); ++ return; ++ } ++ } ++ ++ ret = master->transfer_one_message(master, master->cur_msg); ++ if (ret) { ++ dev_err(&master->dev, ++ "failed to transfer one message from queue\n"); ++ return; ++ } ++} ++ ++static int spi_init_queue(struct spi_master *master) ++{ ++ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; ++ ++ INIT_LIST_HEAD(&master->queue); ++ spin_lock_init(&master->queue_lock); ++ ++ master->running = false; ++ master->busy = false; ++ ++ init_kthread_worker(&master->kworker); ++ master->kworker_task = kthread_run(kthread_worker_fn, ++ &master->kworker, ++ dev_name(&master->dev)); ++ if (IS_ERR(master->kworker_task)) { ++ dev_err(&master->dev, "failed to create message pump task\n"); ++ return -ENOMEM; ++ } ++ init_kthread_work(&master->pump_messages, spi_pump_messages); ++ ++ /* ++ * Master config will indicate if this controller should run the ++ * message pump with high (realtime) priority to reduce the transfer ++ * latency on the bus by minimising the delay between a transfer ++ * request and the scheduling of the message pump thread. Without this ++ * setting the message pump thread will remain at default priority. ++ */ ++ if (master->rt) { ++ dev_info(&master->dev, ++ "will run message pump with realtime priority\n"); ++ sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); ++ } ++ ++ return 0; ++} ++ ++/** ++ * spi_get_next_queued_message() - called by driver to check for queued ++ * messages ++ * @master: the master to check for queued messages ++ * ++ * If there are more messages in the queue, the next message is returned from ++ * this call. 
++ */ ++struct spi_message *spi_get_next_queued_message(struct spi_master *master) ++{ ++ struct spi_message *next; ++ unsigned long flags; ++ ++ /* get a pointer to the next message, if any */ ++ spin_lock_irqsave(&master->queue_lock, flags); ++ if (list_empty(&master->queue)) ++ next = NULL; ++ else ++ next = list_entry(master->queue.next, ++ struct spi_message, queue); ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ ++ return next; ++} ++EXPORT_SYMBOL_GPL(spi_get_next_queued_message); ++ ++/** ++ * spi_finalize_current_message() - the current message is complete ++ * @master: the master to return the message to ++ * ++ * Called by the driver to notify the core that the message in the front of the ++ * queue is complete and can be removed from the queue. ++ */ ++void spi_finalize_current_message(struct spi_master *master) ++{ ++ struct spi_message *mesg; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&master->queue_lock, flags); ++ mesg = master->cur_msg; ++ master->cur_msg = NULL; ++ ++ queue_kthread_work(&master->kworker, &master->pump_messages); ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ ++ mesg->state = NULL; ++ if (mesg->complete) ++ mesg->complete(mesg->context); ++} ++EXPORT_SYMBOL_GPL(spi_finalize_current_message); ++ ++static int spi_start_queue(struct spi_master *master) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&master->queue_lock, flags); ++ ++ if (master->running || master->busy) { ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ return -EBUSY; ++ } ++ ++ master->running = true; ++ master->cur_msg = NULL; ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ ++ queue_kthread_work(&master->kworker, &master->pump_messages); ++ ++ return 0; ++} ++ ++static int spi_stop_queue(struct spi_master *master) ++{ ++ unsigned long flags; ++ unsigned limit = 500; ++ int ret = 0; ++ ++ spin_lock_irqsave(&master->queue_lock, flags); ++ ++ /* ++ * This is a bit lame, but is optimized for the common execution path. ++ * A wait_queue on the master->busy could be used, but then the common ++ * execution path (pump_messages) would be required to call wake_up or ++ * friends on every SPI message. Do this instead. ++ */ ++ while ((!list_empty(&master->queue) || master->busy) && limit--) { ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ msleep(10); ++ spin_lock_irqsave(&master->queue_lock, flags); ++ } ++ ++ if (!list_empty(&master->queue) || master->busy) ++ ret = -EBUSY; ++ else ++ master->running = false; ++ ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ ++ if (ret) { ++ dev_warn(&master->dev, ++ "could not stop message queue\n"); ++ return ret; ++ } ++ return ret; ++} ++ ++static int spi_destroy_queue(struct spi_master *master) ++{ ++ int ret; ++ ++ ret = spi_stop_queue(master); ++ ++ /* ++ * flush_kthread_worker will block until all work is done. ++ * If the reason that stop_queue timed out is that the work will never ++ * finish, then it does no good to call flush/stop thread, so ++ * return anyway. 
++ */ ++ if (ret) { ++ dev_err(&master->dev, "problem destroying queue\n"); ++ return ret; ++ } ++ ++ flush_kthread_worker(&master->kworker); ++ kthread_stop(master->kworker_task); ++ ++ return 0; ++} ++ ++/** ++ * spi_queued_transfer - transfer function for queued transfers ++ * @spi: spi device which is requesting transfer ++ * @msg: spi message which is to handled is queued to driver queue ++ */ ++static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) ++{ ++ struct spi_master *master = spi->master; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&master->queue_lock, flags); ++ ++ if (!master->running) { ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ return -ESHUTDOWN; ++ } ++ msg->actual_length = 0; ++ msg->status = -EINPROGRESS; ++ ++ list_add_tail(&msg->queue, &master->queue); ++ if (master->running && !master->busy) ++ queue_kthread_work(&master->kworker, &master->pump_messages); ++ ++ spin_unlock_irqrestore(&master->queue_lock, flags); ++ return 0; ++} ++ ++static int spi_master_initialize_queue(struct spi_master *master) ++{ ++ int ret; ++ ++ master->queued = true; ++ master->transfer = spi_queued_transfer; ++ ++ /* Initialize and start queue */ ++ ret = spi_init_queue(master); ++ if (ret) { ++ dev_err(&master->dev, "problem initializing queue\n"); ++ goto err_init_queue; ++ } ++ ret = spi_start_queue(master); ++ if (ret) { ++ dev_err(&master->dev, "problem starting queue\n"); ++ goto err_start_queue; ++ } ++ ++ return 0; ++ ++err_start_queue: ++err_init_queue: ++ spi_destroy_queue(master); ++ return ret; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ + static void spi_master_release(struct device *dev) + { + struct spi_master *master; +@@ -522,6 +812,7 @@ static struct class spi_master_class = { + }; + + ++ + /** + * spi_alloc_master - allocate SPI master controller + * @dev: the controller, possibly using the platform_bus +@@ -621,6 +912,15 @@ int spi_register_master(struct spi_maste + dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), + dynamic ? 
" (dynamic)" : ""); + ++ /* If we're using a queued driver, start the queue */ ++ if (!master->transfer) { ++ status = spi_master_initialize_queue(master); ++ if (status) { ++ device_unregister(&master->dev); ++ goto done; ++ } ++ } ++ + mutex_lock(&board_lock); + list_add_tail(&master->list, &spi_master_list); + list_for_each_entry(bi, &board_list, list) +@@ -636,7 +936,6 @@ done: + } + EXPORT_SYMBOL_GPL(spi_register_master); + +- + static int __unregister(struct device *dev, void *null) + { + spi_unregister_device(to_spi_device(dev)); +@@ -657,6 +956,11 @@ void spi_unregister_master(struct spi_ma + { + int dummy; + ++ if (master->queued) { ++ if (spi_destroy_queue(master)) ++ dev_err(&master->dev, "queue remove failed\n"); ++ } ++ + mutex_lock(&board_lock); + list_del(&master->list); + mutex_unlock(&board_lock); +@@ -666,6 +970,37 @@ void spi_unregister_master(struct spi_ma + } + EXPORT_SYMBOL_GPL(spi_unregister_master); + ++int spi_master_suspend(struct spi_master *master) ++{ ++ int ret; ++ ++ /* Basically no-ops for non-queued masters */ ++ if (!master->queued) ++ return 0; ++ ++ ret = spi_stop_queue(master); ++ if (ret) ++ dev_err(&master->dev, "queue stop failed\n"); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(spi_master_suspend); ++ ++int spi_master_resume(struct spi_master *master) ++{ ++ int ret; ++ ++ if (!master->queued) ++ return 0; ++ ++ ret = spi_start_queue(master); ++ if (ret) ++ dev_err(&master->dev, "queue restart failed\n"); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(spi_master_resume); ++ + static int __spi_master_match(struct device *dev, void *data) + { + struct spi_master *m; +--- a/include/linux/spi/spi.h ++++ b/include/linux/spi/spi.h +@@ -22,6 +22,7 @@ + #include <linux/device.h> + #include <linux/mod_devicetable.h> + #include <linux/slab.h> ++#include <linux/kthread.h> + + /* + * INTERFACES between SPI master-side drivers and SPI infrastructure. +@@ -235,6 +236,27 @@ static inline void spi_unregister_driver + * the device whose settings are being modified. + * @transfer: adds a message to the controller's transfer queue. + * @cleanup: frees controller-specific state ++ * @queued: whether this master is providing an internal message queue ++ * @kworker: thread struct for message pump ++ * @kworker_task: pointer to task for message pump kworker thread ++ * @pump_messages: work struct for scheduling work to the message pump ++ * @queue_lock: spinlock to syncronise access to message queue ++ * @queue: message queue ++ * @cur_msg: the currently in-flight message ++ * @busy: message pump is busy ++ * @running: message pump is running ++ * @rt: whether this queue is set to run as a realtime task ++ * @prepare_transfer_hardware: a message will soon arrive from the queue ++ * so the subsystem requests the driver to prepare the transfer hardware ++ * by issuing this call ++ * @transfer_one_message: the subsystem calls the driver to transfer a single ++ * message while queuing transfers that arrive in the meantime. When the ++ * driver is finished with this message, it must call ++ * spi_finalize_current_message() so the subsystem can issue the next ++ * transfer ++ * @prepare_transfer_hardware: there are currently no more messages on the ++ * queue so the subsystem notifies the driver that it may relax the ++ * hardware by issuing this call + * + * Each SPI master controller can communicate with one or more @spi_device + * children. 
These make a small bus, sharing MOSI, MISO and SCK signals +@@ -318,6 +340,28 @@ struct spi_master { + + /* called on release() to free memory provided by spi_master */ + void (*cleanup)(struct spi_device *spi); ++ ++ /* ++ * These hooks are for drivers that want to use the generic ++ * master transfer queueing mechanism. If these are used, the ++ * transfer() function above must NOT be specified by the driver. ++ * Over time we expect SPI drivers to be phased over to this API. ++ */ ++ bool queued; ++ struct kthread_worker kworker; ++ struct task_struct *kworker_task; ++ struct kthread_work pump_messages; ++ spinlock_t queue_lock; ++ struct list_head queue; ++ struct spi_message *cur_msg; ++ bool busy; ++ bool running; ++ bool rt; ++ ++ int (*prepare_transfer_hardware)(struct spi_master *master); ++ int (*transfer_one_message)(struct spi_master *master, ++ struct spi_message *mesg); ++ int (*unprepare_transfer_hardware)(struct spi_master *master); + }; + + static inline void *spi_master_get_devdata(struct spi_master *master) +@@ -343,6 +387,13 @@ static inline void spi_master_put(struct + put_device(&master->dev); + } + ++/* PM calls that need to be issued by the driver */ ++extern int spi_master_suspend(struct spi_master *master); ++extern int spi_master_resume(struct spi_master *master); ++ ++/* Calls the driver make to interact with the message queue */ ++extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); ++extern void spi_finalize_current_message(struct spi_master *master); + + /* the spi driver core manages memory for the spi_master classdev */ + extern struct spi_master * |
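
The patch above only adds the core infrastructure; nothing in this commit converts a driver to it. As a rough illustration of how a controller driver is expected to plug into the backported API, the sketch below wires up a hypothetical foo-spi platform driver. Everything prefixed foo_ is an invented placeholder; only the spi_master fields and the spi_*() calls are the ones introduced or used by this patch.

	/*
	 * Hypothetical "foo" controller driver showing the shape of the queued
	 * API added by this patch. The foo_* names are placeholders; the
	 * spi_master hooks, spi_finalize_current_message() and
	 * spi_master_suspend()/spi_master_resume() come from the backported core.
	 */
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	/* Called by the message pump before a burst of messages; may sleep. */
	static int foo_prepare_hw(struct spi_master *master)
	{
		/* power up / ungate the controller here */
		return 0;
	}

	/* Called once the queue runs empty; the hardware may be relaxed again. */
	static int foo_unprepare_hw(struct spi_master *master)
	{
		return 0;
	}

	/* Runs in the pump kthread (process context), one message at a time. */
	static int foo_transfer_one_message(struct spi_master *master,
					    struct spi_message *msg)
	{
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			/* shift out xfer->tx_buf / read back xfer->rx_buf here */
			msg->actual_length += xfer->len;
		}

		msg->status = 0;
		/* Hand the message back so the core can pump the next one. */
		spi_finalize_current_message(master);
		return 0;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct spi_master *master;
		int ret;

		master = spi_alloc_master(&pdev->dev, 0);
		if (!master)
			return -ENOMEM;

		master->num_chipselect = 1;
		/* Queued API: leave master->transfer NULL so the core starts the pump. */
		master->prepare_transfer_hardware = foo_prepare_hw;
		master->transfer_one_message = foo_transfer_one_message;
		master->unprepare_transfer_hardware = foo_unprepare_hw;
		master->rt = false;	/* true elevates the pump to SCHED_FIFO */

		platform_set_drvdata(pdev, master);

		ret = spi_register_master(master);
		if (ret)
			spi_master_put(master);
		return ret;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		spi_unregister_master(platform_get_drvdata(pdev));
		return 0;
	}

	/* System sleep: stop/restart the queue instead of open-coding it. */
	static int foo_suspend(struct device *dev)
	{
		return spi_master_suspend(dev_get_drvdata(dev));
	}

	static int foo_resume(struct device *dev)
	{
		return spi_master_resume(dev_get_drvdata(dev));
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend = foo_suspend,
		.resume  = foo_resume,
	};

	static struct platform_driver foo_spi_driver = {
		.driver = {
			.name  = "foo-spi",
			.owner = THIS_MODULE,
			.pm    = &foo_pm_ops,
		},
		.probe  = foo_probe,
		.remove = foo_remove,
	};
	module_platform_driver(foo_spi_driver);

	MODULE_LICENSE("GPL");

With transfer() left NULL, spi_register_master() takes the spi_master_initialize_queue() path added by this patch, so the queue, its locking and the kthread message pump all live in the core; the driver only has to move bytes in transfer_one_message() and finalize each message when it is done.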