path: root/drivers/media/IR/ir-raw-event.c
author    Maxim Levitsky <maximlevitsky@gmail.com>    2010-07-31 11:59:17 -0300
committer Mauro Carvalho Chehab <mchehab@redhat.com>  2010-08-08 23:42:59 -0300
commit    0d2cb1de8e81ffc06df67853be5ead3556d3a6b5 (patch)
tree      b2fbf6a996da521592fe654c8bd1e9272992b260 /drivers/media/IR/ir-raw-event.c
parent    45a568fa6f6bf8e5b9c32e52292f297e8473a985 (diff)
V4L/DVB: IR: replace workqueue with kthread
It is perfectly possible to have ir_raw_event_work running concurrently on two CPUs, so we must protect it from that situation. This stems from the fact that if the hardware sends short packets of samples, we might end up queueing the work item more times than necessary. Such a job isn't well suited to a workqueue, so use a kernel thread instead.

Signed-off-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
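To make the change concrete, below is a minimal, self-contained sketch of the wake/sleep pattern this patch adopts, written as standalone module code rather than the driver itself; the demo_* names are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_thread;

/* Consumer: a single dedicated thread drains the queued samples, so the
 * decoding can never run concurrently with itself the way a re-queued
 * work item can. */
static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... drain queued samples here ... */

		/* Sleep until the producer wakes us. Re-checking
		 * kthread_should_stop() after setting the sleep state
		 * avoids missing a concurrent kthread_stop(). */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return 0;
}

/* Producer: would be called from the driver's receive path. Waking an
 * already-running thread is a harmless no-op. */
static void demo_new_samples(void)
{
	wake_up_process(demo_thread);
}

static int __init demo_init(void)
{
	demo_thread = kthread_run(demo_thread_fn, NULL, "demo-rx");
	return IS_ERR(demo_thread) ? PTR_ERR(demo_thread) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_thread);	/* wakes the thread and waits for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The key property is that a work item re-queued while a previous invocation is still executing can run concurrently on another CPU, whereas a single kthread serializes the decoding by construction: redundant wake_up_process() calls on a thread that is already awake simply do nothing.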
Diffstat (limited to 'drivers/media/IR/ir-raw-event.c')
-rw-r--r--  drivers/media/IR/ir-raw-event.c  42
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 9d5c029cfc0c..d0c18db4c0d3 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -12,9 +12,10 @@
* GNU General Public License for more details.
*/
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/sched.h>
+#include <linux/freezer.h>
#include "ir-core-priv.h"
/* Define the max number of pulse/space transitions to buffer */
@@ -33,20 +34,30 @@ static u64 available_protocols;
static struct work_struct wq_load;
#endif
-static void ir_raw_event_work(struct work_struct *work)
+static int ir_raw_event_thread(void *data)
{
struct ir_raw_event ev;
struct ir_raw_handler *handler;
- struct ir_raw_event_ctrl *raw =
- container_of(work, struct ir_raw_event_ctrl, rx_work);
+ struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
+
+ while (!kthread_should_stop()) {
+ try_to_freeze();
- while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
mutex_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list)
- handler->decode(raw->input_dev, ev);
+
+ while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
+ list_for_each_entry(handler, &ir_raw_handler_list, list)
+ handler->decode(raw->input_dev, ev);
+ raw->prev_ev = ev;
+ }
+
mutex_unlock(&ir_raw_handler_lock);
- raw->prev_ev = ev;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
}
+
+ return 0;
}
/**
@@ -141,7 +152,7 @@ void ir_raw_event_handle(struct input_dev *input_dev)
if (!ir->raw)
return;
- schedule_work(&ir->raw->rx_work);
+ wake_up_process(ir->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
@@ -170,7 +181,7 @@ int ir_raw_event_register(struct input_dev *input_dev)
return -ENOMEM;
ir->raw->input_dev = input_dev;
- INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
+
ir->raw->enabled_protocols = ~0;
rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
GFP_KERNEL);
@@ -180,6 +191,15 @@ int ir_raw_event_register(struct input_dev *input_dev)
return rc;
}
+ ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw,
+ "rc%u", (unsigned int)ir->devno);
+
+ if (IS_ERR(ir->raw->thread)) {
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return PTR_ERR(ir->raw->thread);
+ }
+
mutex_lock(&ir_raw_handler_lock);
list_add_tail(&ir->raw->list, &ir_raw_client_list);
list_for_each_entry(handler, &ir_raw_handler_list, list)
@@ -198,7 +218,7 @@ void ir_raw_event_unregister(struct input_dev *input_dev)
if (!ir->raw)
return;
- cancel_work_sync(&ir->raw->rx_work);
+ kthread_stop(ir->raw->thread);
mutex_lock(&ir_raw_handler_lock);
list_del(&ir->raw->list);