author     Tony Battersby <tonyb@cybernetics.com>          2009-03-31 15:24:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-04-01 08:59:19 -0700
commit     abff55cee1039b5a3b96f7a5eb6e65b9f247a274 (patch)
tree       dfbcc71256c2129d2e0ee3e077461103338b9591 /fs
parent     bb57c3edcd2fc51d95914c39448f36e43af9d6af (diff)
epoll: don't use current in irq context
ep_call_nested() (formerly ep_poll_safewake()) uses "current" (without dereferencing it) to detect callback recursion, but it may be called from irq context where the use of current is generally discouraged. It would be better to use get_cpu() and put_cpu() to detect the callback recursion.

Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
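In short, the recursion check now keys its per-call bookkeeping on the CPU id rather than on the task pointer. The following is a minimal sketch of that pattern, using the names from the patch but simplified; the function name, parameters, and the elided callback invocation are illustrative stand-ins, not the complete eventpoll code.

#include <linux/list.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct nested_call_node {
	struct list_head llink;
	void *cookie;
	int cpu;		/* CPU that entered the call; meaningful even in irq context */
};

/* Hypothetical helper, sketching the get_cpu()/put_cpu() recursion guard. */
static int ep_call_nested_sketch(struct list_head *call_list, spinlock_t *lock,
				 int max_nests, void *cookie)
{
	struct nested_call_node *tncur, tnode;
	int this_cpu = get_cpu();	/* disables preemption, so the CPU id stays stable */
	int call_nests = 0, error = 0;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(tncur, call_list, llink) {
		/* Same CPU already in flight with this cookie, or nested too deep. */
		if (tncur->cpu == this_cpu &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			error = -1;
			goto out_unlock;
		}
	}

	/* Record this call so deeper invocations on the same CPU can see it. */
	tnode.cpu = this_cpu;
	tnode.cookie = cookie;
	list_add(&tnode.llink, call_list);
	/* (The real code drops the lock, runs the nested callback, then retakes
	 * the lock before removing tnode; elided in this sketch.) */
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(lock, flags);
	put_cpu();			/* pairs with get_cpu() above */
	return error;
}

The key point is that get_cpu() keeps the caller pinned to one CPU for the duration of the check, so matching on tncur->cpu is as reliable from an interrupt handler as matching on current was from process context.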
Diffstat (limited to 'fs')
-rw-r--r--  fs/eventpoll.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 205a1e1c77c7..db4365f8a75c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -97,8 +97,8 @@ struct epoll_filefd {
  */
 struct nested_call_node {
 	struct list_head llink;
-	struct task_struct *task;
 	void *cookie;
+	int cpu;
 };
 
 /*
@@ -327,7 +327,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 {
 	int error, call_nests = 0;
 	unsigned long flags;
-	struct task_struct *this_task = current;
+	int this_cpu = get_cpu();
 	struct list_head *lsthead = &ncalls->tasks_call_list;
 	struct nested_call_node *tncur;
 	struct nested_call_node tnode;
@@ -340,20 +340,19 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 	 * very much limited.
 	 */
 	list_for_each_entry(tncur, lsthead, llink) {
-		if (tncur->task == this_task &&
+		if (tncur->cpu == this_cpu &&
 		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
 			/*
 			 * Ops ... loop detected or maximum nest level reached.
 			 * We abort this wake by breaking the cycle itself.
 			 */
-			spin_unlock_irqrestore(&ncalls->lock, flags);
-
-			return -1;
+			error = -1;
+			goto out_unlock;
 		}
 	}
 
 	/* Add the current task and cookie to the list */
-	tnode.task = this_task;
+	tnode.cpu = this_cpu;
 	tnode.cookie = cookie;
 	list_add(&tnode.llink, lsthead);
 
@@ -365,8 +364,10 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 
 	/* Remove the current task from the list */
 	spin_lock_irqsave(&ncalls->lock, flags);
 	list_del(&tnode.llink);
+out_unlock:
 	spin_unlock_irqrestore(&ncalls->lock, flags);
 
+	put_cpu();
 	return error;
 }