| author | Arnd Bergmann <arnd.bergmann@de.ibm.com> | 2007-12-20 16:39:59 +0900 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2007-12-21 19:46:19 +1100 |
| commit | 33bfd7a73861c3727482c6c1c1c2ef40054060b7 (patch) | |
| tree | 97b9ceaa32dfe08a39049fa27eda4220cc5b8cb7 /arch | |
| parent | 7cd58e43810852eeb7af5a0c803f3890bd08b581 (diff) | |
| download | linux-33bfd7a73861c3727482c6c1c1c2ef40054060b7.tar.bz2 | |
[POWERPC] spufs: block fault handlers in spu_acquire_runnable
This change disables the logic that faults in spu contexts under the
covers from the page fault handler. Instead, when a fault requires a
runnable context, the handler blocks until the context is scheduled
by other means.
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
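
The mechanism behind this is a new wait queue, run_wq, added to struct spu_context: fault paths sleep on it until the scheduler marks the context runnable and wakes them. As a rough, hypothetical illustration of that pattern (not the spufs code itself; the my_* names are made up, and the real code additionally holds the context lock around the wait via spu_acquire()/spufs_wait()), a minimal kernel-style sketch could look like this:

```c
/*
 * Illustrative sketch only -- not the spufs implementation.  It shows
 * the generic "sleep until the context becomes runnable" pattern that
 * run_wq enables; locking around ctx->state is omitted for brevity.
 */
#include <linux/wait.h>
#include <linux/sched.h>

#define MY_STATE_SAVED		0
#define MY_STATE_RUNNABLE	1

struct my_ctx {
	int state;			/* SAVED until the scheduler loads it */
	wait_queue_head_t run_wq;	/* fault paths sleep here */
};

static void my_ctx_init(struct my_ctx *ctx)
{
	ctx->state = MY_STATE_SAVED;
	init_waitqueue_head(&ctx->run_wq);
}

/* Fault-handler side: block until the context is scheduled elsewhere. */
static int my_wait_runnable(struct my_ctx *ctx)
{
	/* 0 on success, -ERESTARTSYS if interrupted by a signal */
	return wait_event_interruptible(ctx->run_wq,
					ctx->state == MY_STATE_RUNNABLE);
}

/* Scheduler side: mark the context runnable and wake all waiters. */
static void my_activate(struct my_ctx *ctx)
{
	ctx->state = MY_STATE_RUNNABLE;
	wake_up_all(&ctx->run_wq);
}
```

In the patch itself, spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE) takes the place of wait_event_interruptible(), and the wake_up_all(&ctx->run_wq) is done in spu_activate() once the context has been bound to a physical SPU.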
Diffstat (limited to 'arch')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/powerpc/platforms/cell/spufs/context.c | 1 |
| -rw-r--r-- | arch/powerpc/platforms/cell/spufs/file.c | 25 |
| -rw-r--r-- | arch/powerpc/platforms/cell/spufs/sched.c | 1 |
| -rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 1 |

4 files changed, 21 insertions, 7 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 9cb081c26e71..6fa24d38706e 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -52,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	init_waitqueue_head(&ctx->wbox_wq);
 	init_waitqueue_head(&ctx->stop_wq);
 	init_waitqueue_head(&ctx->mfc_wq);
+	init_waitqueue_head(&ctx->run_wq);
 	ctx->state = SPU_STATE_SAVED;
 	ctx->ops = &spu_backing_ops;
 	ctx->owner = get_task_mm(current);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d9e56a503795..714972621220 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -236,21 +236,31 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 {
 	struct spu_context *ctx = vma->vm_file->private_data;
 	unsigned long area, offset = address - vma->vm_start;
-	int ret;
 
 	offset += vma->vm_pgoff << PAGE_SHIFT;
 	if (offset >= ps_size)
 		return NOPFN_SIGBUS;
 
-	/* error here usually means a signal.. we might want to test
-	 * the error code more precisely though
+	/*
+	 * We have to wait for context to be loaded before we have
+	 * pages to hand out to the user, but we don't want to wait
+	 * with the mmap_sem held.
+	 * It is possible to drop the mmap_sem here, but then we need
+	 * to return NOPFN_REFAULT because the mappings may have
+	 * changed.
 	 */
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return NOPFN_REFAULT;
+	spu_acquire(ctx);
+	if (ctx->state == SPU_STATE_SAVED) {
+		up_read(&current->mm->mmap_sem);
+		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+		down_read(&current->mm->mmap_sem);
+		goto out;
+	}
 
 	area = ctx->spu->problem_phys + ps_offs;
 	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+out:
 	spu_release(ctx);
 
 	return NOPFN_REFAULT;
@@ -1505,7 +1515,8 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 	if (ret)
 		goto out;
 
-	ret = spu_acquire_runnable(ctx, 0);
+	spu_acquire(ctx);
+	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
 	if (ret)
 		goto out;
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index ee80de07c0bc..52215aa2f3c6 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -682,6 +682,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 			spu_bind_context(spu, ctx);
 			cbe_spu_info[node].nr_active++;
 			mutex_unlock(&cbe_spu_info[node].list_mutex);
+			wake_up_all(&ctx->run_wq);
 			return 0;
 		}
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index cce50f317c78..fcab1504f117 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -71,6 +71,7 @@ struct spu_context {
 	wait_queue_head_t wbox_wq;
 	wait_queue_head_t stop_wq;
 	wait_queue_head_t mfc_wq;
+	wait_queue_head_t run_wq;
 	struct fasync_struct *ibox_fasync;
 	struct fasync_struct *wbox_fasync;
 	struct fasync_struct *mfc_fasync;
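
The subtle part of the spufs_ps_nopfn() change above is that a ->nopfn handler is entered with the caller's mmap_sem held for reading, so it must not sleep waiting for the context with that semaphore held. The new code therefore drops mmap_sem across the wait and returns NOPFN_REFAULT, forcing the fault to be retried, since the mapping may have changed while the semaphore was released. A stripped-down, hypothetical sketch of that refault pattern (2.6.24-era ->nopfn interface; wait_until_loaded() is a made-up stand-in for the spu_acquire()/spufs_wait() sequence):

```c
/*
 * Hypothetical sketch of the drop-mmap_sem-and-refault pattern used in
 * spufs_ps_nopfn() above.  wait_until_loaded() is a placeholder for the
 * real spu_acquire()/spufs_wait() sequence.
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>

static void wait_until_loaded(void *ctx)
{
	/* placeholder: sleep until the context becomes runnable */
}

static unsigned long sketch_nopfn(struct vm_area_struct *vma,
				  unsigned long address)
{
	void *ctx = vma->vm_file->private_data;

	/* Don't sleep with mmap_sem held; drop it across the wait. */
	up_read(&current->mm->mmap_sem);
	wait_until_loaded(ctx);
	down_read(&current->mm->mmap_sem);

	/*
	 * The mapping may have changed while mmap_sem was dropped, so
	 * ask the fault code to retry instead of inserting a pfn that
	 * might now be stale.
	 */
	return NOPFN_REFAULT;
}
```

Note that the real handler only takes this slow path when ctx->state == SPU_STATE_SAVED; if the context is already loaded it maps the problem-state page directly with vm_insert_pfn() and still returns NOPFN_REFAULT, as the diff shows.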