From 59848d6aded59a644bd3199033a9dc5a66d528f5 Mon Sep 17 00:00:00 2001
From: Alistair Strachan
Date: Tue, 19 Jun 2018 17:57:34 -0700
Subject: staging: android: ashmem: Remove use of unlikely()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There is no speed difference, and it makes the code harder to read.

Cc: Greg Kroah-Hartman
Cc: Arve Hjønnevåg
Cc: Todd Kjos
Cc: Martijn Coenen
Cc: devel@driverdev.osuosl.org
Cc: linux-kernel@vger.kernel.org
Cc: kernel-team@android.com
Cc: Joel Fernandes
Suggested-by: Greg Kroah-Hartman
Signed-off-by: Alistair Strachan
Signed-off-by: Greg Kroah-Hartman
---
 drivers/staging/android/ashmem.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

(limited to 'drivers/staging/android')

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a1a0025b59e0..c6386e4f5c9b 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -178,7 +178,7 @@ static int range_alloc(struct ashmem_area *asma,
 	struct ashmem_range *range;
 
 	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-	if (unlikely(!range))
+	if (!range)
 		return -ENOMEM;
 
 	range->asma = asma;
@@ -246,11 +246,11 @@ static int ashmem_open(struct inode *inode, struct file *file)
 	int ret;
 
 	ret = generic_file_open(inode, file);
-	if (unlikely(ret))
+	if (ret)
 		return ret;
 
 	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
-	if (unlikely(!asma))
+	if (!asma)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&asma->unpinned_list);
@@ -361,14 +361,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	mutex_lock(&ashmem_mutex);
 
 	/* user needs to SET_SIZE before mapping */
-	if (unlikely(!asma->size)) {
+	if (!asma->size) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	/* requested protection bits must match our allowed protection mask */
-	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
-		     calc_vm_prot_bits(PROT_MASK, 0))) {
+	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
+	    calc_vm_prot_bits(PROT_MASK, 0)) {
 		ret = -EPERM;
 		goto out;
 	}
@@ -486,7 +486,7 @@ static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
 	mutex_lock(&ashmem_mutex);
 
 	/* the user can only remove, not add, protection bits */
-	if (unlikely((asma->prot_mask & prot) != prot)) {
+	if ((asma->prot_mask & prot) != prot) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -524,7 +524,7 @@ static int set_name(struct ashmem_area *asma, void __user *name)
 	local_name[ASHMEM_NAME_LEN - 1] = '\0';
 	mutex_lock(&ashmem_mutex);
 	/* cannot change an existing mapping's name */
-	if (unlikely(asma->file))
+	if (asma->file)
 		ret = -EINVAL;
 	else
 		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
@@ -563,7 +563,7 @@ static int get_name(struct ashmem_area *asma, void __user *name)
 	 * Now we are just copying from the stack variable to userland
 	 * No lock held
 	 */
-	if (unlikely(copy_to_user(name, local_name, len)))
+	if (copy_to_user(name, local_name, len))
 		ret = -EFAULT;
 	return ret;
 }
@@ -701,25 +701,25 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 	size_t pgstart, pgend;
 	int ret = -EINVAL;
 
-	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+	if (copy_from_user(&pin, p, sizeof(pin)))
 		return -EFAULT;
 
 	mutex_lock(&ashmem_mutex);
 
-	if (unlikely(!asma->file))
+	if (!asma->file)
 		goto out_unlock;
 
 	/* per custom, you can pass zero for len to mean "everything onward" */
 	if (!pin.len)
 		pin.len = PAGE_ALIGN(asma->size) - pin.offset;
 
-	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+	if ((pin.offset | pin.len) & ~PAGE_MASK)
 		goto out_unlock;
 
-	if (unlikely(((__u32)-1) - pin.offset < pin.len))
+	if (((__u32)-1) - pin.offset < pin.len)
 		goto out_unlock;
 
-	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
 		goto out_unlock;
 
 	pgstart = pin.offset / PAGE_SIZE;
@@ -856,7 +856,7 @@ static int __init ashmem_init(void)
 	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
 					       sizeof(struct ashmem_area),
 					       0, 0, NULL);
-	if (unlikely(!ashmem_area_cachep)) {
+	if (!ashmem_area_cachep) {
 		pr_err("failed to create slab cache\n");
 		goto out;
 	}
@@ -864,13 +864,13 @@ static int __init ashmem_init(void)
 	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
 						sizeof(struct ashmem_range),
 						0, 0, NULL);
-	if (unlikely(!ashmem_range_cachep)) {
+	if (!ashmem_range_cachep) {
 		pr_err("failed to create slab cache\n");
 		goto out_free1;
 	}
 
 	ret = misc_register(&ashmem_misc);
-	if (unlikely(ret)) {
+	if (ret) {
 		pr_err("failed to register misc device!\n");
 		goto out_free2;
 	}
--
cgit v1.2.3
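
Background on the macro being removed: unlikely() is only a branch-prediction hint that expands, roughly, to GCC's __builtin_expect(); it never changes what a condition evaluates to. The sketch below is a userspace approximation of the kernel macros (not kernel code, and it ignores the branch-profiling variants) to show why dropping the hint on a one-shot error path costs nothing functionally.

/*
 * Illustrative sketch only: a userspace approximation of the kernel's
 * likely()/unlikely() macros, roughly as defined when branch
 * profiling is disabled.
 */
#include <stdio.h>
#include <stdlib.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int do_alloc(void **out, size_t len)
{
	void *p = malloc(len);

	/*
	 * With or without unlikely(), this branch behaves identically;
	 * the macro only tells the compiler which way the branch is
	 * expected to go. On an error path taken at most once, the
	 * hint buys nothing and just clutters the condition.
	 */
	if (unlikely(!p))
		return -1;

	*out = p;
	return 0;
}

int main(void)
{
	void *buf;

	if (do_alloc(&buf, 4096))
		return 1;

	puts("allocation succeeded");
	free(buf);
	return 0;
}
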
From 8632c614565d0c5fdde527889601c018e97b6384 Mon Sep 17 00:00:00 2001
From: Alistair Strachan
Date: Tue, 19 Jun 2018 17:57:35 -0700
Subject: staging: android: ashmem: Fix mmap size validation

The ashmem driver did not check that the size/offset of the vma passed
to its .mmap() function was not larger than the ashmem object being
mapped. This could cause mmap() to succeed, even though accessing parts
of the mapping would later fail with a segmentation fault.

Ensure an error is returned by the ashmem_mmap() function if the vma
size is larger than the ashmem object size. This enables safer handling
of the problem in userspace.

Cc: Todd Kjos
Cc: devel@driverdev.osuosl.org
Cc: linux-kernel@vger.kernel.org
Cc: kernel-team@android.com
Cc: Joel Fernandes
Signed-off-by: Alistair Strachan
Acked-by: Joel Fernandes (Google)
Reviewed-by: Martijn Coenen
Signed-off-by: Greg Kroah-Hartman
---
 drivers/staging/android/ashmem.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'drivers/staging/android')

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index c6386e4f5c9b..e392358ec244 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -366,6 +366,12 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 		goto out;
 	}
 
+	/* requested mapping size larger than object size */
+	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* requested protection bits must match our allowed protection mask */
 	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
 	    calc_vm_prot_bits(PROT_MASK, 0)) {
--
cgit v1.2.3
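
To observe the new check from userspace, the hedged sketch below opens /dev/ashmem, sets a one-page object size, and then asks mmap() for a larger mapping; with this patch applied the call should fail with EINVAL instead of succeeding and faulting later, as the commit message describes. The ioctl constants are reproduced here on the assumption that they match the staging uapi header (drivers/staging/android/uapi/ashmem.h); include <linux/ashmem.h> instead if your toolchain ships it, and note that /dev/ashmem exists only on kernels with this driver enabled.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Assumed ioctl definitions, mirroring the staging uapi header; prefer
 * including <linux/ashmem.h> if it is available on your system.
 */
#define __ASHMEMIOC		0x77
#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)

int main(void)
{
	size_t obj_size = 4096;		/* one page */
	size_t map_size = 8 * 4096;	/* deliberately larger than the object */
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ashmem");
		return 1;
	}
	if (ioctl(fd, ASHMEM_SET_SIZE, obj_size) < 0) {
		perror("ASHMEM_SET_SIZE");
		return 1;
	}

	/*
	 * Before this patch the oversized mmap() could succeed and later
	 * fault on access; with the added check it should fail with EINVAL.
	 */
	void *p = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		printf("mmap rejected as expected: %s\n", strerror(errno));
	else
		printf("mmap unexpectedly succeeded\n");

	close(fd);
	return 0;
}
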
From 10c553b154c725e27da2f09e45692222f97beb6e Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Mon, 18 Jun 2018 17:09:09 +0200
Subject: staging: android/vsoc: stop using 'timespec'

The timespec structure suffers from the y2038 overflow and should not
be used. This changes handle_vsoc_cond_wait() to use ktime_t directly.

Signed-off-by: Arnd Bergmann
Reviewed-by: Martijn Coenen
Tested-by: Alistair Strachan
Signed-off-by: Greg Kroah-Hartman
---
 drivers/staging/android/vsoc.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'drivers/staging/android')

diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 806beda1040b..22571abcaa4e 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -405,7 +405,7 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
 	int ret = 0;
 	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
 	atomic_t *address = NULL;
-	struct timespec ts;
+	ktime_t wake_time;
 
 	/* Ensure that the offset is aligned */
 	if (arg->offset & (sizeof(uint32_t) - 1))
@@ -433,14 +433,13 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
 		 * We do things this way to flatten differences between 32 bit
 		 * and 64 bit timespecs.
 		 */
-		ts.tv_sec = arg->wake_time_sec;
-		ts.tv_nsec = arg->wake_time_nsec;
-
-		if (!timespec_valid(&ts))
+		if (arg->wake_time_nsec >= NSEC_PER_SEC)
 			return -EINVAL;
+		wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);
+
 		hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
 				      HRTIMER_MODE_ABS);
-		hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts),
+		hrtimer_set_expires_range_ns(&to->timer, wake_time,
 					     current->timer_slack_ns);
 
 		hrtimer_init_sleeper(to, current);
--
cgit v1.2.3

From 173a4906aebe14fccc6cd338efda618fd69d56de Mon Sep 17 00:00:00 2001
From: Tobias Lindskog
Date: Fri, 6 Jul 2018 14:44:16 -0700
Subject: staging: android: ashmem: Shrink directly through shmem_fallocate

When ashmem_shrink is called from direct reclaim on a user thread, a
call to do_fallocate will check for permissions against the security
policy of that user thread. It can thus fail by chance if called on a
thread that isn't permitted to modify the relevant ashmem areas.

Because we know that we have a shmem file underneath, call the shmem
implementation of fallocate directly instead of going through the
user-space interface for fallocate.

Signed-off-by: Tobias Lindskog
Signed-off-by: Jeff Vander Stoep
Signed-off-by: Joel Fernandes (Google)
Signed-off-by: Greg Kroah-Hartman
---
 drivers/staging/android/ashmem.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'drivers/staging/android')

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index e392358ec244..b73cc1e089a3 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -450,9 +450,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-		vfs_fallocate(range->asma->file,
-			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-			      start, end - start);
+		range->asma->file->f_op->fallocate(range->asma->file,
+				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
--
cgit v1.2.3
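
For the vsoc timespec change above: a 32-bit time_t can only count seconds up to early 2038, while ktime_t is a 64-bit nanosecond count, so ktime_set(sec, nsec) has no such limit; the patch also keeps the only part of timespec_valid() that matters here, the nanosecond range check. The userspace sketch below is an illustration of that reasoning, not kernel code; wake_time_valid() is a hypothetical helper mirroring the new check.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Hypothetical mirror of the nsec range check that replaces timespec_valid(). */
static int wake_time_valid(uint32_t nsec)
{
	return nsec < NSEC_PER_SEC;
}

int main(void)
{
	time_t last32 = (time_t)INT32_MAX;	/* epoch value 2038-01-19 03:14:07 UTC */
	time_t wrapped = (time_t)INT32_MIN;	/* where a wrapping 32-bit counter lands */

	printf("last representable 32-bit second: %s", ctime(&last32));
	printf("after the wraparound:             %s", ctime(&wrapped));

	/* A 64-bit nanosecond count, as ktime_t stores internally. */
	int64_t wake_ns = (int64_t)1700000000 * NSEC_PER_SEC + 500000000;
	printf("nsec in range? %d, wake time in ns: %lld\n",
	       wake_time_valid(500000000), (long long)wake_ns);
	return 0;
}

For the final ashmem patch: the shrinker frees purged ranges by punching holes in the area's backing shmem file, and the fix calls the file's own fallocate operation so the per-task permission checks done by the VFS fallocate path are skipped during reclaim. The sketch below is a hypothetical userspace demo of the same FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE operation on a memfd (also tmpfs-backed); it shows what the call does to the data, not the in-kernel permission issue, and it assumes glibc exposes memfd_create() and the FALLOC_FL_* flags under _GNU_SOURCE.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 16 * 4096;
	int fd = memfd_create("ashmem-demo", 0);

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("memfd_create/ftruncate");
		return 1;
	}

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xaa, len);	/* populate the backing pages */

	/* Release the first four pages, as the shrinker does per LRU range. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 4 * 4096) < 0) {
		perror("fallocate");
		return 1;
	}

	/* The mapping stays valid; punched pages read back as zeroes. */
	printf("first byte after punch: %#x, size unchanged: %zu\n",
	       (unsigned char)p[0], len);

	munmap(p, len);
	close(fd);
	return 0;
}
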