Message-Id: <20180620005735.219840-1-astrachan@google.com>
Date: Tue, 19 Jun 2018 17:57:34 -0700
From: Alistair Strachan <astrachan@google.com>
To: linux-kernel@vger.kernel.org
Cc: Alistair Strachan <astrachan@google.com>,
        Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
        "Arve Hjønnevåg" <arve@android.com>,
        Todd Kjos <tkjos@android.com>,
        Martijn Coenen <maco@android.com>, devel@driverdev.osuosl.org,
        kernel-team@android.com, Joel Fernandes <joel@joelfernandes.org>
Subject: [PATCH 1/2] staging: android: ashmem: Remove use of unlikely()

Using unlikely() on these paths makes no measurable speed difference,
and it makes the code harder to read.
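
For reference, unlikely() is only a thin wrapper around the compiler's
branch-prediction hint; a minimal sketch of the familiar definition from
include/linux/compiler.h:

        /*
         * Hint that x is almost never true, so the compiler may lay the
         * branch body out of the hot path. The !! normalizes x to 0/1.
         */
        #define unlikely(x) __builtin_expect(!!(x), 0)

None of the checks touched below sit on a hot path, so dropping the hint
costs nothing.
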
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Martijn Coenen <maco@android.com>
Cc: devel@driverdev.osuosl.org
Cc: linux-kernel@vger.kernel.org
Cc: kernel-team@android.com
Cc: Joel Fernandes <joel@joelfernandes.org>
Suggested-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Alistair Strachan <astrachan@google.com>
---
drivers/staging/android/ashmem.c | 34 ++++++++++++++++----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a1a0025b59e0..c6386e4f5c9b 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -178,7 +178,7 @@ static int range_alloc(struct ashmem_area *asma,
         struct ashmem_range *range;

         range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-        if (unlikely(!range))
+        if (!range)
                 return -ENOMEM;

         range->asma = asma;
@@ -246,11 +246,11 @@ static int ashmem_open(struct inode *inode, struct file *file)
         int ret;

         ret = generic_file_open(inode, file);
-        if (unlikely(ret))
+        if (ret)
                 return ret;

         asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
-        if (unlikely(!asma))
+        if (!asma)
                 return -ENOMEM;

         INIT_LIST_HEAD(&asma->unpinned_list);
@@ -361,14 +361,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
         mutex_lock(&ashmem_mutex);

         /* user needs to SET_SIZE before mapping */
-        if (unlikely(!asma->size)) {
+        if (!asma->size) {
                 ret = -EINVAL;
                 goto out;
         }

         /* requested protection bits must match our allowed protection mask */
-        if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
-                     calc_vm_prot_bits(PROT_MASK, 0))) {
+        if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
+            calc_vm_prot_bits(PROT_MASK, 0)) {
                 ret = -EPERM;
                 goto out;
         }
@@ -486,7 +486,7 @@ static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
         mutex_lock(&ashmem_mutex);

         /* the user can only remove, not add, protection bits */
-        if (unlikely((asma->prot_mask & prot) != prot)) {
+        if ((asma->prot_mask & prot) != prot) {
                 ret = -EINVAL;
                 goto out;
         }
@@ -524,7 +524,7 @@ static int set_name(struct ashmem_area *asma, void __user *name)
         local_name[ASHMEM_NAME_LEN - 1] = '\0';
         mutex_lock(&ashmem_mutex);
         /* cannot change an existing mapping's name */
-        if (unlikely(asma->file))
+        if (asma->file)
                 ret = -EINVAL;
         else
                 strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
@@ -563,7 +563,7 @@ static int get_name(struct ashmem_area *asma, void __user *name)
          * Now we are just copying from the stack variable to userland
          * No lock held
          */
-        if (unlikely(copy_to_user(name, local_name, len)))
+        if (copy_to_user(name, local_name, len))
                 ret = -EFAULT;
         return ret;
 }
@@ -701,25 +701,25 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
         size_t pgstart, pgend;
         int ret = -EINVAL;

-        if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+        if (copy_from_user(&pin, p, sizeof(pin)))
                 return -EFAULT;

         mutex_lock(&ashmem_mutex);

-        if (unlikely(!asma->file))
+        if (!asma->file)
                 goto out_unlock;

         /* per custom, you can pass zero for len to mean "everything onward" */
         if (!pin.len)
                 pin.len = PAGE_ALIGN(asma->size) - pin.offset;

-        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+        if ((pin.offset | pin.len) & ~PAGE_MASK)
                 goto out_unlock;

-        if (unlikely(((__u32)-1) - pin.offset < pin.len))
+        if (((__u32)-1) - pin.offset < pin.len)
                 goto out_unlock;

-        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+        if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
                 goto out_unlock;

         pgstart = pin.offset / PAGE_SIZE;
@@ -856,7 +856,7 @@ static int __init ashmem_init(void)
         ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                                sizeof(struct ashmem_area),
                                                0, 0, NULL);
-        if (unlikely(!ashmem_area_cachep)) {
+        if (!ashmem_area_cachep) {
                 pr_err("failed to create slab cache\n");
                 goto out;
         }
@@ -864,13 +864,13 @@ static int __init ashmem_init(void)
         ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                 sizeof(struct ashmem_range),
                                                 0, 0, NULL);
-        if (unlikely(!ashmem_range_cachep)) {
+        if (!ashmem_range_cachep) {
                 pr_err("failed to create slab cache\n");
                 goto out_free1;
         }

         ret = misc_register(&ashmem_misc);
-        if (unlikely(ret)) {
+        if (ret) {
                 pr_err("failed to register misc device!\n");
                 goto out_free2;
         }
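
For anyone who wants to double-check the "no speed difference" claim
outside the kernel: unlikely() reduces to GCC/Clang's __builtin_expect(),
so the two forms can be compared directly. A minimal userspace sketch
(hypothetical file name check_expect.c; build with e.g. gcc -O2 -S and
diff the assembly of the two functions):

        /* check_expect.c: the same error check with and without the hint */
        #define unlikely(x) __builtin_expect(!!(x), 0)

        int with_hint(int err)
        {
                if (unlikely(err))      /* branch annotated as rarely taken */
                        return -1;
                return 0;
        }

        int without_hint(int err)
        {
                if (err)                /* no annotation; compiler heuristics decide */
                        return -1;
                return 0;
        }

On straightforward error paths like the ones this patch touches, the
generated code for the two variants is typically identical, which is the
point of the change.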