Diffstat (limited to 'mm/zsmalloc.c'):
 mm/zsmalloc.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index f24b71568e83..9e13fd7ee635 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -399,7 +399,10 @@ static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
*handle = zs_malloc(pool, size, gfp);
- return *handle ? 0 : -1;
+
+ if (IS_ERR((void *)(*handle)))
+ return PTR_ERR((void *)*handle);
+ return 0;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
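
With this hunk, zs_zpool_malloc() passes the errno encoded by zs_malloc() back to the zpool layer instead of collapsing every failure into -1. A minimal caller-side sketch of what that buys a zpool user follows; demo_store() is a hypothetical function written only for illustration, while zpool_malloc() and zpool_free() are the existing zpool API:

/* Hypothetical zpool user; demo_store() is not part of this patch. */
static int demo_store(struct zpool *zpool, size_t size, gfp_t gfp)
{
	unsigned long handle;
	int ret;

	ret = zpool_malloc(zpool, size, gfp, &handle);
	if (ret)	/* now a meaningful errno such as -EINVAL or -ENOMEM */
		return ret;

	/* ... copy data in via zpool_map_handle()/zpool_unmap_handle() ... */

	zpool_free(zpool, handle);
	return 0;
}
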
@@ -1400,7 +1403,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
* @gfp: gfp flags when allocating object
*
* On success, handle to the allocated object is returned,
- * otherwise 0.
+ * otherwise an ERR_PTR().
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
@@ -1411,11 +1414,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
struct zspage *zspage;
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
- return 0;
+ return (unsigned long)ERR_PTR(-EINVAL);
handle = cache_alloc_handle(pool, gfp);
if (!handle)
- return 0;
+ return (unsigned long)ERR_PTR(-ENOMEM);
/* extra space in chunk to keep the handle */
size += ZS_HANDLE_SIZE;
@@ -1440,7 +1443,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
zspage = alloc_zspage(pool, class, gfp);
if (!zspage) {
cache_free_handle(pool, handle);
- return 0;
+ return (unsigned long)ERR_PTR(-ENOMEM);
}
spin_lock(&class->lock);
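
Callers that use zs_malloc() directly can no longer test the returned handle against 0; the failure paths above now return an ERR_PTR() value cast to unsigned long. A hedged sketch of the adjusted check (demo_alloc() is invented here for illustration; IS_ERR_VALUE() and PTR_ERR() are the stock err.h helpers):

/* Hypothetical direct user of zs_malloc(), showing the new error check. */
static int demo_alloc(struct zs_pool *pool, size_t size, unsigned long *out)
{
	unsigned long handle = zs_malloc(pool, size, GFP_KERNEL);

	if (IS_ERR_VALUE(handle))	/* previously: if (!handle) */
		return PTR_ERR((void *)handle);

	*out = handle;
	return 0;
}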