Date:   Mon, 02 Jan 2017 17:06:29 +0900
From:   Jaewon Kim <jaewon31.kim@...sung.com>
To:     Michal Nazarewicz <mina86@...a86.com>,
        Michal Hocko <mhocko@...nel.org>
Cc:     gregkh@...uxfoundation.org, akpm@...ux-foundation.org,
        labbott@...hat.com, m.szyprowski@...sung.com, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, jaewon31.kim@...il.com
Subject: Re: [PATCH] mm: cma: print allocation failure reason and bitmap status



On Jan 02, 2017 15:46, Michal Nazarewicz wrote:
> On Mon, Jan 02 2017, Jaewon Kim wrote:
>> There are many reasons for CMA allocation failure, such as EBUSY, ENOMEM and
>> EINTR, but so far we did not know which one actually occurred. This patch
>> prints the error value.
>>
>> Additionally, if CONFIG_CMA_DEBUG is enabled, this patch shows the bitmap
>> status so that the available pages are known. CMA internally tries all
>> available regions because some regions can fail with EBUSY. The bitmap status
>> is useful to understand both ENOMEM and EBUSY in detail:
>>  ENOMEM: not tried at all because there is no available region;
>>          the total region may be too small, or it may be a fragmentation issue
>>  EBUSY:  some regions were tried but all of them failed
>>
>> This is an ENOMEM example with this patch.
>> [   12.415458]  [2:   Binder:714_1:  744] cma: cma_alloc: alloc failed, req-size: 256 pages, ret: -12
>> If CONFIG_CMA_DEBUG is enabled, the available pages are also shown in a
>> concatenated size@position format. So 4@572 means that there are 4 available
>> pages at position 572, counting from position 0.
>> [   12.415503]  [2:   Binder:714_1:  744] cma: number of available pages: 4@...+7@...+7@...+8@...+38@...+166@...4+127@...1=> 357 free of 2048 total pages
>>
>> Signed-off-by: Jaewon Kim <jaewon31.kim@...sung.com>
>> Acked-by: Michal Nazarewicz <mina86@...a86.com>
>> ---
>>  mm/cma.c | 34 +++++++++++++++++++++++++++++++++-
>>  1 file changed, 33 insertions(+), 1 deletion(-)
>>
>> diff --git a/mm/cma.c b/mm/cma.c
>> index c960459..9e037541 100644
>> --- a/mm/cma.c
>> +++ b/mm/cma.c
>> @@ -353,6 +353,32 @@ int __init cma_declare_contiguous(phys_addr_t base,
>>      return ret;
>>  }
>>  
>> +#ifdef CONFIG_CMA_DEBUG
>> +static void debug_show_cma_areas(struct cma *cma)
> Make it ‘cma_debug_show_areas’.  All other functions have ‘cma’ as
> prefix so that’s more consistent.
OK, no problem.
>
>> +{
>> +    unsigned long next_zero_bit, next_set_bit;
>> +    unsigned long start = 0;
>> +    unsigned int nr_zero, nr_total = 0;
>> +
>> +    mutex_lock(&cma->lock);
>> +    pr_info("number of available pages: ");
>> +    for (;;) {
>> +        next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
>> +        if (next_zero_bit >= cma->count)
>> +            break;
>> +        next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
>> +        nr_zero = next_set_bit - next_zero_bit;
>> +        pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
>> +        nr_total += nr_zero;
>> +        start = next_zero_bit + nr_zero;
>> +    }
>> +    pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
>> +    mutex_unlock(&cma->lock);
>> +}
>> +#else
>> +static inline void debug_show_cma_areas(struct cma *cma) { }
>> +#endif
>> +
>>  /**
>>   * cma_alloc() - allocate pages from contiguous area
>>   * @cma:   Contiguous memory region for which the allocation is performed.
>> @@ -369,7 +395,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
>>      unsigned long start = 0;
>>      unsigned long bitmap_maxno, bitmap_no, bitmap_count;
>>      struct page *page = NULL;
>> -    int ret;
>> +    int ret = -ENOMEM;
>>  
>>      if (!cma || !cma->count)
>>          return NULL;
>> @@ -426,6 +452,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
>>  
>>      trace_cma_alloc(pfn, page, count, align);
>>  
>> +    if (ret) {
>> +        pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
>> +            __func__, count, ret);
>> +        debug_show_cma_areas(cma);
>> +    }
>> +
>>      pr_debug("%s(): returned %p\n", __func__, page);
>>      return page;
>>  }
>> -- 
>>

Here is the updated patch with the rename applied.
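
Not part of the patch, just to illustrate the size@position format: below is a
minimal userspace sketch of the same run-scanning logic that
cma_debug_show_areas() performs, printing each run of free pages as
size@position. The example bitmap and its size are made up for illustration;
the kernel version walks cma->bitmap with find_next_zero_bit()/find_next_bit()
under cma->lock instead.

/*
 * Userspace illustration only: a set entry means "page allocated",
 * a clear entry means "page free". Runs of free pages are printed
 * as size@position, matching the new debug line.
 */
#include <stdio.h>

#define NR_PAGES 32

static const int bitmap[NR_PAGES] = {
	1, 1, 0, 0, 0, 0, 1, 1,
	1, 0, 0, 1, 1, 1, 0, 0,
	0, 0, 0, 1, 1, 1, 1, 0,
	0, 0, 0, 0, 0, 0, 0, 1,
};

int main(void)
{
	unsigned int start = 0, nr_total = 0;

	printf("number of available pages: ");
	while (start < NR_PAGES) {
		unsigned int next_zero = start, next_set;

		while (next_zero < NR_PAGES && bitmap[next_zero])
			next_zero++;		/* find_next_zero_bit() */
		if (next_zero >= NR_PAGES)
			break;
		next_set = next_zero;
		while (next_set < NR_PAGES && !bitmap[next_set])
			next_set++;		/* find_next_bit() */

		printf("%s%u@%u", nr_total ? "+" : "",
		       next_set - next_zero, next_zero);
		nr_total += next_set - next_zero;
		start = next_set;
	}
	printf("=> %u free of %u total pages\n", nr_total, NR_PAGES);
	return 0;
}

With the bitmap above this prints:
number of available pages: 4@2+2@9+5@14+8@23=> 19 free of 32 total pages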

From ec724faf7ace5e690715e23e82f17f27c697b6b1 Mon Sep 17 00:00:00 2001
From: Jaewon Kim <jaewon31.kim@...sung.com>
Date: Thu, 29 Dec 2016 11:00:16 +0900
Subject: [PATCH] mm: cma: print allocation failure reason and bitmap status

There are many reasons for CMA allocation failure, such as EBUSY, ENOMEM and
EINTR, but so far we did not know which one actually occurred. This patch
prints the error value.

Additionally, if CONFIG_CMA_DEBUG is enabled, this patch shows the bitmap
status so that the available pages are known. CMA internally tries all
available regions because some regions can fail with EBUSY. The bitmap status
is useful to understand both ENOMEM and EBUSY in detail:
 ENOMEM: not tried at all because there is no available region;
         the total region may be too small, or it may be a fragmentation issue
 EBUSY:  some regions were tried but all of them failed

This is an ENOMEM example with this patch.
[   12.415458]  [2:   Binder:714_1:  744] cma: cma_alloc: alloc failed, req-size: 256 pages, ret: -12
If CONFIG_CMA_DEBUG is enabled, the available pages are also shown in a
concatenated size@position format. So 4@572 means that there are 4 available
pages at position 572, counting from position 0.
[   12.415503]  [2:   Binder:714_1:  744] cma: number of available pages: 4@...+7@...+7@...+8@...+38@...+166@...4+127@...1=> 357 free of 2048 total pages

Signed-off-by: Jaewon Kim <jaewon31.kim@...sung.com>
Acked-by: Michal Nazarewicz <mina86@...a86.com>
---
 mm/cma.c | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

diff --git a/mm/cma.c b/mm/cma.c
index c960459..c393229 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -353,6 +353,32 @@ int __init cma_declare_contiguous(phys_addr_t base,
     return ret;
 }
 
+#ifdef CONFIG_CMA_DEBUG
+static void cma_debug_show_areas(struct cma *cma)
+{
+    unsigned long next_zero_bit, next_set_bit;
+    unsigned long start = 0;
+    unsigned int nr_zero, nr_total = 0;
+
+    mutex_lock(&cma->lock);
+    pr_info("number of available pages: ");
+    for (;;) {
+        next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
+        if (next_zero_bit >= cma->count)
+            break;
+        next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
+        nr_zero = next_set_bit - next_zero_bit;
+        pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
+        nr_total += nr_zero;
+        start = next_zero_bit + nr_zero;
+    }
+    pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
+    mutex_unlock(&cma->lock);
+}
+#else
+static inline void cma_debug_show_areas(struct cma *cma) { }
+#endif
+
 /**
  * cma_alloc() - allocate pages from contiguous area
  * @cma:   Contiguous memory region for which the allocation is performed.
@@ -369,7 +395,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
     unsigned long start = 0;
     unsigned long bitmap_maxno, bitmap_no, bitmap_count;
     struct page *page = NULL;
-    int ret;
+    int ret = -ENOMEM;
 
     if (!cma || !cma->count)
         return NULL;
@@ -426,6 +452,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 
     trace_cma_alloc(pfn, page, count, align);
 
+    if (ret) {
+        pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+            __func__, count, ret);
+        cma_debug_show_areas(cma);
+    }
+
     pr_debug("%s(): returned %p\n", __func__, page);
     return page;
 }
-- 
1.9.1
