[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20150213025029.GE6592@js1304-P5Q-DELUXE>
Date: Fri, 13 Feb 2015 11:50:30 +0900
From: Joonsoo Kim <iamjoonsoo.kim@....com>
To: Sasha Levin <sasha.levin@...cle.com>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
m.szyprowski@...sung.com, akpm@...ux-foundation.org,
lauraa@...eaurora.org, s.strogin@...tner.samsung.com
Subject: Re: [PATCH v5 2/3] mm: cma: allocation trigger
On Thu, Feb 12, 2015 at 05:26:47PM -0500, Sasha Levin wrote:
> Provides a userspace interface to trigger a CMA allocation.
>
> Usage:
>
> echo [pages] > alloc
>
> This would provide testing/fuzzing access to the CMA allocation paths.
>
> Signed-off-by: Sasha Levin <sasha.levin@...cle.com>
> ---
> mm/cma.c | 6 ++++++
> mm/cma.h | 4 ++++
> mm/cma_debug.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
> 3 files changed, 64 insertions(+), 2 deletions(-)
>
> diff --git a/mm/cma.c b/mm/cma.c
> index e093b53..9e3d44a 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -121,6 +121,12 @@ static int __init cma_activate_area(struct cma *cma)
> } while (--i);
>
> mutex_init(&cma->lock);
> +
> +#ifdef CONFIG_CMA_DEBUGFS
> + INIT_HLIST_HEAD(&cma->mem_head);
> + spin_lock_init(&cma->mem_head_lock);
> +#endif
> +
> return 0;
>
> err:
> diff --git a/mm/cma.h b/mm/cma.h
> index 4141887..1132d73 100644
> --- a/mm/cma.h
> +++ b/mm/cma.h
> @@ -7,6 +7,10 @@ struct cma {
> unsigned long *bitmap;
> unsigned int order_per_bit; /* Order of pages represented by one bit */
> struct mutex lock;
> +#ifdef CONFIG_CMA_DEBUGFS
> + struct hlist_head mem_head;
> + spinlock_t mem_head_lock;
> +#endif
> };
>
> extern struct cma cma_areas[MAX_CMA_AREAS];
> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
> index 3a25413..5bd6863 100644
> --- a/mm/cma_debug.c
> +++ b/mm/cma_debug.c
> @@ -7,9 +7,18 @@
>
> #include <linux/debugfs.h>
> #include <linux/cma.h>
> +#include <linux/list.h>
> +#include <linux/kernel.h>
> +#include <linux/slab.h>
>
> #include "cma.h"
>
> +struct cma_mem {
> + struct hlist_node node;
> + struct page *p;
> + unsigned long n;
> +};
> +
> static struct dentry *cma_debugfs_root;
>
> static int cma_debugfs_get(void *data, u64 *val)
> @@ -23,8 +32,48 @@ static int cma_debugfs_get(void *data, u64 *val)
>
> DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
>
> -static void cma_debugfs_add_one(struct cma *cma, int idx)
> +static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
> +{
> + spin_lock(&cma->mem_head_lock);
> + hlist_add_head(&mem->node, &cma->mem_head);
> + spin_unlock(&cma->mem_head_lock);
> +}
> +
> +static int cma_alloc_mem(struct cma *cma, int count)
> +{
> + struct cma_mem *mem;
> + struct page *p;
> +
> + mem = kzalloc(sizeof(*mem), GFP_KERNEL);
> + if (!mem)
> + return -ENOMEM;
> +
> + p = cma_alloc(cma, count, CONFIG_CMA_ALIGNMENT);
The alignment argument has been reintroduced here; please change it to 0.
Other than that,
Acked-by: Joonsoo Kim <iamjoonsoo.kim@....com>
Thanks.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists