Message-Id: <20170831154105.897601710@linuxfoundation.org>
Date: Thu, 31 Aug 2017 17:43:50 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Sasha Levin <sasha.levin@...cle.com>,
Michal Nazarewicz <mina86@...a86.com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Laurent Pinchart <laurent.pinchart+renesas@...asonboard.com>,
Gregory Fong <gregory.0xf0@...il.com>,
Pintu Kumar <pintu.k@...sung.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [PATCH 3.18 14/24] mm: cma: constify and use correct signness in mm/cma.c
3.18-stable review patch. If anyone has any objections, please let me know.
------------------
From: Sasha Levin <sasha.levin@...cle.com>
commit ac173824959adeb489f9fcf88858774c4535a241 upstream.
Constify function parameters and use correct signness where needed.
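[Editorial note, not part of the upstream commit: a minimal standalone
sketch of the two changes this patch applies throughout mm/cma.c. A
parameter that is only read becomes a pointer-to-const, and a count that
can never be negative becomes unsigned int. All names below are
hypothetical, not kernel code.]

#include <stdio.h>

struct area {
	unsigned long base_pfn;
	unsigned long count;		/* pages in the area */
};

/* Accessor only reads the area, so it takes a const pointer;
 * the compiler now rejects accidental writes through 'a'. */
static unsigned long area_size(const struct area *a)
{
	return a->count;
}

/* A page count cannot be negative: unsigned int documents the
 * valid range and avoids sign-conversion surprises. */
static int area_release(struct area *a, unsigned int count)
{
	if (count > a->count)
		return -1;		/* releasing more than held */
	a->count -= count;
	return 0;
}

int main(void)
{
	struct area a = { .base_pfn = 0x1000, .count = 64 };

	printf("size: %lu pages\n", area_size(&a));
	printf("release 16: %d\n", area_release(&a, 16));
	printf("size: %lu pages\n", area_size(&a));
	return 0;
}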
Signed-off-by: Sasha Levin <sasha.levin@...cle.com>
Cc: Michal Nazarewicz <mina86@...a86.com>
Cc: Marek Szyprowski <m.szyprowski@...sung.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: Laurent Pinchart <laurent.pinchart+renesas@...asonboard.com>
Acked-by: Gregory Fong <gregory.0xf0@...il.com>
Cc: Pintu Kumar <pintu.k@...sung.com>
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 include/linux/cma.h | 12 ++++++------
 mm/cma.c            | 24 ++++++++++++++----------
2 files changed, 20 insertions(+), 16 deletions(-)
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -16,16 +16,16 @@
struct cma;
extern unsigned long totalcma_pages;
-extern phys_addr_t cma_get_base(struct cma *cma);
-extern unsigned long cma_get_size(struct cma *cma);
+extern phys_addr_t cma_get_base(const struct cma *cma);
+extern unsigned long cma_get_size(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t base,
- phys_addr_t size, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
-extern bool cma_release(struct cma *cma, struct page *pages, int count);
+extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -47,17 +47,18 @@ static struct cma cma_areas[MAX_CMA_AREA
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
-phys_addr_t cma_get_base(struct cma *cma)
+phys_addr_t cma_get_base(const struct cma *cma)
{
return PFN_PHYS(cma->base_pfn);
}
-unsigned long cma_get_size(struct cma *cma)
+unsigned long cma_get_size(const struct cma *cma)
{
return cma->count << PAGE_SHIFT;
}
-static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+ int align_order)
{
if (align_order <= cma->order_per_bit)
return 0;
@@ -68,7 +69,8 @@ static unsigned long cma_bitmap_aligned_
* Find a PFN aligned to the specified order and return an offset represented in
* order_per_bits.
*/
-static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
+ int align_order)
{
if (align_order <= cma->order_per_bit)
return 0;
@@ -82,13 +84,14 @@ static unsigned long cma_bitmap_maxno(st
return cma->count >> cma->order_per_bit;
}
-static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
- unsigned long pages)
+static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
+ unsigned long pages)
{
return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
-static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
+static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
+ unsigned int count)
{
unsigned long bitmap_no, bitmap_count;
@@ -167,7 +170,8 @@ core_initcall(cma_init_reserved_areas);
* This function creates custom contiguous area from already reserved memory.
*/
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
- int order_per_bit, struct cma **res_cma)
+ unsigned int order_per_bit,
+ struct cma **res_cma)
{
struct cma *cma;
phys_addr_t alignment;
@@ -359,7 +363,7 @@ err:
* This function allocates part of contiguous memory on specific
* contiguous memory area.
*/
-struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
{
unsigned long mask, offset, pfn, start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
@@ -430,7 +434,7 @@ struct page *cma_alloc(struct cma *cma,
* It returns false when provided pages do not belong to contiguous area and
* true otherwise.
*/
-bool cma_release(struct cma *cma, struct page *pages, int count)
+bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
unsigned long pfn;
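[Editorial note, not part of the patch: with the signatures above, a
caller now looks like the sketch below. It assumes 'cma' was set up
earlier, e.g. via cma_declare_contiguous(); the count/align values are
arbitrary.]

#include <linux/cma.h>
#include <linux/errno.h>

static int cma_roundtrip(struct cma *cma)
{
	unsigned int count = 16;	/* pages; unsigned int per this patch */
	unsigned int align = 4;		/* alignment order (2^4 pages) */
	struct page *pages;

	pages = cma_alloc(cma, count, align);
	if (!pages)
		return -ENOMEM;

	/* ... use the pages ... */

	/* cma_release() now takes a const struct page * and an unsigned
	 * count; false means the pages are not from this CMA area. */
	if (!cma_release(cma, pages, count))
		return -EINVAL;

	return 0;
}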