Message-ID: <7fccb193-b9a8-e9a9-0029-379d0286d061@redhat.com>
Date: Tue, 5 Sep 2017 17:19:39 -0700
From: Laura Abbott <labbott@...hat.com>
To: Vitaly Wool <vitalywool@...il.com>,
Sumit Semwal <sumit.semwal@...aro.org>
Cc: linux-kernel@...r.kernel.org, Oleksiy.Avramchenko@...y.com
Subject: Re: [PATCH/RFC] ion: add movability support for page pools
On 09/05/2017 05:55 AM, Vitaly Wool wrote:
> The ion page pool may become quite large, with its pages scattered
> all around kernel memory. These pages are not actually in use, so
> moving them around to reduce fragmentation is cheap: there is no
> need to copy their contents.
>
Can you give a few more details about how this helps?
Some benchmark numbers would be useful.
It looks like something similar was implemented for zsmalloc.
Are we going to start seeing a proliferation of pseudo filesystems
for each subsystem that wants to migrate pages?
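
For reference, zsmalloc anchors its migration support in a pseudo
filesystem of its own; roughly (paraphrasing mm/zsmalloc.c from
memory, details may be off):

	static struct file_system_type zsmalloc_fs = {
		.name		= "zsmalloc",
		.mount		= zs_mount,	/* a mount_pseudo() wrapper */
		.kill_sb	= kill_anon_super,
	};

	static const struct address_space_operations zsmalloc_aops = {
		.isolate_page	= zs_page_isolate,
		.migratepage	= zs_page_migrate,
		.putback_page	= zs_page_putback,
	};

If every driver that wants movable pages ends up duplicating this
mount-plus-anon-inode dance, it may be worth factoring the
registration into common code instead of growing one pseudo fs per
subsystem.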
> This patch implements callbacks that support page moving.
>
> Signed-off-by: Vitaly Wool <vitalywool@...il.com>
> ---
> drivers/staging/android/ion/ion.h | 2 +
> drivers/staging/android/ion/ion_page_pool.c | 165 +++++++++++++++++++++++++++-
> 2 files changed, 163 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
> index fa9ed81..a948cb2 100644
> --- a/drivers/staging/android/ion/ion.h
> +++ b/drivers/staging/android/ion/ion.h
> @@ -320,6 +320,7 @@ size_t ion_heap_freelist_size(struct ion_heap *heap);
> * @order: order of pages in the pool
> * @list: plist node for list of pools
> * @cached: it's cached pool or not
> + * @inode: inode for ion_pool pseudo filesystem
> *
> * Allows you to keep a pool of pre allocated pages to use from your heap.
> * Keeping a pool of pages that is ready for dma, ie any cached mapping have
> @@ -336,6 +337,7 @@ struct ion_page_pool {
> gfp_t gfp_mask;
> unsigned int order;
> struct plist_node list;
> + struct inode *inode;
> };
>
> struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
> diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
> index 817849d..3c6ba9c 100644
> --- a/drivers/staging/android/ion/ion_page_pool.c
> +++ b/drivers/staging/android/ion/ion_page_pool.c
> @@ -19,12 +19,17 @@
> #include <linux/err.h>
> #include <linux/fs.h>
> #include <linux/list.h>
> +#include <linux/migrate.h>
> +#include <linux/mount.h>
> +#include <linux/page-flags.h>
> #include <linux/init.h>
> #include <linux/slab.h>
> #include <linux/swap.h>
>
> #include "ion.h"
>
> +#define ION_PAGE_CACHE 1
> +
> static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
> {
> struct page *page = alloc_pages(pool->gfp_mask, pool->order);
> @@ -37,12 +42,19 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
> static void ion_page_pool_free_pages(struct ion_page_pool *pool,
> struct page *page)
> {
> + if (pool->inode && pool->order == 0) {
> + lock_page(page);
> + __ClearPageMovable(page);
> + unlock_page(page);
> + }
> +
> __free_pages(page, pool->order);
> }
>
> static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
> {
> mutex_lock(&pool->mutex);
> + set_bit(ION_PAGE_CACHE, &page->private);
> if (PageHighMem(page)) {
> list_add_tail(&page->lru, &pool->high_items);
> pool->high_count++;
> @@ -50,6 +62,9 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
> list_add_tail(&page->lru, &pool->low_items);
> pool->low_count++;
> }
> +
> + if (pool->inode && pool->order == 0)
> + __SetPageMovable(page, pool->inode->i_mapping);
> mutex_unlock(&pool->mutex);
> return 0;
> }
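
As far as I can tell, __SetPageMovable() has a
VM_BUG_ON_PAGE(!PageLocked(page), page) check (mm/compaction.c), so
this wants the same lock_page() dance as the free path, e.g.:

	if (pool->inode && pool->order == 0) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	}

Taking the page lock while holding pool->mutex is also the inverse
of the ordering in ion_page_pool_isolate(), where the core calls in
with the page already locked and you then take the mutex, so this
may want a second look for lock ordering.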
> @@ -68,7 +83,9 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
> pool->low_count--;
> }
>
> - list_del(&page->lru);
> + clear_bit(ION_PAGE_CACHE, &page->private);
> +
> + list_del_init(&page->lru);
> return page;
> }
>
> @@ -85,8 +102,13 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
> page = ion_page_pool_remove(pool, false);
> mutex_unlock(&pool->mutex);
>
> - if (!page)
> + if (!page) {
> page = ion_page_pool_alloc_pages(pool);
> + } else {
> + lock_page(page);
> + __ClearPageMovable(page);
> + unlock_page(page);
> + }
>
> return page;
> }
> @@ -146,6 +168,136 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
> return freed;
> }
>
> +static bool ion_page_pool_isolate(struct page *page, isolate_mode_t mode)
> +{
> + struct ion_page_pool *pool;
> + struct address_space *mapping = page_mapping(page);
> +
> + VM_BUG_ON_PAGE(PageIsolated(page), page);
> +
> + if (!mapping)
> + return false;
> + pool = mapping->private_data;
> +
> + mutex_lock(&pool->mutex);
> + if (unlikely(!test_bit(ION_PAGE_CACHE, &page->private))) {
> + mutex_unlock(&pool->mutex);
> + return false;
> + }
> +
> + list_del(&page->lru);
> + if (PageHighMem(page))
> + pool->high_count--;
> + else
> + pool->low_count--;
> + mutex_unlock(&pool->mutex);
> +
> + return true;
> +}
> +
> +static int ion_page_pool_migrate(struct address_space *mapping,
> + struct page *newpage,
> + struct page *page, enum migrate_mode mode)
> +{
> + struct ion_page_pool *pool = mapping->private_data;
> +
> + VM_BUG_ON_PAGE(!PageMovable(page), page);
> + VM_BUG_ON_PAGE(!PageIsolated(page), page);
> +
> + lock_page(page);
> + set_bit(ION_PAGE_CACHE, &newpage->private);
> + __SetPageMovable(newpage, page_mapping(page));
> + get_page(newpage);
> + __ClearPageMovable(page);
> + clear_bit(ION_PAGE_CACHE, &page->private);
> + unlock_page(page);
> + mutex_lock(&pool->mutex);
> + if (PageHighMem(newpage)) {
> + list_add_tail(&newpage->lru, &pool->high_items);
> + pool->high_count++;
> + } else {
> + list_add_tail(&newpage->lru, &pool->low_items);
> + pool->low_count++;
> + }
> + mutex_unlock(&pool->mutex);
> + put_page(page);
> + return 0;
> +}
> +
> +static void ion_page_pool_putback(struct page *page)
> +{
> + /*
> + * The migrate callback either succeeds or returns -EAGAIN, in
> + * which case it is called again until it succeeds, so this
> + * callback is not needed.
> + */
> +}
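
Unless I'm misreading mm/migrate.c, this comment doesn't hold: when
migration is abandoned after the retry passes, putback_movable_pages()
calls ->putback_page() on pages that were successfully isolated.
Leaving it empty means an isolated page never makes it back onto the
pool list and the counters stay off. Something along these lines
(untested sketch) seems needed:

	static void ion_page_pool_putback(struct page *page)
	{
		struct ion_page_pool *pool = page_mapping(page)->private_data;

		/* return the isolated page to the pool it was taken from */
		mutex_lock(&pool->mutex);
		set_bit(ION_PAGE_CACHE, &page->private);
		if (PageHighMem(page)) {
			list_add_tail(&page->lru, &pool->high_items);
			pool->high_count++;
		} else {
			list_add_tail(&page->lru, &pool->low_items);
			pool->low_count++;
		}
		mutex_unlock(&pool->mutex);
	}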
> +
> +static struct dentry *ion_pool_do_mount(struct file_system_type *fs_type,
> + int flags, const char *dev_name, void *data)
> +{
> + static const struct dentry_operations ops = {
> + .d_dname = simple_dname,
> + };
> +
> + return mount_pseudo(fs_type, "ion_pool:", NULL, &ops, 0x77);
> +}
> +
> +static struct file_system_type ion_pool_fs = {
> + .name = "ion_pool",
> + .mount = ion_pool_do_mount,
> + .kill_sb = kill_anon_super,
> +};
> +
> +static int ion_pool_cnt;
> +static struct vfsmount *ion_pool_mnt;
> +static int ion_pool_mount(void)
> +{
> + int ret = 0;
> +
> + ion_pool_mnt = kern_mount(&ion_pool_fs);
> + if (IS_ERR(ion_pool_mnt))
> + ret = PTR_ERR(ion_pool_mnt);
> +
> + return ret;
> +}
> +
> +static const struct address_space_operations ion_pool_aops = {
> + .isolate_page = ion_page_pool_isolate,
> + .migratepage = ion_page_pool_migrate,
> + .putback_page = ion_page_pool_putback,
> +};
> +
> +static int ion_pool_register_migration(struct ion_page_pool *pool)
> +{
> + int ret = simple_pin_fs(&ion_pool_fs, &ion_pool_mnt, &ion_pool_cnt);
> +
> + if (ret < 0) {
> + pr_err("Cannot mount pseudo fs: %d\n", ret);
> + return ret;
> + }
> + pool->inode = alloc_anon_inode(ion_pool_mnt->mnt_sb);
> + if (IS_ERR(pool->inode)) {
> + ret = PTR_ERR(pool->inode);
> + pool->inode = NULL;
> + simple_release_fs(&ion_pool_mnt, &ion_pool_cnt);
> + return ret;
> + }
> +
> + pool->inode->i_mapping->private_data = pool;
> + pool->inode->i_mapping->a_ops = &ion_pool_aops;
> + return 0;
> +}
> +
> +static void ion_pool_unregister_migration(struct ion_page_pool *pool)
> +{
> + if (pool->inode) {
> + iput(pool->inode);
> + pool->inode = NULL;
> + simple_release_fs(&ion_pool_mnt, &ion_pool_cnt);
> + }
> +}
> +
> struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
> bool cached)
> {
> @@ -161,19 +313,24 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
> pool->order = order;
> mutex_init(&pool->mutex);
> plist_node_init(&pool->list, order);
> - if (cached)
> + pool->inode = NULL;
> +
> + if (cached) {
> pool->cached = true;
> + ion_pool_register_migration(pool);
> + }
>
> return pool;
> }
>
> void ion_page_pool_destroy(struct ion_page_pool *pool)
> {
> + ion_pool_unregister_migration(pool);
> kfree(pool);
> }
>
> static int __init ion_page_pool_init(void)
> {
> - return 0;
> + return ion_pool_mount();
> }
> device_initcall(ion_page_pool_init);
>