[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <2691374c551bc276ec135ea58207a440e34f7be4.1719909395.git.leon@kernel.org>
Date: Tue, 2 Jul 2024 12:09:45 +0300
From: Leon Romanovsky <leon@...nel.org>
To: Jens Axboe <axboe@...nel.dk>,
Jason Gunthorpe <jgg@...pe.ca>,
Robin Murphy <robin.murphy@....com>,
Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Keith Busch <kbusch@...nel.org>,
Christoph Hellwig <hch@....de>,
"Zeng, Oak" <oak.zeng@...el.com>,
Chaitanya Kulkarni <kch@...dia.com>
Cc: Leon Romanovsky <leonro@...dia.com>,
Sagi Grimberg <sagi@...mberg.me>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Logan Gunthorpe <logang@...tatee.com>,
Yishai Hadas <yishaih@...dia.com>,
Shameer Kolothum <shameerali.kolothum.thodi@...wei.com>,
Kevin Tian <kevin.tian@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Jérôme Glisse <jglisse@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-rdma@...r.kernel.org,
iommu@...ts.linux.dev,
linux-nvme@...ts.infradead.org,
linux-pci@...r.kernel.org,
kvm@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH v1 15/18] vfio/mlx5: Explicitly store page list
From: Leon Romanovsky <leonro@...dia.com>
As a preparation for removing the scatter-gather table and unifying the
receive and send lists, explicitly store the page list.
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
drivers/vfio/pci/mlx5/cmd.c | 33 ++++++++++++++++-----------------
drivers/vfio/pci/mlx5/cmd.h | 1 +
2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index adf57104555a..cb23f03d58f4 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -421,6 +421,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
__free_page(sg_page_iter_page(&sg_iter));
sg_free_append_table(&buf->table);
+ kvfree(buf->page_list);
kfree(buf);
}
@@ -428,44 +429,42 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
unsigned int npages)
{
unsigned int to_alloc = npages;
+ size_t old_size, new_size;
struct page **page_list;
unsigned long filled;
unsigned int to_fill;
int ret;
- to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
- page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
+ to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*buf->page_list));
+ old_size = buf->npages * sizeof(*buf->page_list);
+ new_size = old_size + to_alloc * sizeof(*buf->page_list);
+ page_list = kvrealloc(buf->page_list, old_size, new_size,
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!page_list)
return -ENOMEM;
+ buf->page_list = page_list;
+
do {
filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
- if (!filled) {
- ret = -ENOMEM;
- goto err;
- }
+ buf->page_list + buf->npages);
+ if (!filled)
+ return -ENOMEM;
+
to_alloc -= filled;
ret = sg_alloc_append_table_from_pages(
- &buf->table, page_list, filled, 0,
+ &buf->table, buf->page_list + buf->npages, filled, 0,
filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
GFP_KERNEL_ACCOUNT);
if (ret)
- goto err;
+ return ret;
buf->npages += filled;
- /* clean input for another bulk allocation */
- memset(page_list, 0, filled * sizeof(*page_list));
to_fill = min_t(unsigned int, to_alloc,
- PAGE_SIZE / sizeof(*page_list));
+ PAGE_SIZE / sizeof(*buf->page_list));
} while (to_alloc > 0);
- kvfree(page_list);
return 0;
-
-err:
- kvfree(page_list);
- return ret;
}
struct mlx5_vhca_data_buffer *
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 25dd6ff54591..5b764199db53 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -53,6 +53,7 @@ struct mlx5_vf_migration_header {
};
struct mlx5_vhca_data_buffer {
+ struct page **page_list;
struct sg_append_table table;
loff_t start_pos;
u64 length;
--
2.45.2
Powered by blists - more mailing lists