[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1d0ca7408af6e5f0bb09baffd021bc72287e5ed8.1709631413.git.leon@kernel.org>
Date: Tue, 5 Mar 2024 12:22:14 +0200
From: Leon Romanovsky <leon@...nel.org>
To: Christoph Hellwig <hch@....de>,
Robin Murphy <robin.murphy@....com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Jason Gunthorpe <jgg@...pe.ca>,
Chaitanya Kulkarni <chaitanyak@...dia.com>
Cc: Leon Romanovsky <leonro@...dia.com>,
Jonathan Corbet <corbet@....net>,
Jens Axboe <axboe@...nel.dk>,
Keith Busch <kbusch@...nel.org>,
Sagi Grimberg <sagi@...mberg.me>,
Yishai Hadas <yishaih@...dia.com>,
Shameer Kolothum <shameerali.kolothum.thodi@...wei.com>,
Kevin Tian <kevin.tian@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Jérôme Glisse <jglisse@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-block@...r.kernel.org,
linux-rdma@...r.kernel.org,
iommu@...ts.linux.dev,
linux-nvme@...ts.infradead.org,
kvm@...r.kernel.org,
linux-mm@...ck.org,
Bart Van Assche <bvanassche@....org>,
Damien Le Moal <damien.lemoal@...nsource.wdc.com>,
Amir Goldstein <amir73il@...il.com>,
"josef@...icpanda.com" <josef@...icpanda.com>,
"Martin K. Petersen" <martin.petersen@...cle.com>,
"daniel@...earbox.net" <daniel@...earbox.net>,
Dan Williams <dan.j.williams@...el.com>,
"jack@...e.com" <jack@...e.com>,
Zhu Yanjun <zyjzyj2000@...il.com>
Subject: [RFC 13/16] vfio/mlx5: Explicitly store page list
From: Leon Romanovsky <leonro@...dia.com>
As a preparation for removing the scatter-gather table and unifying
the receive and send lists, explicitly store the page list.
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
drivers/vfio/pci/mlx5/cmd.c | 1 +
drivers/vfio/pci/mlx5/cmd.h | 1 +
drivers/vfio/pci/mlx5/main.c | 35 +++++++++++++++++------------------
3 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 44762980fcb9..5e2103042d9b 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -411,6 +411,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
__free_page(sg_page_iter_page(&sg_iter));
sg_free_append_table(&buf->table);
+ kvfree(buf->page_list);
kfree(buf);
}
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 83728c0669e7..815fcb54494d 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -57,6 +57,7 @@ struct mlx5_vf_migration_header {
};
struct mlx5_vhca_data_buffer {
+ struct page **page_list;
struct sg_append_table table;
loff_t start_pos;
u64 length;
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index b11b1c27d284..7ffe24693a55 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -69,44 +69,43 @@ int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
unsigned int npages)
{
unsigned int to_alloc = npages;
+ size_t old_size, new_size;
struct page **page_list;
unsigned long filled;
unsigned int to_fill;
int ret;
- to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
- page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
+ to_fill = min_t(unsigned int, npages,
+ PAGE_SIZE / sizeof(*buf->page_list));
+ old_size = buf->npages * sizeof(*buf->page_list);
+ new_size = old_size + to_fill * sizeof(*buf->page_list);
+ page_list = kvrealloc(buf->page_list, old_size, new_size,
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!page_list)
return -ENOMEM;
+ buf->page_list = page_list;
+
do {
filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
- if (!filled) {
- ret = -ENOMEM;
- goto err;
- }
+ buf->page_list + buf->npages);
+ if (!filled)
+ return -ENOMEM;
+
to_alloc -= filled;
ret = sg_alloc_append_table_from_pages(
- &buf->table, page_list, filled, 0,
+ &buf->table, buf->page_list + buf->npages, filled, 0,
filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
GFP_KERNEL_ACCOUNT);
-
if (ret)
- goto err;
+ return ret;
+
buf->npages += filled;
- /* clean input for another bulk allocation */
- memset(page_list, 0, filled * sizeof(*page_list));
to_fill = min_t(unsigned int, to_alloc,
- PAGE_SIZE / sizeof(*page_list));
+ PAGE_SIZE / sizeof(*buf->page_list));
} while (to_alloc > 0);
- kvfree(page_list);
return 0;
-
-err:
- kvfree(page_list);
- return ret;
}
static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
--
2.44.0
Powered by blists - more mailing lists