Message-Id: <20190620083924.1996-4-kevin.laatz@intel.com>
Date: Thu, 20 Jun 2019 08:39:16 +0000
From: Kevin Laatz <kevin.laatz@...el.com>
To: netdev@...r.kernel.org, ast@...nel.org, daniel@...earbox.net,
	bjorn.topel@...el.com, magnus.karlsson@...el.com
Cc: bpf@...r.kernel.com, intel-wired-lan@...ts.osuosl.org,
	bruce.richardson@...el.com, ciara.loftus@...el.com,
	Kevin Laatz <kevin.laatz@...el.com>
Subject: [PATCH 03/11] xdp: add offset param to zero_copy_allocator

This patch adds an offset parameter to the zero_copy_allocator free
callback. This change is required for the unaligned chunk mode which is
added later in this patch set: in unaligned mode we cannot simply mask
the handle back to its original value the way we can in the aligned
case, so the free callback needs the offset to calculate the original
handle.
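
(For illustration only, not part of the patch: a minimal sketch of a
driver free callback built on the extended prototype. Only the
signature comes from this patch; struct my_rx_ring, its 'aligned' flag
and 'chunk_mask' field, my_reuse_addr() and the handle arithmetic are
assumptions made up for the sketch. The real driver changes come later
in this series.)

/* Hypothetical callback matching the new zero_copy_allocator::free
 * prototype. The ring is assumed to embed the zero_copy_allocator as
 * its 'zca' member.
 */
static void my_zca_free(struct zero_copy_allocator *zca, unsigned long handle,
			off_t off)
{
	struct my_rx_ring *ring = container_of(zca, struct my_rx_ring, zca);
	unsigned long addr;

	if (ring->aligned) {
		/* Aligned chunk mode: masking recovers the original handle. */
		addr = handle & ring->chunk_mask;
	} else {
		/* Unaligned chunk mode: masking does not work, so use the
		 * offset of xdp->data within the buffer to step back to the
		 * original handle (simplified arithmetic for the sketch).
		 */
		addr = handle - off;
	}

	my_reuse_addr(ring, addr);
}

xdp_return_buff() below passes xdp->data - xdp->data_hard_start as the
offset, which is what a callback of this shape would receive.
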
Signed-off-by: Kevin Laatz <kevin.laatz@...el.com>
---
 include/net/xdp.h |  3 ++-
 net/core/xdp.c    | 11 ++++++-----
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/include/net/xdp.h b/include/net/xdp.h
index 0f25b3675c5c..ea801fd2bf98 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -53,7 +53,8 @@ struct xdp_mem_info {
 struct page_pool;
 
 struct zero_copy_allocator {
-	void (*free)(struct zero_copy_allocator *zca, unsigned long handle);
+	void (*free)(struct zero_copy_allocator *zca, unsigned long handle,
+		     off_t off);
 };
 
 struct xdp_rxq_info {
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 4b2b194f4f1f..a77a7162d213 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -322,7 +322,7 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * of xdp_frames/pages in those cases.
  */
 static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
-			 unsigned long handle)
+			 unsigned long handle, off_t off)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -353,7 +353,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		rcu_read_lock();
 		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
-		xa->zc_alloc->free(xa->zc_alloc, handle);
+		xa->zc_alloc->free(xa->zc_alloc, handle, off);
 		rcu_read_unlock();
 	default:
 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
@@ -363,19 +363,20 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
+	__xdp_return(xdpf->data, &xdpf->mem, false, 0, 0);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
+	__xdp_return(xdpf->data, &xdpf->mem, true, 0, 0);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle,
+		     xdp->data - xdp->data_hard_start);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
 
--
2.17.1