Message-ID: <20251204155133.2437621-2-aleksander.lobakin@intel.com>
Date: Thu, 4 Dec 2025 16:51:29 +0100
From: Alexander Lobakin <aleksander.lobakin@...el.com>
To: intel-wired-lan@...ts.osuosl.org
Cc: Alexander Lobakin <aleksander.lobakin@...el.com>,
Tony Nguyen <anthony.l.nguyen@...el.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>,
Jacob Keller <jacob.e.keller@...el.com>,
Aleksandr Loktionov <aleksandr.loktionov@...el.com>,
nxne.cnse.osdt.itp.upstreaming@...el.com,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH iwl-next v2 1/5] libeth: pass Rx queue index to PP when creating a fill queue

Since recently, page_pool_create() accepts an optional stack index of
the Rx queue which the pool is created for. It can then be used on the
control path for features such as memory providers.
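
For reference, a consumer of page_pool_create() fills the new parameter
roughly like this (a minimal sketch only; the surrounding variable names
such as ring, dev, netdev and napi are illustrative, the relevant part is
the queue_idx assignment):

    struct page_pool_params pp = {
        .flags      = PP_FLAG_DMA_MAP,
        .order      = 0,
        .pool_size  = ring->count,
        .nid        = NUMA_NO_NODE,
        .dev        = dev,
        .netdev     = netdev,
        .napi       = napi,
        .dma_dir    = DMA_FROM_DEVICE,
        /* stack index of the Rx queue served by this pool */
        .queue_idx  = ring->queue_index,
    };
    struct page_pool *pool;

    pool = page_pool_create(&pp);
    if (IS_ERR(pool))
        return PTR_ERR(pool);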

Add the same field to libeth_fq and pass the index from all the
drivers that use libeth for managing Rx, to make implementing MP
support easier later on.
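
With that, a driver using libeth only has to fill one more field when
creating its fill queue, e.g. (an illustrative sketch in the spirit of
the iavf/ice hunks below; the Rx queue structure and its member names
differ per driver):

    struct libeth_fq fq = {
        .count      = rxq->desc_count,
        .buf_len    = LIBIE_MAX_RX_BUF_LEN,
        .nid        = NUMA_NO_NODE,
        /* new: stack index of the Rx queue this fill queue serves */
        .idx        = rxq->queue_index,
    };
    int err;

    err = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
    if (err)
        return err;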

idpf has one libeth_fq per buffer/fill queue and each Rx queue has
two fill queues, but since fill queues can never be shared, we can
store the corresponding Rx queue index there during initialization
and pass it to libeth.

Reviewed-by: Jacob Keller <jacob.e.keller@...el.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@...el.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@...el.com>
---
drivers/net/ethernet/intel/idpf/idpf_txrx.h | 2 ++
include/net/libeth/rx.h | 2 ++
drivers/net/ethernet/intel/iavf/iavf_txrx.c | 1 +
drivers/net/ethernet/intel/ice/ice_base.c | 2 ++
drivers/net/ethernet/intel/idpf/idpf_txrx.c | 13 +++++++++++++
drivers/net/ethernet/intel/libeth/rx.c | 1 +
6 files changed, 21 insertions(+)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 6796f010e382..0eaebac8ceae 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -748,6 +748,7 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
+ * @rxq_idx: stack index of the corresponding Rx queue
* @rx_buffer_low_watermark: RX buffer low watermark
* @rx_hbuf_size: Header buffer size
* @rx_buf_size: Buffer size
@@ -791,6 +792,7 @@ struct idpf_buf_queue {
dma_addr_t dma;
struct idpf_q_vector *q_vector;
+ u16 rxq_idx;
u16 rx_buffer_low_watermark;
u16 rx_hbuf_size;
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
index 0e736846c5e8..db838ef7f9bb 100644
--- a/include/net/libeth/rx.h
+++ b/include/net/libeth/rx.h
@@ -72,6 +72,7 @@ enum libeth_fqe_type {
* @no_napi: the queue is not a data queue and does not have NAPI
* @buf_len: HW-writeable length per each buffer
* @nid: ID of the closest NUMA node with memory
+ * @idx: stack index of the corresponding Rx queue
*/
struct libeth_fq {
struct_group_tagged(libeth_fq_fp, fp,
@@ -90,6 +91,7 @@ struct libeth_fq {
u32 buf_len;
int nid;
+ u32 idx;
};
int libeth_rx_fq_create(struct libeth_fq *fq, void *napi_dev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 275b11dd0c60..3d938d7ab2cc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -771,6 +771,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
.count = rx_ring->count,
.buf_len = LIBIE_MAX_RX_BUF_LEN,
.nid = NUMA_NO_NODE,
+ .idx = rx_ring->queue_index,
};
int ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 6fb7051aa463..7097324c38f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -607,6 +607,7 @@ static int ice_rxq_pp_create(struct ice_rx_ring *rq)
struct libeth_fq fq = {
.count = rq->count,
.nid = NUMA_NO_NODE,
+ .idx = rq->q_index,
.hsplit = rq->vsi->hsplit,
.xdp = ice_is_xdp_ena_vsi(rq->vsi),
.buf_len = LIBIE_MAX_RX_BUF_LEN,
@@ -629,6 +630,7 @@ static int ice_rxq_pp_create(struct ice_rx_ring *rq)
.count = rq->count,
.type = LIBETH_FQE_HDR,
.nid = NUMA_NO_NODE,
+ .idx = rq->q_index,
.xdp = ice_is_xdp_ena_vsi(rq->vsi),
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 72215612b460..5dc41b7ba609 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -561,6 +561,7 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
.type = LIBETH_FQE_HDR,
.xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
+ .idx = bufq->rxq_idx,
};
int ret;
@@ -703,6 +704,7 @@ static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
.type = LIBETH_FQE_MTU,
.buf_len = IDPF_RX_MAX_BUF_SZ,
.nid = idpf_q_vector_to_mem(rxq->q_vector),
+ .idx = rxq->idx,
};
int ret;
@@ -763,6 +765,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
.xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
+ .idx = bufq->rxq_idx,
};
int ret;
@@ -1922,6 +1925,16 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport,
LIBETH_RX_LL_LEN;
idpf_rxq_set_descids(rsrc, q);
}
+
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
+ continue;
+
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *bufq;
+
+ bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ bufq->rxq_idx = rx_qgrp->splitq.rxq_sets[0]->rxq.idx;
+ }
}
err_alloc:
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 1d8248a31037..9ac3a1448b2f 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -157,6 +157,7 @@ int libeth_rx_fq_create(struct libeth_fq *fq, void *napi_dev)
.order = LIBETH_RX_PAGE_ORDER,
.pool_size = fq->count,
.nid = fq->nid,
+ .queue_idx = fq->idx,
.dev = napi ? napi->dev->dev.parent : napi_dev,
.netdev = napi ? napi->dev : NULL,
.napi = napi,
--
2.52.0