Message-ID: <6bee09ad-c2b6-492a-9658-52968776e4e2@amd.com>
Date: Tue, 13 Feb 2024 14:05:01 -0800
From: "Nelson, Shannon" <shannon.nelson@....com>
To: Paolo Abeni <pabeni@...hat.com>, netdev@...r.kernel.org,
davem@...emloft.net, kuba@...nel.org, edumazet@...gle.com
Cc: brett.creeley@....com, drivers@...sando.io
Subject: Re: [PATCH v3 net-next 4/9] ionic: add initial framework for XDP support

On 2/13/2024 3:20 AM, Paolo Abeni wrote:
> On Fri, 2024-02-09 at 16:48 -0800, Shannon Nelson wrote:
> [...]
>> +static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
>> +{
>> + struct xdp_rxq_info *rxq_info;
>> + int err;
>> +
>> + rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
>> + if (!rxq_info)
>> + return -ENOMEM;
>> +
>> + err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
>> + if (err) {
>> + dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
>> + q->index, err);
>
> You can avoid a little code duplication with the usual
> goto err;
>
> //...
> err:
> kfree(rxq_info);
> return err;
>
>> + kfree(rxq_info);
>> + return err;
>> + }
>> +
>> + err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
>> + if (err) {
>> + dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
>> + q->index, err);
>> + xdp_rxq_info_unreg(rxq_info);
>
> and using the same label here.
Since it is only the kfree(), I wasn't going to worry about a goto, but
sure, I can tweak that.
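
Maybe something along these lines, with a shared err_out label for the
kfree() (just a sketch, not compile-tested):

static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
{
	struct xdp_rxq_info *rxq_info;
	int err;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
	if (err) {
		dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
			q->index, err);
		goto err_out;
	}

	err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
	if (err) {
		dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
			q->index, err);
		xdp_rxq_info_unreg(rxq_info);
		goto err_out;
	}

	q->xdp_rxq_info = rxq_info;

	return 0;

err_out:
	/* both failure paths land here to free the unregistered rxq_info */
	kfree(rxq_info);
	return err;
}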
>
>> + kfree(rxq_info);
>> + return err;
>> + }
>> +
>> + q->xdp_rxq_info = rxq_info;
>> +
>> + return 0;
>> +}
>> +
>> +static int ionic_xdp_queues_config(struct ionic_lif *lif)
>> +{
>> + unsigned int i;
>> + int err;
>> +
>> + if (!lif->rxqcqs)
>> + return 0;
>> +
>> + /* There's no need to rework memory if not going to/from NULL program.
>> + * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info.
>> + * This way we don't need to keep an *xdp_prog in every queue struct.
>> + */
>> + if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
>> + return 0;
>> +
>> + for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
>> + struct ionic_queue *q = &lif->rxqcqs[i]->q;
>> +
>> + if (q->xdp_rxq_info) {
>> + ionic_xdp_unregister_rxq_info(q);
>
> You can reduce the nesting level by adding a 'continue' here:
> continue;
> }
Sure, will adjust.
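
Right, something like this for the loop body (untested):

	for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
		struct ionic_queue *q = &lif->rxqcqs[i]->q;

		/* tearing down XDP: drop the old rxq_info and move on */
		if (q->xdp_rxq_info) {
			ionic_xdp_unregister_rxq_info(q);
			continue;
		}

		/* setting up XDP: register rxq_info for this queue */
		err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
		if (err) {
			dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
				i, err);
			goto err_out;
		}
	}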
>
> err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
> // ...
>
>> + } else {
>> + err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
>> + if (err) {
>> + dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
>> + i, err);
>> + goto err_out;
>> + }
>> + }
>> + }
>> +
>> + return 0;
>> +
>> +err_out:
>> + for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
>> + ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
>> +
>> + return err;
>> +}
>> +
>> +static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
>> +{
>> + struct ionic_lif *lif = netdev_priv(netdev);
>> + struct bpf_prog *old_prog;
>> + u32 maxfs;
>> +
>> + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
>> +#define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
>> + NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
>> + netdev_info(lif->netdev, XDP_ERR_SPLIT);
>> + return -EOPNOTSUPP;
>> + }
>> +
>> + if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
>> +#define XDP_ERR_MTU "MTU is too large for XDP without frags support"
>> + NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
>> + netdev_info(lif->netdev, XDP_ERR_MTU);
>> + return -EINVAL;
>> + }
>> +
>> + maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
>> + if (bpf->prog)
>> + maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
>> + netdev->max_mtu = maxfs;
>> +
>> + if (!netif_running(netdev)) {
>> + old_prog = xchg(&lif->xdp_prog, bpf->prog);
>> + } else {
>> + mutex_lock(&lif->queue_lock);
>> + ionic_stop_queues_reconfig(lif);
>> + old_prog = xchg(&lif->xdp_prog, bpf->prog);
>> + ionic_start_queues_reconfig(lif);
>> + mutex_unlock(&lif->queue_lock);
>> + }
>> +
>> + if (old_prog)
>> + bpf_prog_put(old_prog);
>> +
>> + return 0;
>> +}
>> +
>> +static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
>> +{
>> + switch (bpf->command) {
>> + case XDP_SETUP_PROG:
>> + return ionic_xdp_config(netdev, bpf);
>> + default:
>> + return -EINVAL;
>> + }
>> +}
>> +
>> static const struct net_device_ops ionic_netdev_ops = {
>> .ndo_open = ionic_open,
>> .ndo_stop = ionic_stop,
>> .ndo_eth_ioctl = ionic_eth_ioctl,
>> .ndo_start_xmit = ionic_start_xmit,
>> + .ndo_bpf = ionic_xdp,
>> .ndo_get_stats64 = ionic_get_stats64,
>> .ndo_set_rx_mode = ionic_ndo_set_rx_mode,
>> .ndo_set_features = ionic_set_features,
>> @@ -2755,6 +2922,7 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
>> swap(a->q.base, b->q.base);
>> swap(a->q.base_pa, b->q.base_pa);
>> swap(a->q.info, b->q.info);
>> + swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
>> swap(a->q_base, b->q_base);
>> swap(a->q_base_pa, b->q_base_pa);
>> swap(a->q_size, b->q_size);
>> diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
>> index 61548b3eea93..61fa4ea4f04c 100644
>> --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
>> +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
>> @@ -51,6 +51,9 @@ struct ionic_rx_stats {
>> u64 alloc_err;
>> u64 hwstamp_valid;
>> u64 hwstamp_invalid;
>> + u64 xdp_drop;
>> + u64 xdp_aborted;
>> + u64 xdp_pass;
>> };
>>
>> #define IONIC_QCQ_F_INITED BIT(0)
>> @@ -135,6 +138,9 @@ struct ionic_lif_sw_stats {
>> u64 hw_rx_over_errors;
>> u64 hw_rx_missed_errors;
>> u64 hw_tx_aborted_errors;
>> + u64 xdp_drop;
>> + u64 xdp_aborted;
>> + u64 xdp_pass;
>> };
>>
>> enum ionic_lif_state_flags {
>> @@ -230,6 +236,7 @@ struct ionic_lif {
>> struct ionic_phc *phc;
>>
>> struct dentry *dentry;
>> + struct bpf_prog *xdp_prog;
>> };
>>
>> struct ionic_phc {
>> diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
>> index 1f6022fb7679..2fb20173b2c6 100644
>> --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
>> +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
>> @@ -27,6 +27,9 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
>> IONIC_LIF_STAT_DESC(hw_rx_over_errors),
>> IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
>> IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
>> + IONIC_LIF_STAT_DESC(xdp_drop),
>> + IONIC_LIF_STAT_DESC(xdp_aborted),
>> + IONIC_LIF_STAT_DESC(xdp_pass),
>> };
>>
>> static const struct ionic_stat_desc ionic_port_stats_desc[] = {
>> @@ -149,6 +152,9 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
>> IONIC_RX_STAT_DESC(hwstamp_invalid),
>> IONIC_RX_STAT_DESC(dropped),
>> IONIC_RX_STAT_DESC(vlan_stripped),
>> + IONIC_RX_STAT_DESC(xdp_drop),
>> + IONIC_RX_STAT_DESC(xdp_aborted),
>> + IONIC_RX_STAT_DESC(xdp_pass),
>> };
>>
>> #define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
>> @@ -185,6 +191,9 @@ static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
>> stats->rx_csum_error += rxstats->csum_error;
>> stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
>> stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
>> + stats->xdp_drop += rxstats->xdp_drop;
>> + stats->xdp_aborted += rxstats->xdp_aborted;
>> + stats->xdp_pass += rxstats->xdp_pass;
>> }
>>
>> static void ionic_get_lif_stats(struct ionic_lif *lif,
>> diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
>> index aee38979a9d7..07a17be94d4d 100644
>> --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
>> +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
>> @@ -177,7 +177,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
>> if (page_to_nid(buf_info->page) != numa_mem_id())
>> return false;
>>
>> - size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
>> + size = ALIGN(used, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
>> buf_info->page_offset += size;
>> if (buf_info->page_offset >= IONIC_PAGE_SIZE)
>> return false;
>> @@ -287,6 +287,54 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
>> return skb;
>> }
>>
>> +static bool ionic_run_xdp(struct ionic_rx_stats *stats,
>> + struct net_device *netdev,
>> + struct ionic_queue *rxq,
>> + struct ionic_buf_info *buf_info,
>> + int len)
>> +{
>> + u32 xdp_action = XDP_ABORTED;
>> + struct bpf_prog *xdp_prog;
>> + struct xdp_buff xdp_buf;
>> +
>> + xdp_prog = READ_ONCE(rxq->lif->xdp_prog);
>> + if (!xdp_prog)
>> + return false;
>> +
>> + xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
>> + xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
>> + 0, len, false);
>> +
>> + dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
>> + 0, len,
>> + DMA_FROM_DEVICE);
>
> in case of XDP_PASS the same buf will be synched twice ?!?
Yeah, that is kind of ugly. I can pull the READ_ONCE() of xdp_prog up a
level and use it as an indicator for skipping the 2nd dma sync.
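
Something like this in the rx clean path, with ionic_run_xdp() taking the
prog as a parameter instead of doing its own READ_ONCE() (sketch only, not
compile-tested, and the exact placement of the skb-path sync is from memory):

	struct bpf_prog *xdp_prog;

	xdp_prog = READ_ONCE(q->lif->xdp_prog);
	if (xdp_prog &&
	    ionic_run_xdp(stats, netdev, xdp_prog, q, buf_info, len))
		return;

	/* if an XDP program ran, it already did the dma_sync for the CPU,
	 * so don't sync the same buffer a second time on the XDP_PASS path
	 */
	if (!xdp_prog)
		dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
					      0, len, DMA_FROM_DEVICE);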
sln
>
> Cheers,
>
> Paolo
>