[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <CANXQDtY0AVH7gH1fEUa7OtJPpawi6MVBNkuXDCdPQY5Z7j6J3A@mail.gmail.com>
Date: Fri, 23 Jan 2026 23:36:04 +0530
From: Bhargava Chenna Marreddy <bhargava.marreddy@...adcom.com>
To: Jakub Kicinski <kuba@...nel.org>
Cc: andrew+netdev@...n.ch, vikas.gupta@...adcom.com,
vsrama-krishna.nemani@...adcom.com, pabeni@...hat.com,
rajashekar.hudumula@...adcom.com, rahul-rg.gupta@...adcom.com,
davem@...emloft.net, edumazet@...gle.com, pavan.chebbi@...adcom.com,
linux-kernel@...r.kernel.org, netdev@...r.kernel.org, horms@...nel.org,
ajit.khaparde@...adcom.com, michael.chan@...adcom.com
Subject: Re: [v5,net-next,8/8] bng_en: Add support for TPA events
On Tue, Jan 20, 2026 at 1:15 AM Jakub Kicinski <kuba@...nel.org> wrote:
>
> This is an AI-generated review of your patch. The human sending this
> email has considered the AI review valid, or at least plausible.
>
> For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
> ---
> bng_en: Add support for TPA events
>
> This patch enables TPA (Transparent Packet Aggregation) functionality
> in the VNIC and adds functions to handle TPA events for LRO/GRO
> processing.
>
> > diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
>
> [ ... ]
>
> > @@ -159,10 +159,9 @@ enum {
> > #define MAX_TPA_MASK (MAX_TPA - 1)
> > #define MAX_TPA_SEGS 0x3f
> >
> > -#define BNGE_AGG_IDX_BMAP_SIZE (MAX_TPA / BITS_PER_LONG)
> > struct bnge_tpa_idx_map {
> > u16 agg_id_tbl[1024];
> > - unsigned long agg_idx_bmap[BNGE_AGG_IDX_BMAP_SIZE];
> > + DECLARE_BITMAP(agg_idx_bmap, MAX_TPA);
> > };
>
> > diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
>
> [ ... ]
>
> > +static u16 bnge_tpa_alloc_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
> > +{
> > + struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
> > + u16 idx = agg_id & MAX_TPA_MASK;
> > +
> > + if (test_bit(idx, map->agg_idx_bmap)) {
> > + idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
> > + if (idx >= MAX_TPA)
> > + return INVALID_HW_RING_ID;
> > + }
> > + __set_bit(idx, map->agg_idx_bmap);
> > + map->agg_id_tbl[agg_id] = idx;
> ^^^^^^
>
> The agg_id_tbl array is 1024 elements, but agg_id comes from hardware via
> TPA_START_AGG_ID() which uses mask RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16),
> allowing values up to 65535. Can this write out of bounds when
> agg_id >= 1024?
>
> For reference, the bnxt driver uses a different mask (0x0fff << 16),
> limiting agg_id to 0-4095.
Ack. Will update the RX_TPA_START_CMP_AGG_ID_P5 macro to (0x3ff << 16) to match
the 0-1023 TPA ID range in the next revision.
>
> > + return idx;
> > +}
>
> [ ... ]
>
> > +static u16 bnge_lookup_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
> > +{
> > + struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
> > +
> > + return map->agg_id_tbl[agg_id];
> ^^^^^^
>
> Same concern here - the array lookup has no bounds check on agg_id.
Ack.
>
> > +}
>
> [ ... ]
>
> > +static void bnge_tpa_start(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
> > + u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
> > + struct rx_tpa_start_cmp_ext *tpa_start1)
> > +{
> > + struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
> > + struct bnge_tpa_info *tpa_info;
> > + u16 cons, prod, agg_id;
> > + struct rx_bd *prod_bd;
> > + dma_addr_t mapping;
> > +
> > + agg_id = TPA_START_AGG_ID(tpa_start);
> > + agg_id = bnge_tpa_alloc_agg_idx(rxr, agg_id);
> > + if (unlikely(agg_id == INVALID_HW_RING_ID)) {
> > + netdev_warn(bn->netdev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
> > + rxr->bnapi->index, TPA_START_AGG_ID(tpa_start));
> > + bnge_sched_reset_rxr(bn, rxr);
> > + return;
> > + }
> > + cons = tpa_start->rx_tpa_start_cmp_opaque;
> > + prod = rxr->rx_prod;
> > + cons_rx_buf = &rxr->rx_buf_ring[cons];
> > + prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
> > + tpa_info = &rxr->rx_tpa[agg_id];
> > +
> > + if (unlikely(cons != rxr->rx_next_cons ||
> > + TPA_START_ERROR(tpa_start))) {
> > + netdev_warn(bn->netdev, "TPA cons %x, expected cons %x, error code %x\n",
> > + cons, rxr->rx_next_cons,
> > + TPA_START_ERROR_CODE(tpa_start1));
> > + bnge_sched_reset_rxr(bn, rxr);
> > + return;
> ^^^^^^
>
> When this error path is taken, bnge_tpa_alloc_agg_idx() has already set
> a bit in map->agg_idx_bmap but there is no corresponding call to
> bnge_free_agg_idx(). Will this leak the TPA index, eventually
> exhausting the available indices and causing subsequent TPA_START
> events to fail?
Reset logic coming in a later patchset will handle a full re-init (close/open).
This clears all bitmaps and prevents the leak.
>
> > + }
>
> [ ... ]
>
> > +static void bnge_tpa_agg(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
> > + struct rx_agg_cmp *rx_agg)
> > +{
> > + u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
> > + struct bnge_tpa_info *tpa_info;
> > +
> > + agg_id = bnge_lookup_agg_idx(rxr, agg_id);
> > + tpa_info = &rxr->rx_tpa[agg_id];
>
> If bnge_lookup_agg_idx() returns a garbage value due to out-of-bounds
> read, does this risk accessing invalid memory here as well?
Same fix applies here. Changing the mask to (0x3ff << 16) handles both cases.
Thanks,
Bhargava Marreddy
>
> > +
> > + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
> > +}
Download attachment "smime.p7s" of type "application/pkcs7-signature" (5496 bytes)
Powered by blists - more mailing lists