Message-ID: <20260119194553.1264793-1-kuba@kernel.org>
Date: Mon, 19 Jan 2026 11:45:53 -0800
From: Jakub Kicinski <kuba@...nel.org>
To: bhargava.marreddy@...adcom.com
Cc: Jakub Kicinski <kuba@...nel.org>,
	andrew+netdev@...n.ch,
	vikas.gupta@...adcom.com,
	vsrama-krishna.nemani@...adcom.com,
	pabeni@...hat.com,
	rajashekar.hudumula@...adcom.com,
	rahul-rg.gupta@...adcom.com,
	davem@...emloft.net,
	edumazet@...gle.com,
	pavan.chebbi@...adcom.com,
	linux-kernel@...r.kernel.org,
	netdev@...r.kernel.org,
	horms@...nel.org,
	ajit.khaparde@...adcom.com,
	michael.chan@...adcom.com
Subject: Re: [v5,net-next,8/8] bng_en: Add support for TPA events

This is an AI-generated review of your patch. The human sending this
email has considered the AI review valid, or at least plausible.

For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
---
bng_en: Add support for TPA events

This patch enables TPA (Transparent Packet Aggregation) functionality
in the VNIC and adds functions to handle TPA events for LRO/GRO
processing.

> diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h

[ ... ]

> @@ -159,10 +159,9 @@ enum {
>  #define MAX_TPA_MASK	(MAX_TPA - 1)
>  #define MAX_TPA_SEGS	0x3f
>
> -#define BNGE_AGG_IDX_BMAP_SIZE	(MAX_TPA / BITS_PER_LONG)
>  struct bnge_tpa_idx_map {
>  	u16		agg_id_tbl[1024];
> -	unsigned long	agg_idx_bmap[BNGE_AGG_IDX_BMAP_SIZE];
> +	DECLARE_BITMAP(agg_idx_bmap, MAX_TPA);
>  };

> diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c b/drivers/net/ethernet/broadcom/bnge/bnge_txrx.c

[ ... ]

> +static u16 bnge_tpa_alloc_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
> +{
> +	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
> +	u16 idx = agg_id & MAX_TPA_MASK;
> +
> +	if (test_bit(idx, map->agg_idx_bmap)) {
> +		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
> +		if (idx >= MAX_TPA)
> +			return INVALID_HW_RING_ID;
> +	}
> +	__set_bit(idx, map->agg_idx_bmap);
> +	map->agg_id_tbl[agg_id] = idx;
                        ^^^^^^

The agg_id_tbl array has 1024 elements, but agg_id comes from hardware via
TPA_START_AGG_ID(), which uses the mask RX_TPA_START_CMP_AGG_ID_P5
(0xffff << 16) and therefore allows values up to 65535. Can this write go
out of bounds when agg_id >= 1024?

For reference, the bnxt driver uses a different mask (0x0fff << 16),
limiting agg_id to 0-4095.
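
A minimal sketch of one possible guard (the placement and the use of
ARRAY_SIZE() are assumptions, not taken from the elided hunks):

	/* hypothetical guard, placed before the bitmap and table updates */
	if (unlikely(agg_id >= ARRAY_SIZE(map->agg_id_tbl)))
		return INVALID_HW_RING_ID;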

> +	return idx;
> +}

[ ... ]

> +static u16 bnge_lookup_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
> +{
> +	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
> +
> +	return map->agg_id_tbl[agg_id];
                             ^^^^^^

Same concern here - the array lookup has no bounds check on agg_id.
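
A corresponding sketch for the lookup side (illustrative only; callers
would then have to handle the INVALID_HW_RING_ID marker):

	if (unlikely(agg_id >= ARRAY_SIZE(map->agg_id_tbl)))
		return INVALID_HW_RING_ID;
	return map->agg_id_tbl[agg_id];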

> +}

[ ... ]

> +static void bnge_tpa_start(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
> +			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
> +			   struct rx_tpa_start_cmp_ext *tpa_start1)
> +{
> +	struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
> +	struct bnge_tpa_info *tpa_info;
> +	u16 cons, prod, agg_id;
> +	struct rx_bd *prod_bd;
> +	dma_addr_t mapping;
> +
> +	agg_id = TPA_START_AGG_ID(tpa_start);
> +	agg_id = bnge_tpa_alloc_agg_idx(rxr, agg_id);
> +	if (unlikely(agg_id == INVALID_HW_RING_ID)) {
> +		netdev_warn(bn->netdev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
> +			    rxr->bnapi->index, TPA_START_AGG_ID(tpa_start));
> +		bnge_sched_reset_rxr(bn, rxr);
> +		return;
> +	}
> +	cons = tpa_start->rx_tpa_start_cmp_opaque;
> +	prod = rxr->rx_prod;
> +	cons_rx_buf = &rxr->rx_buf_ring[cons];
> +	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
> +	tpa_info = &rxr->rx_tpa[agg_id];
> +
> +	if (unlikely(cons != rxr->rx_next_cons ||
> +		     TPA_START_ERROR(tpa_start))) {
> +		netdev_warn(bn->netdev, "TPA cons %x, expected cons %x, error code %x\n",
> +			    cons, rxr->rx_next_cons,
> +			    TPA_START_ERROR_CODE(tpa_start1));
> +		bnge_sched_reset_rxr(bn, rxr);
> +		return;
                ^^^^^^

When this error path is taken, bnge_tpa_alloc_agg_idx() has already set
a bit in map->agg_idx_bmap, but there is no corresponding call to
bnge_free_agg_idx(). Will this leak the TPA index, eventually
exhausting the available indices and causing subsequent TPA_START
events to fail?
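
A sketch of releasing the index on this path, assuming bnge_free_agg_idx()
(not visible in the quoted hunks) clears the bit that
bnge_tpa_alloc_agg_idx() set:

		bnge_free_agg_idx(rxr, agg_id);	/* drop the index allocated above */
		bnge_sched_reset_rxr(bn, rxr);
		return;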

> +	}

[ ... ]

> +static void bnge_tpa_agg(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
> +			 struct rx_agg_cmp *rx_agg)
> +{
> +	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
> +	struct bnge_tpa_info *tpa_info;
> +
> +	agg_id = bnge_lookup_agg_idx(rxr, agg_id);
> +	tpa_info = &rxr->rx_tpa[agg_id];

If bnge_lookup_agg_idx() returns a garbage value due to an out-of-bounds
read, does this risk accessing invalid memory here as well?
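
One illustrative hardening, assuming rx_tpa holds MAX_TPA entries (the
bail-out action is only a placeholder):

	agg_id = bnge_lookup_agg_idx(rxr, agg_id);
	if (unlikely(agg_id >= MAX_TPA))
		return;	/* placeholder: drop the event instead of indexing rx_tpa out of range */
	tpa_info = &rxr->rx_tpa[agg_id];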

> +
> +	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
> +}
