Message-ID: <291580c9-d68a-4910-8f28-58852055d7ee@oracle.com>
Date: Thu, 27 Nov 2025 15:59:25 +0530
From: ALOK TIWARI <alok.a.tiwari@...cle.com>
To: Bhargava Marreddy <bhargava.marreddy@...adcom.com>, davem@...emloft.net,
        edumazet@...gle.com, kuba@...nel.org, pabeni@...hat.com,
        andrew+netdev@...n.ch, horms@...nel.org
Cc: netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
        michael.chan@...adcom.com, pavan.chebbi@...adcom.com,
        vsrama-krishna.nemani@...adcom.com, vikas.gupta@...adcom.com,
        Rajashekar Hudumula <rajashekar.hudumula@...adcom.com>
Subject: Re: [External] : [v3, net-next 08/12] bng_en: Add support for TPA
 events

> +static inline struct sk_buff *bnge_gro_skb(struct bnge_net *bn,
> +					   struct bnge_tpa_info *tpa_info,
> +					   struct rx_tpa_end_cmp *tpa_end,
> +					   struct rx_tpa_end_cmp_ext *tpa_end1,
> +					   struct sk_buff *skb)
> +{
> +	int payload_off;
> +	u16 segs;
> +
> +	segs = TPA_END_TPA_SEGS(tpa_end);
> +	if (segs == 1)
> +		return skb;
> +
> +	NAPI_GRO_CB(skb)->count = segs;
> +	skb_shinfo(skb)->gso_size =
> +		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
> +	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
> +	payload_off = TPA_END_PAYLOAD_OFF(tpa_end1);
> +	skb = bnge_gro_func(tpa_info, payload_off,
> +			    TPA_END_GRO_TS(tpa_end), skb);
> +	if (likely(skb))
> +		tcp_gro_complete(skb);
> +
> +	return skb;
> +}
> +#endif
> +
> +static inline struct sk_buff *bnge_tpa_end(struct bnge_net *bn,
> +					   struct bnge_cp_ring_info *cpr,
> +					   u32 *raw_cons,
> +					   struct rx_tpa_end_cmp *tpa_end,
> +					   struct rx_tpa_end_cmp_ext *tpa_end1,
> +					   u8 *event)
> +{
> +	struct bnge_napi *bnapi = cpr->bnapi;
> +	struct net_device *dev = bn->netdev;
> +	struct bnge_tpa_info *tpa_info;
> +	struct bnge_rx_ring_info *rxr;
> +	u8 *data_ptr, agg_bufs;
> +	struct sk_buff *skb;
> +	u16 idx = 0, agg_id;
> +	dma_addr_t mapping;
> +	unsigned int len;
> +	void *data;
> +
> +	rxr = bnapi->rx_ring;
> +	agg_id = TPA_END_AGG_ID(tpa_end);
> +	agg_id = bnge_lookup_agg_idx(rxr, agg_id);
> +	agg_bufs = TPA_END_AGG_BUFS(tpa_end1);
> +	tpa_info = &rxr->rx_tpa[agg_id];
> +	if (unlikely(agg_bufs != tpa_info->agg_count)) {
> +		netdev_warn(bn->netdev, "TPA end agg_buf %d != expected agg_bufs %d\n",
> +			    agg_bufs, tpa_info->agg_count);
> +		agg_bufs = tpa_info->agg_count;
> +	}
> +	tpa_info->agg_count = 0;
> +	*event |= BNGE_AGG_EVENT;
> +	bnge_free_agg_idx(rxr, agg_id);
> +	idx = agg_id;
> +	data = tpa_info->data;
> +	data_ptr = tpa_info->data_ptr;
> +	prefetch(data_ptr);
> +	len = tpa_info->len;
> +	mapping = tpa_info->mapping;
> +
> +	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
> +		bnge_abort_tpa(cpr, idx, agg_bufs);
> +		if (agg_bufs > MAX_SKB_FRAGS)
> +			netdev_warn(bn->netdev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
> +				    agg_bufs, (int)MAX_SKB_FRAGS);
> +		return NULL;
> +	}
> +
> +	if (len <= bn->rx_copybreak) {
> +		skb = bnge_copy_skb(bnapi, data_ptr, len, mapping);
> +		if (!skb) {
> +			bnge_abort_tpa(cpr, idx, agg_bufs);
> +			return NULL;
> +		}
> +	} else {
> +		u8 *new_data;
> +		dma_addr_t new_mapping;
> +
> +		new_data = __bnge_alloc_rx_frag(bn, &new_mapping, rxr,
> +						GFP_ATOMIC);
> +		if (!new_data) {
> +			bnge_abort_tpa(cpr, idx, agg_bufs);
> +			return NULL;
> +		}
> +
> +		tpa_info->data = new_data;
> +		tpa_info->data_ptr = new_data + bn->rx_offset;
> +		tpa_info->mapping = new_mapping;
> +
> +		skb = napi_build_skb(data, bn->rx_buf_size);
> +		dma_sync_single_for_cpu(bn->bd->dev, mapping,
> +					bn->rx_buf_use_size, bn->rx_dir);
> +
> +		if (!skb) {
> +			page_pool_free_va(rxr->head_pool, data, true);
> +			bnge_abort_tpa(cpr, idx, agg_bufs);
> +			return NULL;
> +		}
> +		skb_mark_for_recycle(skb);
> +		skb_reserve(skb, bn->rx_offset);
> +		skb_put(skb, len);
> +	}
> +
> +	if (agg_bufs) {
> +		skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, idx, agg_bufs,
> +					      true);
> +		/* Page reuse already handled by bnge_rx_agg_netmems_skb(). */
> +		if (!skb)
> +			return NULL;
> +	}
> +
> +	skb->protocol = eth_type_trans(skb, dev);
> +
> +	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
> +		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
> +
> +	if (tpa_info->vlan_valid &&
> +	    (dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX)) {
> +		__be16 vlan_proto = htons(tpa_info->metadata >>
> +					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
> +		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
> +
> +		if (eth_type_vlan(vlan_proto)) {
> +			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
> +		} else {
> +			dev_kfree_skb(skb);
> +			return NULL;
> +		}
> +	}
> +
> +	skb_checksum_none_assert(skb);
> +	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
> +		skb->ip_summed = CHECKSUM_UNNECESSARY;
> +		skb->csum_level =
> +			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
> +	}
> +
> +#ifdef CONFIG_INET
> +	if (bn->priv_flags & BNGE_NET_EN_GRO)
> +		skb = bnge_gro_skb(bn, tpa_info, tpa_end, tpa_end1, skb);
> +#endif
> +
> +	return skb;
> +}
> +
>   static enum pkt_hash_types bnge_rss_ext_op(struct bnge_net *bn,
>   					   struct rx_cmp *rxcmp)
>   {
> @@ -380,6 +751,7 @@ static struct sk_buff *bnge_rx_skb(struct bnge_net *bn,
>   
>   /* returns the following:
>    * 1       - 1 packet successfully received
> + * 0       - successful TPA_START, packet not completed yet
>    * -EBUSY  - completion ring does not have all the agg buffers yet
>    * -ENOMEM - packet aborted due to out of memory
>    * -EIO    - packet aborted due to hw error indicated in BD
> @@ -413,6 +785,11 @@ static int bnge_rx_pkt(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
>   
>   	cmp_type = RX_CMP_TYPE(rxcmp);
>   
> +	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
> +		bnge_tpa_agg(bn, rxr, (struct rx_agg_cmp *)rxcmp);
> +		goto next_rx_no_prod_no_len;
> +	}
> +
>   	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
>   	cp_cons = RING_CMP(bn, tmp_raw_cons);
>   	rxcmp1 = (struct rx_cmp_ext *)
> @@ -427,6 +804,32 @@ static int bnge_rx_pkt(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
>   	dma_rmb();
>   	prod = rxr->rx_prod;
>   
> +	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
> +	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
> +		bnge_tpa_start(bn, rxr, cmp_type,
> +			       (struct rx_tpa_start_cmp *)rxcmp,
> +			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
> +
> +		*event |= BNGE_RX_EVENT;
> +		goto next_rx_no_prod_no_len;
> +
> +	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
> +		skb = bnge_tpa_end(bn, cpr, &tmp_raw_cons,
> +				   (struct rx_tpa_end_cmp *)rxcmp,
> +				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

bnge_tpa_end() never returns an ERR_PTR,
so why use IS_ERR() here instead of checking if (!skb)?
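If so, a minimal sketch of the simpler flow (just an illustration, assuming
bnge_tpa_end() returns either a valid skb or NULL, as its abort paths above
suggest):

		skb = bnge_tpa_end(bn, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		/* bnge_tpa_end() signals failure only with NULL, never ERR_PTR */
		rc = -ENOMEM;
		if (likely(skb)) {
			bnge_deliver_skb(bn, bnapi, skb);
			rc = 1;
		}
		*event |= BNGE_RX_EVENT;
		goto next_rx_no_prod_no_len;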

> +
> +		if (IS_ERR(skb))
> +			return -EBUSY;
> +
> +		rc = -ENOMEM;
> +		if (likely(skb)) {
> +			bnge_deliver_skb(bn, bnapi, skb);
> +			rc = 1;
> +		}
> +		*event |= BNGE_RX_EVENT;
> +		goto next_rx_no_prod_no_len;
> +	}
> +
>   	cons = rxcmp->rx_cmp_opaque;
>   	if (unlikely(cons != rxr->rx_next_cons)) {
>   		int rc1 = bnge_discard_rx(bn, cpr, &tmp_raw_cons, rxcmp);
> @@ -461,7 +864,8 @@ static int bnge_rx_pkt(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
>   	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
>   		bnge_reuse_rx_data(rxr, cons, data);
>   		if (agg_bufs)
> -			bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs);
> +			bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
> +					       false);
>   		rc = -EIO;
>   		goto next_rx_no_len;

Thanks,
Alok
