Message-ID: <31fcb873-742f-415d-916d-89735133a0be@intel.com>
Date: Tue, 30 Jul 2024 16:51:34 +0200
From: Alexander Lobakin <aleksander.lobakin@...el.com>
To: Mateusz Polchlopek <mateusz.polchlopek@...el.com>
CC: <intel-wired-lan@...ts.osuosl.org>, <netdev@...r.kernel.org>, Jacob Keller
	<jacob.e.keller@...el.com>
Subject: Re: [Intel-wired-lan] [PATCH iwl-next v8 11/14] iavf: refactor
 iavf_clean_rx_irq to support legacy and flex descriptors

From: Mateusz Polchlopek <mateusz.polchlopek@...el.com>
Date: Tue, 30 Jul 2024 05:15:06 -0400

> From: Jacob Keller <jacob.e.keller@...el.com>
> 
> Using VIRTCHNL_VF_OFFLOAD_FLEX_DESC, the iAVF driver is capable of
> negotiating to enable the advanced flexible descriptor layout. Add the
> flexible NIC layout (RXDID=2) as a member of the Rx descriptor union.

[...]

> -static void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb,
> -			     struct iavf_rx_desc *rx_desc)
> +static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
> +			 struct libeth_rx_pt ptype,

You can save a couple of lines of diff if you rename ptype -> decoded. It
would also be more consistent with what you're defining below.
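
I.e. something like this (untested, just to illustrate the idea):

	static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
				 struct libeth_rx_pt decoded,
				 struct libeth_rx_csum_decoded csum_bits)

and then the libeth_rx_pt_get_ip_ver() lines below wouldn't need to be
touched by the diff at all.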

> +			 struct libeth_rx_csum_decoded csum_bits)
>  {
> -	struct libeth_rx_pt decoded;
> -	u32 rx_error, rx_status;
>  	bool ipv4, ipv6;
> -	u64 qword;
> -	u8 ptype;
>  
>  	skb->ip_summed = CHECKSUM_NONE;
>  
> -	qword = le64_to_cpu(rx_desc->qw1);
> -	ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
> -
> -	decoded = libie_rx_pt_parse(ptype);
> -	if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
> -		return;
> -
> -	rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
> -	rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
> -
>  	/* did the hardware decode the packet and checksum? */
> -	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
> +	if (unlikely(!csum_bits.l3l4p))
>  		return;
>  
> -	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
> -	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
> +	ipv4 = libeth_rx_pt_get_ip_ver(ptype) == LIBETH_RX_PT_OUTER_IPV4;
> +	ipv6 = libeth_rx_pt_get_ip_ver(ptype) == LIBETH_RX_PT_OUTER_IPV6;
>  
> -	if (ipv4 &&
> -	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
> -			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
> +	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
>  		goto checksum_fail;
>  
>  	/* likely incorrect csum if alternate IP extension headers found */
> -	if (ipv6 &&
> -	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
> -		/* don't increment checksum err here, non-fatal err */
> +	if (unlikely(ipv6 && csum_bits.ipv6exadd))
>  		return;
>  
>  	/* there was some L4 error, count error and punt packet to the stack */
> -	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
> +	if (unlikely(csum_bits.l4e))
>  		goto checksum_fail;
>  
>  	/* handle packets that were not able to be checksummed due
>  	 * to arrival speed, in this case the stack can compute
>  	 * the csum.
>  	 */
> -	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
> +	if (unlikely(csum_bits.pprs))
>  		return;
>  
>  	skb->ip_summed = CHECKSUM_UNNECESSARY;
> @@ -959,30 +943,122 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb,
>  }
>  
>  /**
> - * iavf_rx_hash - set the hash value in the skb
> + * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
> + * @vsi: the VSI we care about
> + * @rx_desc: the receive descriptor
> + * @decoded: decoded packet type
> + *
> + * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
> + * descriptor writeback format.
> + *
> + * Return: decoded checksum bits

(don't forget about periods at the end of sentences)

> + **/
> +static struct libeth_rx_csum_decoded
> +iavf_legacy_rx_csum(const struct iavf_vsi *vsi,
> +		    const struct iavf_rx_desc *rx_desc,
> +		    const struct libeth_rx_pt decoded)
> +{
> +	struct libeth_rx_csum_decoded csum_bits = {};
> +	u64 qw1 = le64_to_cpu(rx_desc->qw1);
> +
> +	if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
> +		return csum_bits;
> +
> +	csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_QW1_IPE_M, qw1);
> +	csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_QW1_EIPE_M, qw1);
> +	csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_QW1_L4E_M, qw1);
> +	csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_QW1_PPRS_M, qw1);
> +	csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_QW1_L3L4P_M, qw1);
> +	csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_QW1_IPV6EXADD_M, qw1);
> +
> +	return csum_bits;
> +}
> +
> +/**
> + * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
> + * @vsi: the VSI we care about
> + * @rx_desc: the receive descriptor
> + * @decoded: decoded packet type
> + *
> + * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
> + * descriptor writeback format.
> + *
> + * Return: decoded checksum bits
> + **/
> +static struct libeth_rx_csum_decoded
> +iavf_flex_rx_csum(const struct iavf_vsi *vsi,
> +		  const struct iavf_rx_desc *rx_desc,
> +		  const struct libeth_rx_pt decoded)
> +{
> +	struct libeth_rx_csum_decoded csum_bits = {};
> +	u64 qw1 = le64_to_cpu(rx_desc->qw1);
> +
> +	if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
> +		return csum_bits;
> +
> +	csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_QW1_XSUM_IPE_M, qw1);
> +	csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_QW1_XSUM_EIPE_M, qw1);
> +	csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_QW1_XSUM_L4E_M, qw1);
> +	csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_QW1_XSUM_EUDPE_M, qw1);
> +	csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_QW1_L3L4P_M, qw1);
> +	csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_QW1_IPV6EXADD_M, qw1);
> +	csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_QW2_NAT_M, qw1);
> +
> +	return csum_bits;
> +}
> +
> +/**
> + * iavf_legacy_rx_hash - set the hash value in the skb
>   * @ring: descriptor ring
>   * @rx_desc: specific descriptor
>   * @skb: skb currently being received and modified
> - * @rx_ptype: Rx packet type
> + * @decoded: decoded packet type
> + *
> + * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
> + * descriptor writeback format.
>   **/
> -static void iavf_rx_hash(struct iavf_ring *ring,
> -			 struct iavf_rx_desc *rx_desc,
> -			 struct sk_buff *skb,
> -			 u8 rx_ptype)
> +static void iavf_legacy_rx_hash(const struct iavf_ring *ring,
> +				const struct iavf_rx_desc *rx_desc,
> +				struct sk_buff *skb,
> +				const struct libeth_rx_pt decoded)
>  {
> -	struct libeth_rx_pt decoded;
> +	const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_QW1_FLTSTAT_M);
>  	u32 hash;
> -	const __le64 rss_mask =
> -		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
> -			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
>  
> -	decoded = libie_rx_pt_parse(rx_ptype);
>  	if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
>  		return;
>  
>  	if ((rx_desc->qw1 & rss_mask) == rss_mask) {
> -		hash = le64_get_bits(rx_desc->qw0,
> -				     IAVF_RX_DESC_LEGACY_QW0_RSS_M);
> +		hash = le64_get_bits(rx_desc->qw0, IAVF_RXD_LEGACY_QW0_RSS_M);
> +		libeth_rx_pt_set_hash(skb, hash, decoded);
> +	}
> +}
> +
> +/**
> + * iavf_flex_rx_hash - set the hash value in the skb
> + * @ring: descriptor ring
> + * @rx_desc: specific descriptor
> + * @skb: skb currently being received and modified
> + * @decoded: decoded packet type
> + *
> + * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
> + * descriptor writeback format.
> + **/
> +static void iavf_flex_rx_hash(const struct iavf_ring *ring,
> +			      const struct iavf_rx_desc *rx_desc,
> +			      struct sk_buff *skb,
> +			      const struct libeth_rx_pt decoded)
> +{
> +	__le64 qw1 = rx_desc->qw1;
> +	bool rss_valid;
> +	u32 hash;
> +
> +	if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
> +		return;
> +
> +	rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_QW1_RSS_VALID_M);
> +	if (rss_valid) {
> +		hash = le64_get_bits(qw1, IAVF_RXD_FLEX_QW1_RSS_HASH_M);
>  		libeth_rx_pt_set_hash(skb, hash, decoded);
>  	}
>  }
> @@ -998,14 +1074,23 @@ static void iavf_rx_hash(struct iavf_ring *ring,
>   * order to populate the hash, checksum, VLAN, protocol, and
>   * other fields within the skb.
>   **/
> -static void
> -iavf_process_skb_fields(struct iavf_ring *rx_ring,
> -			struct iavf_rx_desc *rx_desc, struct sk_buff *skb,
> -			u8 rx_ptype)
> +static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
> +				    const struct iavf_rx_desc *rx_desc,
> +				    struct sk_buff *skb, u32 rx_ptype)
>  {
> -	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
> +	struct libeth_rx_csum_decoded csum_bits = {};

Since you assign @csum_bits unconditionally below, there's no need to
initialize it here.

> +	struct libeth_rx_pt decoded;
>  
> -	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
> +	decoded = libie_rx_pt_parse(rx_ptype);
> +
> +	if (rx_ring->rxdid == VIRTCHNL_RXDID_1_32B_BASE) {
> +		iavf_legacy_rx_hash(rx_ring, rx_desc, skb, decoded);
> +		csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, rx_desc, decoded);
> +	} else {
> +		iavf_flex_rx_hash(rx_ring, rx_desc, skb, decoded);
> +		csum_bits = iavf_flex_rx_csum(rx_ring->vsi, rx_desc, decoded);
> +	}
> +	iavf_rx_csum(rx_ring->vsi, skb, decoded, csum_bits);
>  
>  	skb_record_rx_queue(skb, rx_ring->queue_index);

[...]

> +static struct libeth_rx_extracted
> +iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
> +			      const struct iavf_rx_desc *rx_desc)
> +{
> +	struct libeth_rx_extracted fields = {};
> +	__le64 qw0 = rx_desc->qw0;
> +	__le64 qw1 = rx_desc->qw1;
> +	__le64 qw2 = rx_desc->qw2;

Make them u64 right here with le64_to_cpu() and then just use FIELD_GET()
instead of le64_get_bits() below. On BE systems, each le64_get_bits() call
implies its own byteswap, so the same quad word would get swapped several
times instead of once.
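
I.e. roughly (untested sketch, only the byteswap/FIELD_GET() part shown):

	struct libeth_rx_extracted fields = {};
	u64 qw0 = le64_to_cpu(rx_desc->qw0);
	u64 qw1 = le64_to_cpu(rx_desc->qw1);
	u64 qw2 = le64_to_cpu(rx_desc->qw2);

	fields.end_of_packet = FIELD_GET(IAVF_RXD_LEGACY_QW1_EOP_M, qw1);
	fields.size = FIELD_GET(IAVF_RXD_LEGACY_QW1_LENGTH_M, qw1);
	fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_QW1_RXE_M, qw1);
	fields.rx_ptype = FIELD_GET(IAVF_RXD_LEGACY_QW1_PTYPE_M, qw1);

This way each quad word gets swapped at most once.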

> +	bool l2tag1p;
> +	bool l2tag2p;
> +
> +	fields.end_of_packet = le64_get_bits(qw1, IAVF_RXD_LEGACY_QW1_EOP_M);
> +	fields.size = le64_get_bits(qw1, IAVF_RXD_LEGACY_QW1_LENGTH_M);
> +	fields.rxe = le64_get_bits(qw1, IAVF_RXD_LEGACY_QW1_RXE_M);
> +	fields.rx_ptype = le64_get_bits(qw1, IAVF_RXD_LEGACY_QW1_PTYPE_M);
> +
> +	l2tag1p = le64_get_bits(qw1, IAVF_RXD_LEGACY_QW1_L2TAG1P_M);
> +	if (l2tag1p && (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1))
> +		fields.vlan_tag = le64_get_bits(qw0,
> +						IAVF_RXD_LEGACY_QW0_L2TAG1_M);
> +
> +	l2tag2p = le64_get_bits(qw2, IAVF_RXD_LEGACY_QW2_L2TAG2P_M);
> +	if (l2tag2p && (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2))
> +		fields.vlan_tag = le64_get_bits(qw2,
> +						IAVF_RXD_LEGACY_QW2_L2TAG2_M);
> +
> +	return fields;

As I wrote in the previous reply, this needs to be split into several
functions as not all the fields are always needed.
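
E.g. the VLAN tag could live in its own small helper and be fetched only
when it's actually needed, something like this (the name and the exact
split are only an illustration here, not a requirement):

	static u16
	iavf_extract_legacy_rx_vlan_tag(const struct iavf_ring *rx_ring,
					const struct iavf_rx_desc *rx_desc)
	{
		u64 qw1 = le64_to_cpu(rx_desc->qw1);
		u64 qw2 = le64_to_cpu(rx_desc->qw2);

		if ((rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) &&
		    FIELD_GET(IAVF_RXD_LEGACY_QW1_L2TAG1P_M, qw1))
			return FIELD_GET(IAVF_RXD_LEGACY_QW0_L2TAG1_M,
					 le64_to_cpu(rx_desc->qw0));

		if ((rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) &&
		    FIELD_GET(IAVF_RXD_LEGACY_QW2_L2TAG2P_M, qw2))
			return FIELD_GET(IAVF_RXD_LEGACY_QW2_L2TAG2_M, qw2);

		return 0;
	}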

> +}
> +
> +/**
> + * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
> + * @rx_ring: rx descriptor ring
> + * @rx_desc: the descriptor to process
> + *
> + * Decode the Rx descriptor and extract relevant information including the
> + * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
> + *
> + * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
> + * descriptor writeback format.
> + *
> + * Return: fields extracted from the Rx descriptor
> + */
> +static struct libeth_rx_extracted
> +iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
> +			    const struct iavf_rx_desc *rx_desc)
> +{
> +	struct libeth_rx_extracted fields = {};
> +	__le64 qw0 = rx_desc->qw0;
> +	__le64 qw1 = rx_desc->qw1;
> +	__le64 qw2 = rx_desc->qw2;

Same here.

> +	bool l2tag1p, l2tag2p;
> +
> +	fields.size = le64_get_bits(qw0, IAVF_RXD_FLEX_QW0_PKT_LEN_M);
> +	fields.rx_ptype = le64_get_bits(qw0, IAVF_RXD_FLEX_QW0_PTYPE_M);
> +	fields.rxe = le64_get_bits(qw1, IAVF_RXD_FLEX_QW1_RXE_M);
> +	fields.end_of_packet = le64_get_bits(qw1, IAVF_RXD_FLEX_QW1_EOP_M);
> +
> +	l2tag1p = le64_get_bits(qw1, IAVF_RXD_FLEX_QW1_L2TAG1P_M);
> +	if (l2tag1p && (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1))
> +		fields.vlan_tag = le64_get_bits(qw1,
> +						IAVF_RXD_FLEX_QW1_L2TAG1_M);
> +
> +	l2tag2p = le64_get_bits(qw2, IAVF_RXD_FLEX_QW2_L2TAG2P_M);
> +	if (l2tag2p && (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2))
> +		fields.vlan_tag = le64_get_bits(qw2,
> +						IAVF_RXD_FLEX_QW2_L2TAG2_2_M);
> +
> +	return fields;

Same here.

> +}
> +
> +static struct libeth_rx_extracted
> +iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
> +		       const struct iavf_rx_desc *rx_desc)
> +{
> +	if (rx_ring->rxdid == VIRTCHNL_RXDID_1_32B_BASE)
> +		return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
> +	else
> +		return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
> +}
> +
>  /**
>   * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
>   * @rx_ring: rx descriptor ring to transact packets on
> @@ -1142,13 +1317,9 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
>  	bool failure = false;
>  
>  	while (likely(total_rx_packets < (unsigned int)budget)) {
> +		struct libeth_rx_extracted fields = {};

Initialization is not needed since you always assign it.

>  		struct libeth_fqe *rx_buffer;
>  		struct iavf_rx_desc *rx_desc;
> -		u16 ext_status = 0;
> -		unsigned int size;
> -		u16 vlan_tag = 0;
> -		u8 rx_ptype;
> -		u64 qword;
>  
>  		/* return some buffers to hardware, one at a time is too slow */
>  		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {

[...]

> diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
> index 07e54db0bd4d..498746a83d35 100644
> --- a/drivers/net/ethernet/intel/iavf/iavf_type.h
> +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
> @@ -179,39 +179,13 @@ struct iavf_hw {
>  };
>  
>  struct iavf_rx_desc {
> -	aligned_le64 qw0;
> -	aligned_le64 qw1;
> -	aligned_le64 qw2;
> -	aligned_le64 qw3;
> -} __aligned(4 * sizeof(__le64));;
> -
> -enum iavf_rx_desc_status_bits {
> -	/* Note: These are predefined bit offsets */
> -	IAVF_RX_DESC_STATUS_DD_SHIFT		= 0,
> -	IAVF_RX_DESC_STATUS_EOF_SHIFT		= 1,
> -	IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,
> -	IAVF_RX_DESC_STATUS_L3L4P_SHIFT		= 3,
> -	IAVF_RX_DESC_STATUS_CRCP_SHIFT		= 4,
> -	IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
> -	IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
> -	/* Note: Bit 8 is reserved in X710 and XL710 */
> -	IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
> -	IAVF_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
> -	IAVF_RX_DESC_STATUS_FLM_SHIFT		= 11,
> -	IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
> -	IAVF_RX_DESC_STATUS_LPBK_SHIFT		= 14,
> -	IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
> -	IAVF_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
> -	/* Note: For non-tunnel packets INT_UDP_0 is the right status for
> -	 * UDP header
> -	 */
> -	IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
> -	IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
> +	__le64 qw0;
> +	__le64 qw1;
> +	__le64 qw2;
> +	__le64 qw3;
>  };

Some rebasing issues here. You redefine the struct you introduced in the
previous patch. I'd say the previous definition was more correct.

>  
> -#define IAVF_RXD_QW1_STATUS_SHIFT	0
> -#define IAVF_RXD_QW1_STATUS_MASK	((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
> -					 << IAVF_RXD_QW1_STATUS_SHIFT)
> +#define IAVF_RXD_QW1_STATUS_MASK		GENMASK(18, 0)
>  
>  #define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
>  #define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK  (0x3UL << \
> @@ -228,22 +202,6 @@ enum iavf_rx_desc_fltstat_values {
>  	IAVF_RX_DESC_FLTSTAT_RSS_HASH	= 3,
>  };
>  
> -#define IAVF_RXD_QW1_ERROR_SHIFT	19
> -#define IAVF_RXD_QW1_ERROR_MASK		(0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
> -
> -enum iavf_rx_desc_error_bits {
> -	/* Note: These are predefined bit offsets */
> -	IAVF_RX_DESC_ERROR_RXE_SHIFT		= 0,
> -	IAVF_RX_DESC_ERROR_RECIPE_SHIFT		= 1,
> -	IAVF_RX_DESC_ERROR_HBO_SHIFT		= 2,
> -	IAVF_RX_DESC_ERROR_L3L4E_SHIFT		= 3, /* 3 BITS */
> -	IAVF_RX_DESC_ERROR_IPE_SHIFT		= 3,
> -	IAVF_RX_DESC_ERROR_L4E_SHIFT		= 4,
> -	IAVF_RX_DESC_ERROR_EIPE_SHIFT		= 5,
> -	IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6,
> -	IAVF_RX_DESC_ERROR_PPRS_SHIFT		= 7
> -};
> -
>  enum iavf_rx_desc_error_l3l4e_fcoe_masks {
>  	IAVF_RX_DESC_ERROR_L3L4E_NONE		= 0,
>  	IAVF_RX_DESC_ERROR_L3L4E_PROT		= 1,
> @@ -252,13 +210,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
>  	IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN	= 4
>  };
>  
> -#define IAVF_RXD_QW1_PTYPE_SHIFT	30
> -#define IAVF_RXD_QW1_PTYPE_MASK		(0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
> -
> -#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT	38
> -#define IAVF_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
> -					 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
> -
>  #define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT	52
>  #define IAVF_RXD_QW1_LENGTH_HBUF_MASK	(0x7FFULL << \
>  					 IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
> @@ -505,9 +456,85 @@ struct iavf_eth_stats {
>  	u64 tx_errors;			/* tepc */
>  };
>  
> -#define IAVF_RX_DESC_LEGACY_QW0_RSS_M		GENMASK_ULL(63, 32)
> -#define IAVF_RX_DESC_LEGACY_QW0_L2TAG1_M	GENMASK_ULL(33, 16)
> -#define IAVF_RX_DESC_LEGACY_QW2_L2TAG2_2_M	GENMASK_ULL(63, 48)
> -#define IAVF_RX_DESC_LEGACY_QW2_EXT_STATUS_M	GENMASK_ULL(11, 0)

Define these four correctly in the previous patch, so you wouldn't need to
redefine them here once again?

> +/* LEGACY DESCRIPTOR */
> +/* Quad Word 0 */
> +#define IAVF_RXD_LEGACY_QW0_RSS_M		GENMASK_ULL(63, 32)
> +#define IAVF_RXD_LEGACY_QW0_L2TAG1_M		GENMASK_ULL(31, 16)

[...]

> diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
> index 4163dfe90b4a..d60fba84b109 100644
> --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
> +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
> @@ -403,6 +403,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
>  	int pairs = adapter->num_active_queues;
>  	struct virtchnl_queue_pair_info *vqpi;
>  	u32 i, max_frame;
> +	u8 rx_flags = 0;
>  	size_t len;
>  
>  	max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
> @@ -420,6 +421,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
>  	if (!vqci)
>  		return;
>  
> +	if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
> +		rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;

This is not related to the Rx descriptor refactoring, I'd say?

> +
>  	vqci->vsi_id = adapter->vsi_res->vsi_id;
>  	vqci->num_queue_pairs = pairs;
>  	vqpi = vqci->qpair;
> @@ -442,6 +446,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
>  		if (CRC_OFFLOAD_ALLOWED(adapter))
>  			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
>  						   NETIF_F_RXFCS);
> +		vqpi->rxq.flags = rx_flags;
>  		vqpi++;
>  	}

Thanks,
Olek
