Date:   Wed, 10 Aug 2022 22:41:03 +0800
From:   "shenjian (K)" <shenjian15@...wei.com>
To:     Alexander Lobakin <alexandr.lobakin@...el.com>
CC:     <davem@...emloft.net>, <kuba@...nel.org>, <andrew@...n.ch>,
        <ecree.xilinx@...il.com>, <hkallweit1@...il.com>,
        <saeed@...nel.org>, <leon@...nel.org>, <netdev@...r.kernel.org>,
        <linuxarm@...neuler.org>
Subject: Re: [RFCv7 PATCH net-next 23/36] net: adjust the build check for
 net_gso_ok()



On 2022/8/10 19:09, Alexander Lobakin wrote:
> From: Jian Shen <shenjian15@...wei.com>
> Date: Wed, 10 Aug 2022 11:06:11 +0800
>
>> Introduce the macro GSO_INDEX(x) to replace the NETIF_F_XXX
>> feature shift checks, since all the NETIF_F_XXX macros will
>> be removed later.
>>
>> Signed-off-by: Jian Shen <shenjian15@...wei.com>
>> ---
>>   include/linux/netdevice.h | 40 ++++++++++++++++++++-------------------
>>   1 file changed, 21 insertions(+), 19 deletions(-)
>>
>> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
>> index 1bd5dcbc884d..b01af2a3838d 100644
>> --- a/include/linux/netdevice.h
>> +++ b/include/linux/netdevice.h
>> @@ -4886,28 +4886,30 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
>>   
>>   static inline bool net_gso_ok(netdev_features_t features, int gso_type)
>>   {
>> +#define GSO_INDEX(x)	((1ULL << (x)) >> NETIF_F_GSO_SHIFT)
> What if we get a new GSO offload whose corresponding bit index is
> 64 or higher?
> You could instead do
>
> #define __SKB_GSO_FLAG(x)	(1ULL << (x))
>
> enum {
> 	SKB_GSO_TCPV4_BIT	= 0,
> 	SKB_GSO_DODGY_BIT	= 1,
> 	...,
> };
> enum {
> 	SKB_GSO_TCPV4		= __SKB_GSO_FLAG(SKB_GSO_TCPV4_BIT),
> 	SKB_GSO_DODGY		= __SKB_GSO_FLAG(SKB_GSO_DODGY_BIT),
> 	...,
> };
>
> and then just
>
> #define ASSERT_GSO_TYPE(fl, feat)	\
> 	static_assert((fl) == (feat) - NETIF_F_GSO_SHIFT)
>
> 	...
> 	ASSERT_GSO_TYPE(SKB_GSO_TCPV4_BIT, NETIF_F_TSO_BIT);
> 	ASSERT_GSO_TYPE(SKB_GSO_DODGY_BIT, NETIF_F_GSO_ROBUST_BIT);
> 	...
Yes, it would break if a new GSO offload bit index reaches 64 or
higher; the __SKB_GSO_FLAG(x) macro approach is better.
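
As a minimal standalone sketch of the two checks being compared (not
kernel code; NETIF_F_GSO_SHIFT and the bit values below are hard-coded
stand-ins that only mimic the kernel layout), something like the
following builds with -std=c11 and shows why asserting on bit indices
avoids the 64-bit shift limit:

/*
 * Standalone sketch contrasting the two compile-time checks discussed
 * above.  GSO_INDEX() shifts a flag down, so it relies on 1ULL << (x)
 * being defined, which stops being true once a feature bit index
 * reaches 64; asserting on the bit indices themselves has no such
 * limit.  The constants are illustrative stand-ins only.
 */
#include <assert.h>

#define NETIF_F_GSO_SHIFT	16	/* first GSO feature bit (stand-in) */
#define NETIF_F_TSO_BIT		(NETIF_F_GSO_SHIFT + 0)
#define SKB_GSO_TCPV4_BIT	0
#define SKB_GSO_TCPV4		(1ULL << SKB_GSO_TCPV4_BIT)

/* Patch approach: derive the skb GSO flag from a shifted feature flag. */
#define GSO_INDEX(x)		((1ULL << (x)) >> NETIF_F_GSO_SHIFT)

/* Suggested approach: compare the bit indices directly, no shifting. */
#define ASSERT_GSO_TYPE(fl, feat) \
	static_assert((fl) == (feat) - NETIF_F_GSO_SHIFT, "GSO bit mismatch")

/* Both checks agree while every feature bit index stays below 64 ... */
static_assert(SKB_GSO_TCPV4 == GSO_INDEX(NETIF_F_TSO_BIT), "flag check");
ASSERT_GSO_TYPE(SKB_GSO_TCPV4_BIT, NETIF_F_TSO_BIT);

/*
 * ... but GSO_INDEX() applied to a hypothetical bit 64 would shift 1ULL
 * by 64, which is undefined, while the index comparison would still be
 * an ordinary, well-defined integer check.
 */
int main(void) { return 0; }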

>> +
>>   	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
>>   
>>   	/* check flags correspondence */
>> -	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
>> -	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
>> +	BUILD_BUG_ON(SKB_GSO_TCPV4   != GSO_INDEX(NETIF_F_TSO_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_DODGY   != GSO_INDEX(NETIF_F_GSO_ROBUST_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_TCP_ECN != GSO_INDEX(NETIF_F_TSO_ECN_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != GSO_INDEX(NETIF_F_TSO_MANGLEID_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_TCPV6   != GSO_INDEX(NETIF_F_TSO6_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_FCOE    != GSO_INDEX(NETIF_F_FSO_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_GRE     != GSO_INDEX(NETIF_F_GSO_GRE_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != GSO_INDEX(NETIF_F_GSO_GRE_CSUM_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_IPXIP4  != GSO_INDEX(NETIF_F_GSO_IPXIP4_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_IPXIP6  != GSO_INDEX(NETIF_F_GSO_IPXIP6_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != GSO_INDEX(NETIF_F_GSO_UDP_TUNNEL_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != GSO_INDEX(NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_PARTIAL != GSO_INDEX(NETIF_F_GSO_PARTIAL_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != GSO_INDEX(NETIF_F_GSO_TUNNEL_REMCSUM_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_SCTP    != GSO_INDEX(NETIF_F_GSO_SCTP_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_ESP != GSO_INDEX(NETIF_F_GSO_ESP_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_UDP != GSO_INDEX(NETIF_F_GSO_UDP_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_UDP_L4 != GSO_INDEX(NETIF_F_GSO_UDP_L4_BIT));
>> +	BUILD_BUG_ON(SKB_GSO_FRAGLIST != GSO_INDEX(NETIF_F_GSO_FRAGLIST_BIT));
>>   
>>   	return (features & feature) == feature;
>>   }
>> -- 
>> 2.33.0
> Thanks,
> Olek
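
For completeness, the runtime half of net_gso_ok() quoted above is a
plain containment check: the skb's GSO type bits are shifted up into
feature space and every one of them must also be set in the device's
feature mask. A standalone sketch of that behaviour, again with the
constants hard-coded only as illustrative stand-ins for the kernel
values:

/*
 * Standalone illustration (not kernel code) of the containment check
 * at the end of net_gso_ok(): shift the skb's GSO type bits up into
 * feature space and require that every one of them is present in
 * `features`.  The constants only mimic the kernel layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t netdev_features_t;

#define NETIF_F_GSO_SHIFT	16
#define SKB_GSO_TCPV4		(1ULL << 0)
#define SKB_GSO_TCP_ECN		(1ULL << 2)
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)

static bool net_gso_ok(netdev_features_t features, uint64_t gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* true only if every requested GSO type is offered by the device */
	return (features & feature) == feature;
}

int main(void)
{
	netdev_features_t dev_features = NETIF_F_TSO;	/* TSO, but no TSO_ECN */

	printf("%d\n", net_gso_ok(dev_features, SKB_GSO_TCPV4));	/* 1 */
	printf("%d\n", net_gso_ok(dev_features,
				  SKB_GSO_TCPV4 | SKB_GSO_TCP_ECN));	/* 0 */
	return 0;
}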
