Message-ID: <202106250554.Z8MN8MPD-lkp@intel.com>
Date:   Fri, 25 Jun 2021 05:55:22 +0800
From:   kernel test robot <lkp@...el.com>
To:     Bailey Forrest <bcf@...gle.com>,
        "David S . Miller" <davem@...emloft.net>
Cc:     kbuild-all@...ts.01.org, netdev@...r.kernel.org,
        Willem de Bruijn <willemb@...gle.com>,
        Catherine Sullivan <csully@...gle.com>
Subject: Re: [PATCH net-next 15/16] gve: DQO: Add TX path

Hi Bailey,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on 35713d9b8f090d7a226e4aaeeb742265cde33c82]

url:    https://github.com/0day-ci/linux/commits/Bailey-Forrest/gve-Introduce-DQO-descriptor-format/20210625-021110
base:   35713d9b8f090d7a226e4aaeeb742265cde33c82
config: i386-randconfig-a011-20210622 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/af0833aafca5d9abd931a16ee9e761e85f5ad965
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Bailey-Forrest/gve-Introduce-DQO-descriptor-format/20210625-021110
        git checkout af0833aafca5d9abd931a16ee9e761e85f5ad965
        # save the attached .config to linux build tree
        make W=1 ARCH=i386 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>

All warnings (new ones prefixed by >>):

   drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'gve_tx_clean_pending_packets':
   drivers/net/ethernet/google/gve/gve_tx_dqo.c:88:27: warning: unused variable 'buf' [-Wunused-variable]
      88 |    struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
         |                           ^~~
   drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'gve_tx_add_skb_no_copy_dqo':
   drivers/net/ethernet/google/gve/gve_tx_dqo.c:496:26: warning: unused variable 'buf' [-Wunused-variable]
     496 |   struct gve_tx_dma_buf *buf =
         |                          ^~~
   drivers/net/ethernet/google/gve/gve_tx_dqo.c:515:26: warning: unused variable 'buf' [-Wunused-variable]
     515 |   struct gve_tx_dma_buf *buf =
         |                          ^~~
   drivers/net/ethernet/google/gve/gve_tx_dqo.c:556:26: warning: unused variable 'buf' [-Wunused-variable]
     556 |   struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
         |                          ^~~
   drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'remove_from_list':
>> drivers/net/ethernet/google/gve/gve_tx_dqo.c:730:6: warning: variable 'index' set but not used [-Wunused-but-set-variable]
     730 |  s16 index, prev_index, next_index;
         |      ^~~~~
   drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'gve_unmap_packet':
>> drivers/net/ethernet/google/gve/gve_tx_dqo.c:753:25: warning: variable 'buf' set but not used [-Wunused-but-set-variable]
     753 |  struct gve_tx_dma_buf *buf;
         |                         ^~~
   In file included from include/linux/printk.h:7,
                    from include/linux/kernel.h:17,
                    from arch/x86/include/asm/percpu.h:27,
                    from arch/x86/include/asm/current.h:6,
                    from include/linux/sched.h:12,
                    from include/linux/ratelimit.h:6,
                    from include/linux/dev_printk.h:16,
                    from include/linux/device.h:15,
                    from include/linux/dma-mapping.h:7,
                    from drivers/net/ethernet/google/gve/gve.h:10,
                    from drivers/net/ethernet/google/gve/gve_tx_dqo.c:7:
   drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'remove_miss_completions':
>> include/linux/kern_levels.h:5:18: warning: format '%ld' expects argument of type 'long int', but argument 3 has type 'int' [-Wformat=]
       5 | #define KERN_SOH "\001"  /* ASCII Start Of Header */
         |                  ^~~~~~
   include/linux/kern_levels.h:11:18: note: in expansion of macro 'KERN_SOH'
      11 | #define KERN_ERR KERN_SOH "3" /* error conditions */
         |                  ^~~~~~~~
   include/linux/printk.h:343:9: note: in expansion of macro 'KERN_ERR'
     343 |  printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
         |         ^~~~~~~~
   include/linux/net.h:247:3: note: in expansion of macro 'pr_err'
     247 |   function(__VA_ARGS__);    \
         |   ^~~~~~~~
   include/linux/net.h:257:2: note: in expansion of macro 'net_ratelimited_function'
     257 |  net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
         |  ^~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net/ethernet/google/gve/gve_tx_dqo.c:893:3: note: in expansion of macro 'net_err_ratelimited'
     893 |   net_err_ratelimited("%s: No reinjection completion was received for: %ld.\n",
         |   ^~~~~~~~~~~~~~~~~~~
   drivers/net/ethernet/google/gve/gve_tx_dqo.c:893:74: note: format string is defined here
     893 |   net_err_ratelimited("%s: No reinjection completion was received for: %ld.\n",
         |                                                                        ~~^
         |                                                                          |
         |                                                                          long int
         |                                                                        %d
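
Two of the flagged warnings look mechanically fixable. A minimal
sketch, not a proposed patch (the argument behind the %ld conversion
is truncated in this log, so the name 'missed_index' below is
hypothetical):

	/* remove_from_list(): 'index' is assigned but never read, so
	 * the local and its assignment can simply be dropped.
	 */
	s16 prev_index, next_index;

	prev_index = pending_packet->prev;
	next_index = pending_packet->next;

	/* gve_tx_dqo.c:893: '%ld' expects a long but receives an int.
	 * Either cast the argument or switch the specifier to '%d':
	 */
	net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
			    priv->dev->name, (int)missed_index);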


vim +/index +730 drivers/net/ethernet/google/gve/gve_tx_dqo.c

   447	
   448	/* Returns 0 on success, or < 0 on error.
   449	 *
   450	 * Before this function is called, the caller must ensure
   451	 * gve_has_pending_packet(tx) returns true.
   452	 */
   453	static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
   454					      struct sk_buff *skb)
   455	{
   456		const struct skb_shared_info *shinfo = skb_shinfo(skb);
   457		const bool is_gso = skb_is_gso(skb);
   458		u32 desc_idx = tx->dqo_tx.tail;
   459	
   460		struct gve_tx_pending_packet_dqo *pending_packet;
   461		struct gve_tx_metadata_dqo metadata;
   462		s16 completion_tag;
   463		int i;
   464	
   465		pending_packet = gve_alloc_pending_packet(tx);
   466		pending_packet->skb = skb;
   467		pending_packet->num_bufs = 0;
   468		completion_tag = pending_packet - tx->dqo.pending_packets;
   469	
   470		gve_extract_tx_metadata_dqo(skb, &metadata);
   471		if (is_gso) {
   472			int header_len = gve_prep_tso(skb);
   473	
   474			if (unlikely(header_len < 0))
   475				goto err;
   476	
   477			gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
   478						 skb, &metadata, header_len);
   479			desc_idx = (desc_idx + 1) & tx->mask;
   480		}
   481	
   482		gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
   483					     &metadata);
   484		desc_idx = (desc_idx + 1) & tx->mask;
   485	
   486		/* Note: HW requires that the size of a non-TSO packet be within the
   487		 * range of [17, 9728].
   488		 *
   489		 * We don't double check because
   490		 * - We limited `netdev->min_mtu` to ETH_MIN_MTU.
   491		 * - Hypervisor won't allow MTU larger than 9216.
   492		 */
   493	
   494		/* Map the linear portion of skb */
   495		{
   496			struct gve_tx_dma_buf *buf =
   497				&pending_packet->bufs[pending_packet->num_bufs];
   498			u32 len = skb_headlen(skb);
   499			dma_addr_t addr;
   500	
   501			addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
   502			if (unlikely(dma_mapping_error(tx->dev, addr)))
   503				goto err;
   504	
   505			dma_unmap_len_set(buf, len, len);
   506			dma_unmap_addr_set(buf, dma, addr);
   507			++pending_packet->num_bufs;
   508	
   509			gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
   510						 completion_tag,
   511						 /*eop=*/shinfo->nr_frags == 0, is_gso);
   512		}
   513	
   514		for (i = 0; i < shinfo->nr_frags; i++) {
   515			struct gve_tx_dma_buf *buf =
   516				&pending_packet->bufs[pending_packet->num_bufs];
   517			const skb_frag_t *frag = &shinfo->frags[i];
   518			bool is_eop = i == (shinfo->nr_frags - 1);
   519			u32 len = skb_frag_size(frag);
   520			dma_addr_t addr;
   521	
   522			addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
   523			if (unlikely(dma_mapping_error(tx->dev, addr)))
   524				goto err;
   525	
   526			dma_unmap_len_set(buf, len, len);
   527			dma_unmap_addr_set(buf, dma, addr);
   528			++pending_packet->num_bufs;
   529	
   530			gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
   531						 completion_tag, is_eop, is_gso);
   532		}
   533	
   534		/* Commit the changes to our state */
   535		tx->dqo_tx.tail = desc_idx;
   536	
   537		/* Request a descriptor completion on the last descriptor of the
   538		 * packet if we are allowed to by the HW enforced interval.
   539		 */
   540		{
   541			u32 last_desc_idx = (desc_idx - 1) & tx->mask;
   542			u32 last_report_event_interval =
   543				(last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
   544	
   545			if (unlikely(last_report_event_interval >=
   546				     GVE_TX_MIN_RE_INTERVAL)) {
   547				tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
   548				tx->dqo_tx.last_re_idx = last_desc_idx;
   549			}
   550		}
   551	
   552		return 0;
   553	
   554	err:
   555		for (i = 0; i < pending_packet->num_bufs; i++) {
 > 556			struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
   557	
   558			if (i == 0) {
   559				dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
   560						 dma_unmap_len(buf, len),
   561						 DMA_TO_DEVICE);
   562			} else {
   563				dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
   564					       dma_unmap_len(buf, len), DMA_TO_DEVICE);
   565			}
   566		}
   567	
   568		pending_packet->skb = NULL;
   569		pending_packet->num_bufs = 0;
   570		gve_free_pending_packet(tx, pending_packet);
   571	
   572		return -1;
   573	}
   574	
   575	static int gve_num_descs_per_buf(size_t size)
   576	{
   577		return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO);
   578	}
   579	
   580	static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
   581	{
   582		const struct skb_shared_info *shinfo = skb_shinfo(skb);
   583		int num_descs;
   584		int i;
   585	
   586		num_descs = gve_num_descs_per_buf(skb_headlen(skb));
   587	
   588		for (i = 0; i < shinfo->nr_frags; i++) {
   589			unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);
   590	
   591			num_descs += gve_num_descs_per_buf(frag_size);
   592		}
   593	
   594		return num_descs;
   595	}
   596	
   597	/* Returns true if HW is capable of sending TSO represented by `skb`.
   598	 *
   599	 * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
   600	 * - The header is counted as one buffer for every single segment.
   601	 * - A buffer which is split between two segments is counted for both.
   602	 * - If a buffer contains both header and payload, it is counted as two buffers.
   603	 */
   604	static bool gve_can_send_tso(const struct sk_buff *skb)
   605	{
   606		const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
   607		const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
   608		const struct skb_shared_info *shinfo = skb_shinfo(skb);
   609		const int gso_size = shinfo->gso_size;
   610		int cur_seg_num_bufs;
   611		int cur_seg_size;
   612		int i;
   613	
   614		cur_seg_size = skb_headlen(skb) - header_len;
   615		cur_seg_num_bufs = cur_seg_size > 0;
   616	
   617		for (i = 0; i < shinfo->nr_frags; i++) {
   618			if (cur_seg_size >= gso_size) {
   619				cur_seg_size %= gso_size;
   620				cur_seg_num_bufs = cur_seg_size > 0;
   621			}
   622	
   623			if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
   624				return false;
   625	
   626			cur_seg_size += skb_frag_size(&shinfo->frags[i]);
   627		}
   628	
   629		return true;
   630	}
   631	
   632	/* Attempt to transmit specified SKB.
   633	 *
   634	 * Returns 0 if the SKB was transmitted or dropped.
   635	 * Returns -1 if there is not currently enough space to transmit the SKB.
   636	 */
   637	static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
   638				  struct sk_buff *skb)
   639	{
   640		int num_buffer_descs;
   641		int total_num_descs;
   642	
   643		if (skb_is_gso(skb)) {
   644			/* If TSO doesn't meet HW requirements, attempt to linearize the
   645			 * packet.
   646			 */
   647			if (unlikely(!gve_can_send_tso(skb) &&
   648				     skb_linearize(skb) < 0)) {
   649				net_err_ratelimited("%s: Failed to transmit TSO packet\n",
   650						    priv->dev->name);
   651				goto drop;
   652			}
   653	
   654			num_buffer_descs = gve_num_buffer_descs_needed(skb);
   655		} else {
   656			num_buffer_descs = gve_num_buffer_descs_needed(skb);
   657	
   658			if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
   659				if (unlikely(skb_linearize(skb) < 0))
   660					goto drop;
   661	
   662				num_buffer_descs = 1;
   663			}
   664		}
   665	
   666		/* Metadata + (optional TSO) + data descriptors. */
   667		total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
   668		if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
   669				GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP))) {
   670			return -1;
   671		}
   672	
   673		if (unlikely(gve_tx_add_skb_no_copy_dqo(tx, skb) < 0))
   674			goto drop;
   675	
   676		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
   677		skb_tx_timestamp(skb);
   678		return 0;
   679	
   680	drop:
   681		tx->dropped_pkt++;
   682		dev_kfree_skb_any(skb);
   683		return 0;
   684	}
   685	
   686	/* Transmit a given skb and ring the doorbell. */
   687	netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
   688	{
   689		struct gve_priv *priv = netdev_priv(dev);
   690		struct gve_tx_ring *tx;
   691	
   692		tx = &priv->tx[skb_get_queue_mapping(skb)];
   693		if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
   694			/* We need to ring the txq doorbell -- we have stopped the Tx
   695			 * queue for want of resources, but prior calls to gve_tx()
   696			 * may have added descriptors without ringing the doorbell.
   697			 */
   698			gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
   699			return NETDEV_TX_BUSY;
   700		}
   701	
   702		if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
   703			return NETDEV_TX_OK;
   704	
   705		gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
   706		return NETDEV_TX_OK;
   707	}
   708	
   709	static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
   710				struct gve_tx_pending_packet_dqo *pending_packet)
   711	{
   712		s16 old_tail, index;
   713	
   714		index = pending_packet - tx->dqo.pending_packets;
   715		old_tail = list->tail;
   716		list->tail = index;
   717		if (old_tail == -1)
   718			list->head = index;
   719		else
   720			tx->dqo.pending_packets[old_tail].next = index;
   721	
   722		pending_packet->next = -1;
   723		pending_packet->prev = old_tail;
   724	}
   725	
   726	static void remove_from_list(struct gve_tx_ring *tx,
   727				     struct gve_index_list *list,
   728				     struct gve_tx_pending_packet_dqo *pending_packet)
   729	{
 > 730		s16 index, prev_index, next_index;
   731	
   732		index = pending_packet - tx->dqo.pending_packets;
   733		prev_index = pending_packet->prev;
   734		next_index = pending_packet->next;
   735	
   736		if (prev_index == -1) {
   737			/* Node is head */
   738			list->head = next_index;
   739		} else {
   740			tx->dqo.pending_packets[prev_index].next = next_index;
   741		}
   742		if (next_index == -1) {
   743			/* Node is tail */
   744			list->tail = prev_index;
   745		} else {
   746			tx->dqo.pending_packets[next_index].prev = prev_index;
   747		}
   748	}
   749	
   750	static void gve_unmap_packet(struct device *dev,
   751				     struct gve_tx_pending_packet_dqo *pending_packet)
   752	{
 > 753		struct gve_tx_dma_buf *buf;
   754		int i;
   755	
   756		/* SKB linear portion is guaranteed to be mapped */
   757		buf = &pending_packet->bufs[0];
   758		dma_unmap_single(dev, dma_unmap_addr(buf, dma),
   759				 dma_unmap_len(buf, len), DMA_TO_DEVICE);
   760		for (i = 1; i < pending_packet->num_bufs; i++) {
   761			buf = &pending_packet->bufs[i];
   762			dma_unmap_page(dev, dma_unmap_addr(buf, dma),
   763				       dma_unmap_len(buf, len), DMA_TO_DEVICE);
   764		}
   765		pending_packet->num_bufs = 0;
   766	}
   767	
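
The unused-'buf' warnings above are config dependent rather than logic
bugs: dma_unmap_addr()/dma_unmap_len() and their _set() variants
compile away when CONFIG_NEED_DMA_MAP_STATE is not set, which this
i386 randconfig presumably leaves unset. Every use of 'buf' then
vanishes during preprocessing and only the initializer survives. For
reference, the macros in include/linux/dma-mapping.h have roughly this
shape (abridged):

	#ifdef CONFIG_NEED_DMA_MAP_STATE
	#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
	#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
	#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
	#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
	#else
	#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
	#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
	#define dma_unmap_len(PTR, LEN_NAME)             (0)
	#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
	#endif

One possible way to silence the warnings on such configs is to pass
&pending_packet->bufs[i] into the macros directly instead of naming a
'buf' local; whether that is the preferred fix is of course up to the
driver authors.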

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
