lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <202310061208.tATL34LR-lkp@intel.com>
Date: Fri, 6 Oct 2023 12:38:23 +0800
From: kernel test robot <lkp@...el.com>
To: Stanislav Fomichev <sdf@...gle.com>, bpf@...r.kernel.org
Cc: llvm@...ts.linux.dev, oe-kbuild-all@...ts.linux.dev, ast@...nel.org,
	daniel@...earbox.net, andrii@...nel.org, martin.lau@...ux.dev,
	song@...nel.org, yhs@...com, john.fastabend@...il.com,
	kpsingh@...nel.org, sdf@...gle.com, haoluo@...gle.com,
	jolsa@...nel.org, kuba@...nel.org, toke@...nel.org,
	willemb@...gle.com, dsahern@...nel.org, magnus.karlsson@...el.com,
	bjorn@...nel.org, maciej.fijalkowski@...el.com, hawk@...nel.org,
	yoong.siang.song@...el.com, netdev@...r.kernel.org,
	xdp-hints@...-project.net
Subject: Re: [PATCH bpf-next v3 05/10] net: stmmac: Add Tx HWTS support to
 XDP ZC

Hi Stanislav,

kernel test robot noticed the following build errors:

[auto build test ERROR on bpf-next/master]

url:    https://github.com/intel-lab-lkp/linux/commits/Stanislav-Fomichev/xsk-Support-tx_metadata_len/20231004-040718
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link:    https://lore.kernel.org/r/20231003200522.1914523-6-sdf%40google.com
patch subject: [PATCH bpf-next v3 05/10] net: stmmac: Add Tx HWTS support to XDP ZC
config: riscv-rv32_defconfig (https://download.01.org/0day-ci/archive/20231006/202310061208.tATL34LR-lkp@intel.com/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project.git 4a5ac14ee968ff0ad5d2cc1ffa0299048db4c88a)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231006/202310061208.tATL34LR-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202310061208.tATL34LR-lkp@intel.com/

All errors (new ones prefixed by >>):

>> drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:2554:3: error: call to undeclared function 'xsk_tx_metadata_to_compl'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
    2554 |                 xsk_tx_metadata_to_compl(meta, &tx_q->tx_skbuff_dma[entry].xsk_meta);
         |                 ^
   drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:2554:3: note: did you mean 'xsk_tx_metadata_complete'?
   include/net/xdp_sock.h:185:20: note: 'xsk_tx_metadata_complete' declared here
     185 | static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
         |                    ^
   1 error generated.


vim +/xsk_tx_metadata_to_compl +2554 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

  2464	
  2465	static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
  2466	{
  2467		struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
  2468		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
  2469		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
  2470		struct xsk_buff_pool *pool = tx_q->xsk_pool;
  2471		unsigned int entry = tx_q->cur_tx;
  2472		struct dma_desc *tx_desc = NULL;
  2473		struct xdp_desc xdp_desc;
  2474		bool work_done = true;
  2475		u32 tx_set_ic_bit = 0;
  2476		unsigned long flags;
  2477	
  2478		/* Avoids TX time-out as we are sharing with slow path */
  2479		txq_trans_cond_update(nq);
  2480	
  2481		budget = min(budget, stmmac_tx_avail(priv, queue));
  2482	
  2483		while (budget-- > 0) {
  2484			struct stmmac_metadata_request meta_req;
  2485			struct xsk_tx_metadata *meta = NULL;
  2486			dma_addr_t dma_addr;
  2487			bool set_ic;
  2488	
  2489			/* We are sharing with slow path and stop XSK TX desc submission when
  2490			 * available TX ring is less than threshold.
  2491			 */
  2492			if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
  2493			    !netif_carrier_ok(priv->dev)) {
  2494				work_done = false;
  2495				break;
  2496			}
  2497	
  2498			if (!xsk_tx_peek_desc(pool, &xdp_desc))
  2499				break;
  2500	
  2501			if (likely(priv->extend_desc))
  2502				tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
  2503			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
  2504				tx_desc = &tx_q->dma_entx[entry].basic;
  2505			else
  2506				tx_desc = tx_q->dma_tx + entry;
  2507	
  2508			dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
  2509			meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
  2510			xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
  2511	
  2512			tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
  2513	
  2514		/* To return XDP buffer to XSK pool, we simply call
  2515			 * xsk_tx_completed(), so we don't need to fill up
  2516			 * 'buf' and 'xdpf'.
  2517			 */
  2518			tx_q->tx_skbuff_dma[entry].buf = 0;
  2519			tx_q->xdpf[entry] = NULL;
  2520	
  2521			tx_q->tx_skbuff_dma[entry].map_as_page = false;
  2522			tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
  2523			tx_q->tx_skbuff_dma[entry].last_segment = true;
  2524			tx_q->tx_skbuff_dma[entry].is_jumbo = false;
  2525	
  2526			stmmac_set_desc_addr(priv, tx_desc, dma_addr);
  2527	
  2528			tx_q->tx_count_frames++;
  2529	
  2530			if (!priv->tx_coal_frames[queue])
  2531				set_ic = false;
  2532			else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
  2533				set_ic = true;
  2534			else
  2535				set_ic = false;
  2536	
  2537			meta_req.priv = priv;
  2538			meta_req.tx_desc = tx_desc;
  2539			meta_req.set_ic = &set_ic;
  2540			xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops, &meta_req);
  2541	
  2542			if (set_ic) {
  2543				tx_q->tx_count_frames = 0;
  2544				stmmac_set_tx_ic(priv, tx_desc);
  2545				tx_set_ic_bit++;
  2546			}
  2547	
  2548			stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
  2549					       true, priv->mode, true, true,
  2550					       xdp_desc.len);
  2551	
  2552			stmmac_enable_dma_transmission(priv, priv->ioaddr);
  2553	
> 2554			xsk_tx_metadata_to_compl(meta, &tx_q->tx_skbuff_dma[entry].xsk_meta);
  2555	
  2556			tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
  2557			entry = tx_q->cur_tx;
  2558		}
  2559		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
  2560		txq_stats->tx_set_ic_bit += tx_set_ic_bit;
  2561		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
  2562	
  2563		if (tx_desc) {
  2564			stmmac_flush_tx_descriptors(priv, queue);
  2565			xsk_tx_release(pool);
  2566		}
  2567	
  2568		/* Return true if both of the following conditions are met
  2569		 *  a) TX Budget is still available
  2570		 *  b) work_done = true when XSK TX desc peek is empty (no more
  2571		 *     pending XSK TX for transmission)
  2572		 */
  2573		return !!budget && work_done;
  2574	}
  2575	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ