Message-ID: <202109102328.2HUc7lcO-lkp@intel.com>
Date: Fri, 10 Sep 2021 23:36:33 +0800
From: kernel test robot <lkp@...el.com>
To: Atish Patra <atish.patra@....com>
Cc: llvm@...ts.linux.dev, kbuild-all@...ts.01.org,
Atish Patra <Atish.Patra@....com>, linux-kernel@...r.kernel.org
Subject: [atishp04:sbi_pmu_v3_0day_bot 9/11]
 drivers/perf/riscv_pmu_sbi.c:471:20: warning: stack frame size (1344)
 exceeds limit (1024) in function 'pmu_sbi_ovf_handler'
tree: https://github.com/atishp04/linux sbi_pmu_v3_0day_bot
head: 9cf50156bc48c6cb6ee7760c14b4b1d4e804050d
commit: eeccd9f47f1980fea12c614af3dcd48c788f52c4 [9/11] RISC-V: Add interrupt support for perf
config: riscv-randconfig-r031-20210910 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project 261cbe98c38f8c1ee1a482fe76511110e790f58a)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install riscv cross compiling tool for clang build
        # apt-get install binutils-riscv64-linux-gnu
        # https://github.com/atishp04/linux/commit/eeccd9f47f1980fea12c614af3dcd48c788f52c4
        git remote add atishp04 https://github.com/atishp04/linux
        git fetch --no-tags atishp04 sbi_pmu_v3_0day_bot
        git checkout eeccd9f47f1980fea12c614af3dcd48c788f52c4
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=riscv
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>
All warnings (new ones prefixed by >>):
>> drivers/perf/riscv_pmu_sbi.c:471:20: warning: stack frame size (1344) exceeds limit (1024) in function 'pmu_sbi_ovf_handler' [-Wframe-larger-than]
   static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
                      ^
   1 warning generated.
vim +/pmu_sbi_ovf_handler +471 drivers/perf/riscv_pmu_sbi.c
   470
 > 471  static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
   472  {
   473          struct perf_sample_data data;
   474          struct pt_regs *regs;
   475          struct hw_perf_event *hw_evt;
   476          union sbi_pmu_ctr_info *info;
   477          int lidx, hidx, fidx;
   478          struct riscv_pmu *pmu;
   479          struct perf_event *event;
   480          struct cpu_hw_events *hwc = dev;
   481          unsigned long overflow = csr_read(CSR_SSCOUNTOVF);
   482          uint64_t ival[RISCV_MAX_COUNTERS] = {0};
   483
   484          /* No overflow bit is set */
   485          if (!overflow) {
   486                  csr_clear(CSR_SIP, SIP_LCOFIP);
   487                  return IRQ_NONE;
   488          }
   489
   490          fidx = find_first_bit(hwc->used_event_ctrs, RISCV_MAX_COUNTERS);
   491          event = hwc->events[fidx];
   492          if (!event) {
   493                  csr_clear(CSR_SIP, SIP_LCOFIP);
   494                  pr_warn("None of the counters are enabled\n");
   495                  return IRQ_NONE;
   496          }
   497
   498          pmu = to_riscv_pmu(event->pmu);
   499          pmu_sbi_stop_all(pmu);
   500
   501          /**
   502           * Overflow interrupt pending bit should only be cleared after stopping
   503           * all the counters to avoid any race condition.
   504           */
   505          regs = get_irq_regs();
   506          csr_clear(CSR_SIP, SIP_LCOFIP);
   507
   508          for_each_set_bit(lidx, hwc->used_event_ctrs, RISCV_MAX_COUNTERS) {
   509                  struct perf_event *event = hwc->events[lidx];
   510
   511                  if (!event)
   512                          continue;
   513                  info = &pmu_ctr_list[lidx];
   514                  if (!info)
   515                          continue;
   516
   517                  /* compute hardware counter index */
   518                  hidx = info->csr - CSR_CYCLE;
   519                  /* check if the corresponding bit is set in sscountovf */
   520                  if (!(overflow & (1 << hidx)))
   521                          continue;
   522
   523                  hw_evt = &event->hw;
   524                  riscv_pmu_event_update(event);
   525                  perf_sample_data_init(&data, 0, hw_evt->last_period);
   526                  if (!riscv_pmu_event_set_period(event, &ival[lidx]))
   527                          continue;
   528
   529                  /*
   530                   * Perf event overflow will queue the processing of the event as
   531                   * an irq_work which will be taken care of in the handling of
   532                   * IPI_IRQ_WORK.
   533                   */
   534                  if (perf_event_overflow(event, &data, regs))
   535                          pmu_sbi_ctr_stop(event, 0);
   536          }
   537          pmu_sbi_start_all(pmu, ival);
   538
   539          return IRQ_HANDLED;
   540  }
   541
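The 1344-byte frame is largely explained by the two big locals visible in the
listing, 'struct perf_sample_data data' and 'uint64_t ival[RISCV_MAX_COUNTERS]'
(512 bytes on its own if RISCV_MAX_COUNTERS is 64), with the randconfig's
sanitizer instrumentation likely padding the rest. Below is a minimal sketch of
one possible way to shrink the frame, assuming the snapshot array can live in
the per-CPU cpu_hw_events rather than on the IRQ stack; the 'snapshot_values'
field and this restructuring are illustrative only and not part of this series:

        /*
         * Sketch only: move the per-counter snapshot array off the
         * handler's stack and into the per-CPU cpu_hw_events.  The
         * handler runs on one CPU with interrupts disabled, so the
         * per-CPU buffer cannot be entered re-entrantly.  The
         * snapshot_values field is hypothetical, not in this series.
         */
        struct cpu_hw_events {
                /* ... existing fields ... */
                uint64_t snapshot_values[RISCV_MAX_COUNTERS];
        };

        static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
        {
                struct cpu_hw_events *hwc = dev;
                uint64_t *ival = hwc->snapshot_values;

                /* replaces 'uint64_t ival[RISCV_MAX_COUNTERS] = {0};' */
                memset(ival, 0, RISCV_MAX_COUNTERS * sizeof(*ival));

                /*
                 * ... rest of the handler unchanged, indexing ival[lidx]
                 * and passing ival to pmu_sbi_start_all() as before ...
                 */
        }

That would take roughly 512 bytes out of the frame. struct perf_sample_data is
stack-allocated in other PMU drivers as well and by itself should fit under the
1024-byte limit.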
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org