Message-ID: <202110071616.ZE7mvL1l-lkp@intel.com>
Date: Thu, 7 Oct 2021 16:19:23 +0800
From: kernel test robot <lkp@...el.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: kbuild-all@...ts.01.org, linux-kernel@...r.kernel.org
Subject: [peterz-queue:sched/cleanup 39/40] kernel/rcu/tree.c:1337:5: error:
implicit declaration of function 'irq_work_queue_remote'; did you mean
'irq_work_queue_on'?
tree: https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/cleanup
head: c87bd97277563de19b33bf10e866066c5bafdce0
commit: 65f1ba8c0b5c027b66655d098c3b61f901260b54 [39/40] rcu/tree: Use irq_work_queue_remote()
config: x86_64-randconfig-a015-20211004 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git/commit/?id=65f1ba8c0b5c027b66655d098c3b61f901260b54
        git remote add peterz-queue https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git
        git fetch --no-tags peterz-queue sched/cleanup
        git checkout 65f1ba8c0b5c027b66655d098c3b61f901260b54
        # save the attached .config to linux build tree
        make W=1 ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>
All errors (new ones prefixed by >>):
kernel/rcu/tree.c: In function 'rcu_implicit_dynticks_qs':
>> kernel/rcu/tree.c:1337:5: error: implicit declaration of function 'irq_work_queue_remote'; did you mean 'irq_work_queue_on'? [-Werror=implicit-function-declaration]
    1337 |      irq_work_queue_remote(rdp->cpu, &rdp->rcu_iw);
         |      ^~~~~~~~~~~~~~~~~~~~~
         |      irq_work_queue_on
cc1: some warnings being treated as errors
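
The likely trigger: irq_work_queue_remote() is presumably declared in <linux/irq_work.h> only under CONFIG_SMP, while this x86_64 randconfig builds without SMP, so the unconditional call site added by the commit fails to compile. A minimal sketch of one possible fix, assuming that header layout (the guard, the stub, and the return type here are assumptions, not taken from the commit):

	#ifdef CONFIG_SMP
	extern int irq_work_queue_remote(int cpu, struct irq_work *work);
	#else
	static inline int irq_work_queue_remote(int cpu, struct irq_work *work)
	{
		/* On UP there is no remote CPU to target. */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	#endif

With such a stub, the call at line 1337 below stays dead on UP builds, because rdp->cpu can only ever equal smp_processor_id() there.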
vim +1337 kernel/rcu/tree.c
1212
1213	/*
1214	 * Return true if the specified CPU has passed through a quiescent
1215	 * state by virtue of being in or having passed through a dynticks
1216	 * idle state since the last call to dyntick_save_progress_counter()
1217	 * for this same CPU, or by virtue of having been offline.
1218	 */
1219	static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1220	{
1221		unsigned long jtsq;
1222		bool *rnhqp;
1223		bool *ruqp;
1224		struct rcu_node *rnp = rdp->mynode;
1225
1226		raw_lockdep_assert_held_rcu_node(rnp);
1227
1228		/*
1229		 * If the CPU passed through or entered a dynticks idle phase with
1230		 * no active irq/NMI handlers, then we can safely pretend that the CPU
1231		 * already acknowledged the request to pass through a quiescent
1232		 * state. Either way, that CPU cannot possibly be in an RCU
1233		 * read-side critical section that started before the beginning
1234		 * of the current RCU grace period.
1235		 */
1236		if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1237			trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1238			rcu_gpnum_ovf(rnp, rdp);
1239			return 1;
1240		}
1241
1242		/*
1243		 * Complain if a CPU that is considered to be offline from RCU's
1244		 * perspective has not yet reported a quiescent state. After all,
1245		 * the offline CPU should have reported a quiescent state during
1246		 * the CPU-offline process, or, failing that, by rcu_gp_init()
1247		 * if it ran concurrently with either the CPU going offline or the
1248		 * last task on a leaf rcu_node structure exiting its RCU read-side
1249		 * critical section while all CPUs corresponding to that structure
1250		 * are offline. This added warning detects bugs in any of these
1251		 * code paths.
1252		 *
1253		 * The rcu_node structure's ->lock is held here, which excludes
1254		 * the relevant portions of the CPU-hotplug code, the grace-period
1255		 * initialization code, and the rcu_read_unlock() code paths.
1256		 *
1257		 * For more detail, please refer to the "Hotplug CPU" section
1258		 * of RCU's Requirements documentation.
1259		 */
1260		if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1261			bool onl;
1262			struct rcu_node *rnp1;
1263
1264			pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1265				__func__, rnp->grplo, rnp->grphi, rnp->level,
1266				(long)rnp->gp_seq, (long)rnp->completedqs);
1267			for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1268				pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1269					__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1270			onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1271			pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1272				__func__, rdp->cpu, ".o"[onl],
1273				(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1274				(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1275			return 1; /* Break things loose after complaining. */
1276		}
1277
1278		/*
1279		 * A CPU running for an extended time within the kernel can
1280		 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1281		 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1282		 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
1283		 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1284		 * variable are safe because the assignments are repeated if this
1285		 * CPU failed to pass through a quiescent state. This code
1286		 * also checks .jiffies_resched in case jiffies_to_sched_qs
1287		 * is set way high.
1288		 */
1289		jtsq = READ_ONCE(jiffies_to_sched_qs);
1290		ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1291		rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu);
1292		if (!READ_ONCE(*rnhqp) &&
1293		    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1294		     time_after(jiffies, rcu_state.jiffies_resched) ||
1295		     rcu_state.cbovld)) {
1296			WRITE_ONCE(*rnhqp, true);
1297			/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1298			smp_store_release(ruqp, true);
1299		} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1300			WRITE_ONCE(*ruqp, true);
1301		}
1302
1303		/*
1304		 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1305		 * The above code handles this, but only for straight cond_resched().
1306		 * And some in-kernel loops check need_resched() before calling
1307		 * cond_resched(), which defeats the above code for CPUs that are
1308		 * running in-kernel with scheduling-clock interrupts disabled.
1309		 * So hit them over the head with the resched_cpu() hammer!
1310		 */
1311		if (tick_nohz_full_cpu(rdp->cpu) &&
1312		    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1313		     rcu_state.cbovld)) {
1314			WRITE_ONCE(*ruqp, true);
1315			resched_cpu(rdp->cpu);
1316			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1317		}
1318
1319		/*
1320		 * If more than halfway to RCU CPU stall-warning time, invoke
1321		 * resched_cpu() more frequently to try to loosen things up a bit.
1322		 * Also check to see if the CPU is getting hammered with interrupts,
1323		 * but only once per grace period, just to keep the IPIs down to
1324		 * a dull roar.
1325		 */
1326		if (time_after(jiffies, rcu_state.jiffies_resched)) {
1327			if (time_after(jiffies,
1328				       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1329				resched_cpu(rdp->cpu);
1330				WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1331			}
1332			if (!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1333			    (rnp->ffmask & rdp->grpmask)) {
1334				rdp->rcu_iw_gp_seq = rnp->gp_seq;
1335				if (likely(rdp->cpu != smp_processor_id())) {
1336					rdp->rcu_iw_pending = true;
> 1337					irq_work_queue_remote(rdp->cpu, &rdp->rcu_iw);
1338				}
1339			}
1340		}
1341
1342		return 0;
1343	}
1344
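
For completeness, the compiler's fixit hint, irq_work_queue_on(), would also make this compile, since that helper is declared unconditionally, but it would undo the commit's switch to the remote-only helper; the snippet below is therefore only an illustration of the hint, not a recommended resolution (note the reversed argument order):

	if (likely(rdp->cpu != smp_processor_id())) {
		rdp->rcu_iw_pending = true;
		/* irq_work_queue_on() takes (work, cpu), unlike irq_work_queue_remote(). */
		irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
	}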
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org