Date:   Thu, 5 Mar 2020 09:59:45 +0800
From:   kbuild test robot <lkp@...el.com>
To:     "Paul E. McKenney" <paulmck@...nel.org>
Cc:     kbuild-all@...ts.01.org, clang-built-linux@...glegroups.com,
        linux-kernel@...r.kernel.org
Subject: [rcu:dev.2020.02.29a 43/43] kernel/rcu/tasks.h:212:20: error: no
 member named 'rcu_tasks_holdout' in 'struct task_struct'

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev.2020.02.29a
head:   61f7110d6b78f4c84ea5d5480185740840889af7
commit: 61f7110d6b78f4c84ea5d5480185740840889af7 [43/43] rcu-tasks: Add an RCU-tasks rude variant
config: x86_64-defconfig (attached as .config)
compiler: clang version 11.0.0 (git://gitmirror/llvm_project f95095e9f612084fc95c5e797c2ab5eb8dd674b8)
reproduce:
        # FIXME the reproduce steps for clang are not ready yet
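        # Until those steps are published, a manual reproduction along these
        # lines should work (a sketch using only the tree, commit, config and
        # compiler listed above; kernel/rcu/update.o is the failing object):
        #   git clone https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
        #   cd linux-rcu
        #   git checkout 61f7110d6b78f4c84ea5d5480185740840889af7
        #   make defconfig        # or copy the attached .config in place and run 'make olddefconfig'
        #   make CC=clang kernel/rcu/update.o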

If you fix the issue, kindly add the following tag:
Reported-by: kbuild test robot <lkp@...el.com>

All error/warnings (new ones prefixed by >>):

   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:212:20: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
           if (!READ_ONCE(t->rcu_tasks_holdout) ||
                          ~  ^
   include/linux/compiler.h:293:34: note: expanded from macro 'READ_ONCE'
   #define READ_ONCE(x) __READ_ONCE(x, 1)
                                    ^
   include/linux/compiler.h:285:17: note: expanded from macro '__READ_ONCE'
           union { typeof(x) __val; char __c[1]; } __u;                    \
                          ^
   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:212:20: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
           if (!READ_ONCE(t->rcu_tasks_holdout) ||
                          ~  ^
   include/linux/compiler.h:293:34: note: expanded from macro 'READ_ONCE'
   #define READ_ONCE(x) __READ_ONCE(x, 1)
                                    ^
   include/linux/compiler.h:287:22: note: expanded from macro '__READ_ONCE'
                   __read_once_size(&(x), __u.__c, sizeof(x));             \
                                      ^
   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:212:20: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
           if (!READ_ONCE(t->rcu_tasks_holdout) ||
                          ~  ^
   include/linux/compiler.h:293:34: note: expanded from macro 'READ_ONCE'
   #define READ_ONCE(x) __READ_ONCE(x, 1)
                                    ^
   include/linux/compiler.h:287:42: note: expanded from macro '__READ_ONCE'
                   __read_once_size(&(x), __u.__c, sizeof(x));             \
                                                          ^
   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:212:20: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
           if (!READ_ONCE(t->rcu_tasks_holdout) ||
                          ~  ^
   include/linux/compiler.h:293:34: note: expanded from macro 'READ_ONCE'
   #define READ_ONCE(x) __READ_ONCE(x, 1)
                                    ^
   include/linux/compiler.h:289:30: note: expanded from macro '__READ_ONCE'
                   __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
                                              ^
   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:212:20: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
           if (!READ_ONCE(t->rcu_tasks_holdout) ||
                          ~  ^
   include/linux/compiler.h:293:34: note: expanded from macro 'READ_ONCE'
   #define READ_ONCE(x) __READ_ONCE(x, 1)
                                    ^
   include/linux/compiler.h:289:50: note: expanded from macro '__READ_ONCE'
                   __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
                                                                  ^
   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:212:6: error: invalid argument type 'void' to unary expression
           if (!READ_ONCE(t->rcu_tasks_holdout) ||
               ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>> kernel/rcu/tasks.h:213:9: error: no member named 'rcu_tasks_nvcsw' in 'struct task_struct'
               t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
               ~  ^
>> kernel/rcu/tasks.h:216:30: error: no member named 'rcu_tasks_idle_cpu' in 'struct task_struct'
                !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                                    ~  ^
   kernel/rcu/tasks.h:217:17: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
                   WRITE_ONCE(t->rcu_tasks_holdout, false);
                              ~  ^
   include/linux/compiler.h:310:17: note: expanded from macro 'WRITE_ONCE'
           union { typeof(x) __val; char __c[1]; } __u =   \
                          ^
   In file included from kernel/rcu/update.c:562:
   kernel/rcu/tasks.h:217:17: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
                   WRITE_ONCE(t->rcu_tasks_holdout, false);
                              ~  ^
   include/linux/compiler.h:311:30: note: expanded from macro 'WRITE_ONCE'
                   { .__val = (__force typeof(x)) (val) }; \
                                              ^
   In file included from kernel/rcu/update.c:562:
   kernel/rcu/tasks.h:217:17: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
                   WRITE_ONCE(t->rcu_tasks_holdout, false);
                              ~  ^
   include/linux/compiler.h:312:22: note: expanded from macro 'WRITE_ONCE'
           __write_once_size(&(x), __u.__c, sizeof(x));    \
                               ^
   In file included from kernel/rcu/update.c:562:
   kernel/rcu/tasks.h:217:17: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
                   WRITE_ONCE(t->rcu_tasks_holdout, false);
                              ~  ^
   include/linux/compiler.h:312:42: note: expanded from macro 'WRITE_ONCE'
           __write_once_size(&(x), __u.__c, sizeof(x));    \
                                                   ^
   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:218:21: error: no member named 'rcu_tasks_holdout_list' in 'struct task_struct'
                   list_del_init(&t->rcu_tasks_holdout_list);
                                  ~  ^
   kernel/rcu/tasks.h:233:7: error: no member named 'rcu_tasks_nvcsw' in 'struct task_struct'
                    t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                    ~  ^
   include/linux/printk.h:300:35: note: expanded from macro 'pr_alert'
           printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
                                            ^~~~~~~~~~~
   In file included from kernel/rcu/update.c:562:
   kernel/rcu/tasks.h:233:37: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
                    t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                                                  ~  ^
   include/linux/printk.h:300:35: note: expanded from macro 'pr_alert'
           printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
                                            ^~~~~~~~~~~
   In file included from kernel/rcu/update.c:562:
   kernel/rcu/tasks.h:234:7: error: no member named 'rcu_tasks_idle_cpu' in 'struct task_struct'
                    t->rcu_tasks_idle_cpu, cpu);
                    ~  ^
   include/linux/printk.h:300:35: note: expanded from macro 'pr_alert'
           printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
                                            ^~~~~~~~~~~
   In file included from kernel/rcu/update.c:562:
>> kernel/rcu/tasks.h:239:38: warning: declaration of 'struct rcu_tasks' will not be visible outside of this function [-Wvisibility]
   static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
                                        ^
   kernel/rcu/tasks.h:271:7: error: no member named 'rcu_tasks_nvcsw' in 'struct task_struct'
                           t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                           ~  ^
   kernel/rcu/tasks.h:272:18: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
                           WRITE_ONCE(t->rcu_tasks_holdout, true);
                                      ~  ^
   include/linux/compiler.h:310:17: note: expanded from macro 'WRITE_ONCE'
           union { typeof(x) __val; char __c[1]; } __u =   \
                          ^
   In file included from kernel/rcu/update.c:562:
   kernel/rcu/tasks.h:272:18: error: no member named 'rcu_tasks_holdout' in 'struct task_struct'
                           WRITE_ONCE(t->rcu_tasks_holdout, true);
                                      ~  ^
   include/linux/compiler.h:311:30: note: expanded from macro 'WRITE_ONCE'
                   { .__val = (__force typeof(x)) (val) }; \
                                              ^
   fatal error: too many errors emitted, stopping now [-ferror-limit=]
   1 warning and 20 errors generated.
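
All of the "no member named" errors above share one root cause: the
->rcu_tasks_* fields that kernel/rcu/tasks.h dereferences are declared in
struct task_struct only when CONFIG_TASKS_RCU is enabled, and this
x86_64-defconfig build evidently has it disabled.  As a rough sketch (member
types inferred from the usage shown below, not quoted verbatim from
include/linux/sched.h), the declarations look like:

	#ifdef CONFIG_TASKS_RCU
		unsigned long		rcu_tasks_nvcsw;	/* snapshot of ->nvcsw, printed with %lu */
		bool			rcu_tasks_holdout;	/* written with WRITE_ONCE(..., true/false) */
		struct list_head	rcu_tasks_holdout_list;	/* queued on the local holdout list */
		int			rcu_tasks_idle_cpu;	/* tested against >= 0, printed with %d */
	#endif /* #ifdef CONFIG_TASKS_RCU */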

vim +212 kernel/rcu/tasks.h

6b80543d90000c Paul E. McKenney 2020-03-02  205  
6b80543d90000c Paul E. McKenney 2020-03-02  206  /* See if tasks are still holding out, complain if so. */
6b80543d90000c Paul E. McKenney 2020-03-02  207  static void check_holdout_task(struct task_struct *t,
6b80543d90000c Paul E. McKenney 2020-03-02  208  			       bool needreport, bool *firstreport)
6b80543d90000c Paul E. McKenney 2020-03-02  209  {
6b80543d90000c Paul E. McKenney 2020-03-02  210  	int cpu;
6b80543d90000c Paul E. McKenney 2020-03-02  211  
6b80543d90000c Paul E. McKenney 2020-03-02 @212  	if (!READ_ONCE(t->rcu_tasks_holdout) ||
6b80543d90000c Paul E. McKenney 2020-03-02 @213  	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
6b80543d90000c Paul E. McKenney 2020-03-02  214  	    !READ_ONCE(t->on_rq) ||
6b80543d90000c Paul E. McKenney 2020-03-02  215  	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
6b80543d90000c Paul E. McKenney 2020-03-02 @216  	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
6b80543d90000c Paul E. McKenney 2020-03-02  217  		WRITE_ONCE(t->rcu_tasks_holdout, false);
6b80543d90000c Paul E. McKenney 2020-03-02 @218  		list_del_init(&t->rcu_tasks_holdout_list);
6b80543d90000c Paul E. McKenney 2020-03-02  219  		put_task_struct(t);
6b80543d90000c Paul E. McKenney 2020-03-02  220  		return;
6b80543d90000c Paul E. McKenney 2020-03-02  221  	}
6b80543d90000c Paul E. McKenney 2020-03-02  222  	rcu_request_urgent_qs_task(t);
6b80543d90000c Paul E. McKenney 2020-03-02  223  	if (!needreport)
6b80543d90000c Paul E. McKenney 2020-03-02  224  		return;
6b80543d90000c Paul E. McKenney 2020-03-02  225  	if (*firstreport) {
6b80543d90000c Paul E. McKenney 2020-03-02  226  		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
6b80543d90000c Paul E. McKenney 2020-03-02  227  		*firstreport = false;
6b80543d90000c Paul E. McKenney 2020-03-02  228  	}
6b80543d90000c Paul E. McKenney 2020-03-02  229  	cpu = task_cpu(t);
6b80543d90000c Paul E. McKenney 2020-03-02  230  	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
6b80543d90000c Paul E. McKenney 2020-03-02  231  		 t, ".I"[is_idle_task(t)],
6b80543d90000c Paul E. McKenney 2020-03-02  232  		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
6b80543d90000c Paul E. McKenney 2020-03-02  233  		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
6b80543d90000c Paul E. McKenney 2020-03-02  234  		 t->rcu_tasks_idle_cpu, cpu);
6b80543d90000c Paul E. McKenney 2020-03-02  235  	sched_show_task(t);
6b80543d90000c Paul E. McKenney 2020-03-02  236  }
6b80543d90000c Paul E. McKenney 2020-03-02  237  
61f7110d6b78f4 Paul E. McKenney 2020-03-02  238  /* Wait for one RCU-tasks grace period. */
61f7110d6b78f4 Paul E. McKenney 2020-03-02 @239  static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
6b80543d90000c Paul E. McKenney 2020-03-02  240  {
6b80543d90000c Paul E. McKenney 2020-03-02  241  	struct task_struct *g, *t;
6b80543d90000c Paul E. McKenney 2020-03-02  242  	unsigned long lastreport;
6b80543d90000c Paul E. McKenney 2020-03-02  243  	LIST_HEAD(rcu_tasks_holdouts);
6b80543d90000c Paul E. McKenney 2020-03-02  244  	int fract;
6b80543d90000c Paul E. McKenney 2020-03-02  245  
6b80543d90000c Paul E. McKenney 2020-03-02  246  	/*
61f7110d6b78f4 Paul E. McKenney 2020-03-02  247  	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
61f7110d6b78f4 Paul E. McKenney 2020-03-02  248  	 * to complete.  Invoking synchronize_rcu() suffices because all
61f7110d6b78f4 Paul E. McKenney 2020-03-02  249  	 * these transitions occur with interrupts disabled.  Without this
61f7110d6b78f4 Paul E. McKenney 2020-03-02  250  	 * synchronize_rcu(), a read-side critical section that started
61f7110d6b78f4 Paul E. McKenney 2020-03-02  251  	 * before the grace period might be incorrectly seen as having
61f7110d6b78f4 Paul E. McKenney 2020-03-02  252  	 * started after the grace period.
6b80543d90000c Paul E. McKenney 2020-03-02  253  	 *
61f7110d6b78f4 Paul E. McKenney 2020-03-02  254  	 * This synchronize_rcu() also dispenses with the need for a
61f7110d6b78f4 Paul E. McKenney 2020-03-02  255  	 * memory barrier on the first store to t->rcu_tasks_holdout,
61f7110d6b78f4 Paul E. McKenney 2020-03-02  256  	 * as it forces the store to happen after the beginning of the
61f7110d6b78f4 Paul E. McKenney 2020-03-02  257  	 * grace period.
6b80543d90000c Paul E. McKenney 2020-03-02  258  	 */
6b80543d90000c Paul E. McKenney 2020-03-02  259  	synchronize_rcu();
6b80543d90000c Paul E. McKenney 2020-03-02  260  
6b80543d90000c Paul E. McKenney 2020-03-02  261  	/*
61f7110d6b78f4 Paul E. McKenney 2020-03-02  262  	 * There were callbacks, so we need to wait for an RCU-tasks
61f7110d6b78f4 Paul E. McKenney 2020-03-02  263  	 * grace period.  Start off by scanning the task list for tasks
61f7110d6b78f4 Paul E. McKenney 2020-03-02  264  	 * that are not already voluntarily blocked.  Mark these tasks
61f7110d6b78f4 Paul E. McKenney 2020-03-02  265  	 * and make a list of them in rcu_tasks_holdouts.
6b80543d90000c Paul E. McKenney 2020-03-02  266  	 */
6b80543d90000c Paul E. McKenney 2020-03-02  267  	rcu_read_lock();
6b80543d90000c Paul E. McKenney 2020-03-02  268  	for_each_process_thread(g, t) {
61f7110d6b78f4 Paul E. McKenney 2020-03-02  269  		if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
6b80543d90000c Paul E. McKenney 2020-03-02  270  			get_task_struct(t);
6b80543d90000c Paul E. McKenney 2020-03-02  271  			t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
6b80543d90000c Paul E. McKenney 2020-03-02  272  			WRITE_ONCE(t->rcu_tasks_holdout, true);
6b80543d90000c Paul E. McKenney 2020-03-02  273  			list_add(&t->rcu_tasks_holdout_list,
6b80543d90000c Paul E. McKenney 2020-03-02  274  				 &rcu_tasks_holdouts);
6b80543d90000c Paul E. McKenney 2020-03-02  275  		}
6b80543d90000c Paul E. McKenney 2020-03-02  276  	}
6b80543d90000c Paul E. McKenney 2020-03-02  277  	rcu_read_unlock();
6b80543d90000c Paul E. McKenney 2020-03-02  278  
6b80543d90000c Paul E. McKenney 2020-03-02  279  	/*
61f7110d6b78f4 Paul E. McKenney 2020-03-02  280  	 * Wait for tasks that are in the process of exiting.  This
61f7110d6b78f4 Paul E. McKenney 2020-03-02  281  	 * does only part of the job, ensuring that all tasks that were
61f7110d6b78f4 Paul E. McKenney 2020-03-02  282  	 * previously exiting reach the point where they have disabled
61f7110d6b78f4 Paul E. McKenney 2020-03-02  283  	 * preemption, allowing the later synchronize_rcu() to finish
61f7110d6b78f4 Paul E. McKenney 2020-03-02  284  	 * the job.
6b80543d90000c Paul E. McKenney 2020-03-02  285  	 */
6b80543d90000c Paul E. McKenney 2020-03-02  286  	synchronize_srcu(&tasks_rcu_exit_srcu);
6b80543d90000c Paul E. McKenney 2020-03-02  287  
6b80543d90000c Paul E. McKenney 2020-03-02  288  	/*
61f7110d6b78f4 Paul E. McKenney 2020-03-02  289  	 * Each pass through the following loop scans the list of holdout
61f7110d6b78f4 Paul E. McKenney 2020-03-02  290  	 * tasks, removing any that are no longer holdouts.  When the list
61f7110d6b78f4 Paul E. McKenney 2020-03-02  291  	 * is empty, we are done.
6b80543d90000c Paul E. McKenney 2020-03-02  292  	 */
6b80543d90000c Paul E. McKenney 2020-03-02  293  	lastreport = jiffies;
6b80543d90000c Paul E. McKenney 2020-03-02  294  
61f7110d6b78f4 Paul E. McKenney 2020-03-02  295  	/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
6b80543d90000c Paul E. McKenney 2020-03-02  296  	fract = 10;
6b80543d90000c Paul E. McKenney 2020-03-02  297  
6b80543d90000c Paul E. McKenney 2020-03-02  298  	for (;;) {
6b80543d90000c Paul E. McKenney 2020-03-02  299  		bool firstreport;
6b80543d90000c Paul E. McKenney 2020-03-02  300  		bool needreport;
6b80543d90000c Paul E. McKenney 2020-03-02  301  		int rtst;
6b80543d90000c Paul E. McKenney 2020-03-02  302  		struct task_struct *t1;
6b80543d90000c Paul E. McKenney 2020-03-02  303  
6b80543d90000c Paul E. McKenney 2020-03-02  304  		if (list_empty(&rcu_tasks_holdouts))
6b80543d90000c Paul E. McKenney 2020-03-02  305  			break;
6b80543d90000c Paul E. McKenney 2020-03-02  306  
6b80543d90000c Paul E. McKenney 2020-03-02  307  		/* Slowly back off waiting for holdouts */
6b80543d90000c Paul E. McKenney 2020-03-02  308  		schedule_timeout_interruptible(HZ/fract);
6b80543d90000c Paul E. McKenney 2020-03-02  309  
6b80543d90000c Paul E. McKenney 2020-03-02  310  		if (fract > 1)
6b80543d90000c Paul E. McKenney 2020-03-02  311  			fract--;
6b80543d90000c Paul E. McKenney 2020-03-02  312  
6b80543d90000c Paul E. McKenney 2020-03-02  313  		rtst = READ_ONCE(rcu_task_stall_timeout);
61f7110d6b78f4 Paul E. McKenney 2020-03-02  314  		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
6b80543d90000c Paul E. McKenney 2020-03-02  315  		if (needreport)
6b80543d90000c Paul E. McKenney 2020-03-02  316  			lastreport = jiffies;
6b80543d90000c Paul E. McKenney 2020-03-02  317  		firstreport = true;
6b80543d90000c Paul E. McKenney 2020-03-02  318  		WARN_ON(signal_pending(current));
6b80543d90000c Paul E. McKenney 2020-03-02  319  		list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
6b80543d90000c Paul E. McKenney 2020-03-02  320  					 rcu_tasks_holdout_list) {
6b80543d90000c Paul E. McKenney 2020-03-02  321  			check_holdout_task(t, needreport, &firstreport);
6b80543d90000c Paul E. McKenney 2020-03-02  322  			cond_resched();
6b80543d90000c Paul E. McKenney 2020-03-02  323  		}
6b80543d90000c Paul E. McKenney 2020-03-02  324  	}
6b80543d90000c Paul E. McKenney 2020-03-02  325  
6b80543d90000c Paul E. McKenney 2020-03-02  326  	/*
61f7110d6b78f4 Paul E. McKenney 2020-03-02  327  	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
61f7110d6b78f4 Paul E. McKenney 2020-03-02  328  	 * memory barriers prior to them in the schedule() path, memory
61f7110d6b78f4 Paul E. McKenney 2020-03-02  329  	 * reordering on other CPUs could cause their RCU-tasks read-side
61f7110d6b78f4 Paul E. McKenney 2020-03-02  330  	 * critical sections to extend past the end of the grace period.
61f7110d6b78f4 Paul E. McKenney 2020-03-02  331  	 * However, because these ->nvcsw updates are carried out with
61f7110d6b78f4 Paul E. McKenney 2020-03-02  332  	 * interrupts disabled, we can use synchronize_rcu() to force the
61f7110d6b78f4 Paul E. McKenney 2020-03-02  333  	 * needed ordering on all such CPUs.
6b80543d90000c Paul E. McKenney 2020-03-02  334  	 *
61f7110d6b78f4 Paul E. McKenney 2020-03-02  335  	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
61f7110d6b78f4 Paul E. McKenney 2020-03-02  336  	 * accesses to be within the grace period, avoiding the need for
61f7110d6b78f4 Paul E. McKenney 2020-03-02  337  	 * memory barriers for ->rcu_tasks_holdout accesses.
6b80543d90000c Paul E. McKenney 2020-03-02  338  	 *
61f7110d6b78f4 Paul E. McKenney 2020-03-02  339  	 * In addition, this synchronize_rcu() waits for exiting tasks
61f7110d6b78f4 Paul E. McKenney 2020-03-02  340  	 * to complete their final preempt_disable() region of execution,
61f7110d6b78f4 Paul E. McKenney 2020-03-02  341  	 * cleaning up after the synchronize_srcu() above.
6b80543d90000c Paul E. McKenney 2020-03-02  342  	 */
6b80543d90000c Paul E. McKenney 2020-03-02  343  	synchronize_rcu();
61f7110d6b78f4 Paul E. McKenney 2020-03-02  344  }
6b80543d90000c Paul E. McKenney 2020-03-02  345  
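
Because the code above touches these CONFIG_TASKS_RCU-only fields
unconditionally, kernel/rcu/update.c stops building as soon as it includes
tasks.h on a CONFIG_TASKS_RCU=n configuration such as this defconfig.  One
plausible direction, sketched here only for illustration and not necessarily
the fix that will land on the branch, is to compile the classic Tasks-RCU
machinery only when the option is set:

	/* kernel/rcu/tasks.h -- illustrative guard, hypothetical placement */
	#ifdef CONFIG_TASKS_RCU
	/*
	 * check_holdout_task(), rcu_tasks_wait_gp(), and the other users of
	 * ->rcu_tasks_nvcsw, ->rcu_tasks_holdout, ->rcu_tasks_holdout_list
	 * and ->rcu_tasks_idle_cpu go here, unchanged.
	 */
	#endif /* #ifdef CONFIG_TASKS_RCU */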

:::::: The code at line 212 was first introduced by commit
:::::: 6b80543d90000c684123b05f075ac1433d99fa85 tasks-rcu: Move Tasks RCU to its own file

:::::: TO: Paul E. McKenney <paulmck@...nel.org>
:::::: CC: Paul E. McKenney <paulmck@...nel.org>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

