Date:   Sat, 25 Jun 2022 10:58:20 +0800
From:   kernel test robot <lkp@...el.com>
To:     Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc:     llvm@...ts.linux.dev, kbuild-all@...ts.01.org,
        linux-kernel@...r.kernel.org
Subject: [rt-devel:linux-5.19.y-rt-rebase 10/52] kernel/sched/core.c:3310:9:
 error: cannot take the address of an rvalue of type 'int'

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git linux-5.19.y-rt-rebase
head:   35dc0a4128c81e71e419b8660076e23aab99c2ac
commit: 407451e93cd578f4eb292f3f4fbe86b865ca8051 [10/52] sched: Consider task_struct::saved_state in wait_task_inactive().
config: x86_64-randconfig-a016
compiler: clang version 15.0.0 (https://github.com/llvm/llvm-project 6fa9120080c35a5ff851c3fc3358692c4ef7ce0d)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git/commit/?id=407451e93cd578f4eb292f3f4fbe86b865ca8051
        git remote add rt-devel https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
        git fetch --no-tags rt-devel linux-5.19.y-rt-rebase
        git checkout 407451e93cd578f4eb292f3f4fbe86b865ca8051
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
Reported-by: kernel test robot <lkp@...el.com>

All errors (new ones prefixed by >>):

>> kernel/sched/core.c:3310:9: error: cannot take the address of an rvalue of type 'int'
                                   if (READ_ONCE(p->__state != match_state)
                                       ^         ~~~~~~~~~~~~~~~~~~~~~~~~~
   include/asm-generic/rwonce.h:50:2: note: expanded from macro 'READ_ONCE'
           __READ_ONCE(x);                                                 \
           ^           ~
   include/asm-generic/rwonce.h:44:70: note: expanded from macro '__READ_ONCE'
   #define __READ_ONCE(x)  (*(const volatile __unqual_scalar_typeof(x) *)&(x))
                                                                         ^ ~
>> kernel/sched/core.c:3311:6: error: expected ')'
                                           mismatch = true;
                                           ^
   kernel/sched/core.c:3310:8: note: to match this '('
                                   if (READ_ONCE(p->__state != match_state)
                                      ^
   2 errors generated.
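
Both diagnostics stem from the same misplaced parenthesis in the non-RT branch: READ_ONCE() is closed after the comparison, so the macro argument is the whole expression p->__state != match_state, and __READ_ONCE() then tries to take &(p->__state != match_state), the address of an int rvalue. The second error is fallout: with that ')' consumed by the macro call, the if-condition is left without its closing parenthesis. A standalone sketch (hypothetical file and macro name, simplified to plain typeof(), intentionally non-compiling) reproduces the same clang diagnostic:

        /* sketch.c - hypothetical standalone illustration, not kernel code.
         * Simplified READ_ONCE()-style macro: dereference a pointer built
         * from &(x), which requires x to be an lvalue. */
        #define MY_READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

        int main(void)
        {
        	int a = 1, b = 2;
        	int ok  = MY_READ_ONCE(a);      /* fine: 'a' is an lvalue */
        	int bad = MY_READ_ONCE(a != b); /* error: cannot take the address of an rvalue of type 'int' */
        	return ok + bad;
        }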


vim +/int +3310 kernel/sched/core.c

  3259	
  3260	/*
  3261	 * wait_task_inactive - wait for a thread to unschedule.
  3262	 *
  3263	 * If @match_state is nonzero, it's the @p->state value just checked and
  3264	 * not expected to change.  If it changes, i.e. @p might have woken up,
  3265	 * then return zero.  When we succeed in waiting for @p to be off its CPU,
  3266	 * we return a positive number (its total switch count).  If a second call
  3267	 * a short while later returns the same number, the caller can be sure that
  3268	 * @p has remained unscheduled the whole time.
  3269	 *
  3270	 * The caller must ensure that the task *will* unschedule sometime soon,
  3271	 * else this function might spin for a *long* time. This function can't
  3272	 * be called with interrupts off, or it may introduce deadlock with
  3273	 * smp_call_function() if an IPI is sent by the same process we are
  3274	 * waiting to become inactive.
  3275	 */
  3276	unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
  3277	{
  3278		int running, queued;
  3279		struct rq_flags rf;
  3280		unsigned long ncsw;
  3281		struct rq *rq;
  3282		bool saved_state_match;
  3283		bool update_ncsw;
  3284	
  3285		for (;;) {
  3286			/*
  3287			 * We do the initial early heuristics without holding
  3288			 * any task-queue locks at all. We'll only try to get
  3289			 * the runqueue lock when things look like they will
  3290			 * work out!
  3291			 */
  3292			rq = task_rq(p);
  3293	
  3294			/*
  3295			 * If the task is actively running on another CPU
  3296			 * still, just relax and busy-wait without holding
  3297			 * any locks.
  3298			 *
  3299			 * NOTE! Since we don't hold any locks, it's not
  3300			 * even sure that "rq" stays as the right runqueue!
  3301			 * But we don't care, since "task_running()" will
  3302			 * return false if the runqueue has changed and p
  3303			 * is actually now running somewhere else!
  3304			 */
  3305			while (task_running(rq, p)) {
  3306	
  3307				if (match_state) {
  3308					bool mismatch = false;
  3309	#ifndef CONFIG_PREEMPT_RT
> 3310					if (READ_ONCE(p->__state != match_state)
> 3311						mismatch = true;
  3312	#else
  3313					unsigned long flags;
  3314	
  3315					raw_spin_lock_irqsave(&p->pi_lock, flags);
  3316					if (READ_ONCE(p->__state) != match_state &&
  3317					    READ_ONCE(p->saved_state) != match_state)
  3318						mismatch = true;
  3319					raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  3320	#endif
  3321					if (mismatch)
  3322						return 0;
  3323				}
  3324				cpu_relax();
  3325			}
  3326	
  3327			/*
  3328			 * Ok, time to look more closely! We need the rq
  3329			 * lock now, to be *sure*. If we're wrong, we'll
  3330			 * just go back and repeat.
  3331			 */
  3332			rq = task_rq_lock(p, &rf);
  3333			trace_sched_wait_task(p);
  3334			running = task_running(rq, p);
  3335			queued = task_on_rq_queued(p);
  3336			ncsw = 0;
  3337			update_ncsw = false;
  3338			saved_state_match = false;
  3339	
  3340			if (!match_state) {
  3341				update_ncsw = true;
  3342			} else if (READ_ONCE(p->__state) == match_state) {
  3343				update_ncsw = true;
  3344	#ifdef CONFIG_PREEMPT_RT
  3345			} else if (READ_ONCE(p->saved_state) == match_state) {
  3346				update_ncsw = true;
  3347				saved_state_match = true;
  3348	#endif
  3349			}
  3350	
  3351			if (update_ncsw)
  3352				ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
  3353			task_rq_unlock(rq, p, &rf);
  3354	
  3355			/*
  3356			 * If it changed from the expected state, bail out now.
  3357			 */
  3358			if (unlikely(!ncsw))
  3359				break;
  3360	
  3361			/*
  3362			 * Was it really running after all now that we
  3363			 * checked with the proper locks actually held?
  3364			 *
  3365			 * Oops. Go back and try again..
  3366			 */
  3367			if (unlikely(running)) {
  3368				cpu_relax();
  3369				continue;
  3370			}
  3371	
  3372			/*
  3373			 * It's not enough that it's not actively running,
  3374			 * it must be off the runqueue _entirely_, and not
  3375			 * preempted!
  3376			 *
  3377			 * So if it was still runnable (but just not actively
  3378			 * running right now), it's preempted, and we should
  3379			 * yield - it could be a while.
  3380			 */
  3381			if (unlikely(queued) || saved_state_match) {
  3382				ktime_t to = NSEC_PER_SEC / HZ;
  3383	
  3384				set_current_state(TASK_UNINTERRUPTIBLE);
  3385				schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
  3386				continue;
  3387			}
  3388	
  3389			/*
  3390			 * Ahh, all good. It wasn't running, and it wasn't
  3391			 * runnable, which means that it will never become
  3392			 * running in the future either. We're all done!
  3393			 */
  3394			break;
  3395		}
  3396	
  3397		return ncsw;
  3398	}
  3399	
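
Assuming the #ifndef CONFIG_PREEMPT_RT branch at lines 3310-3311 was meant to mirror the locked check in the RT branch at lines 3316-3318, the presumed intent (a sketch only, not a build-tested patch) is to close READ_ONCE() right after p->__state so the comparison happens outside the macro, which also rebalances the if():

        			/* sketch of the presumed intent for lines 3310-3311, not build-tested */
        			if (READ_ONCE(p->__state) != match_state)
        				mismatch = true;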

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp

