Message-ID: <8c0f011a-db83-001e-3cce-326cb8228a75@windriver.com>
Date: Sat, 16 Oct 2021 02:41:17 +0800
From: "Xu, Yanfei" <yanfei.xu@...driver.com>
To: kernel test robot <lkp@...el.com>
Cc: llvm@...ts.linux.dev, kbuild-all@...ts.01.org,
linux-kernel@...r.kernel.org, Peter Zijlstra <peterz@...radead.org>
Subject: Re: [peterz-queue:locking/core 41/43] kernel/locking/rwsem.c:1023:19:
error: variable has incomplete type 'enum owner_state'
On 10/15/21 9:51 PM, kernel test robot wrote:
> [Please note: This e-mail is from an EXTERNAL e-mail address]
>
> tree: https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git locking/core
> head: 44e63f63c47dfb202eb25cdd97d04ec7e47f51d8
> commit: b08614038dba3f6982e1e7701f23784bb0aedba6 [41/43] locking/rwsem: disable preemption for spinning region
> config: hexagon-randconfig-r041-20211014 (attached as .config)
> compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project acb3b187c4c88650a6a717a1bcb234d27d0d7f54)
> reproduce (this is a W=1 build):
>         wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
>         chmod +x ~/bin/make.cross
>         # https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git/commit/?id=b08614038dba3f6982e1e7701f23784bb0aedba6
>         git remote add peterz-queue https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git
>         git fetch --no-tags peterz-queue locking/core
>         git checkout b08614038dba3f6982e1e7701f23784bb0aedba6
>         # save the attached .config to linux build tree
>         mkdir build_dir
>         COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=hexagon SHELL=/bin/bash kernel/locking/
>
> If you fix the issue, kindly add following tag as appropriate
> Reported-by: kernel test robot <lkp@...el.com>
>
> All errors (new ones prefixed by >>):
>
>>> kernel/locking/rwsem.c:1023:19: error: variable has incomplete type 'enum owner_state'
>            enum owner_state owner_state;
>                             ^
>    kernel/locking/rwsem.c:1023:7: note: forward declaration of 'enum owner_state'
>            enum owner_state owner_state;
>                 ^
>    1 error generated.
>
Hi Peter,
I have sent a patch titled "locking/rwsem: Introduce
__rwsem_spin_on_owner()" to fix this.
Thanks,
Yanfei
>
> vim +1023 kernel/locking/rwsem.c
>
> 1012
> 1013 /*
> 1014  * Wait until we successfully acquire the write lock
> 1015  */
> 1016 static struct rw_semaphore *
> 1017 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> 1018 {
> 1019         long count;
> 1020         enum writer_wait_state wstate;
> 1021         struct rwsem_waiter waiter;
> 1022         struct rw_semaphore *ret = sem;
>> 1023         enum owner_state owner_state;
> 1024         DEFINE_WAKE_Q(wake_q);
> 1025
> 1026         /* do optimistic spinning and steal lock if possible */
> 1027         if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
> 1028                 /* rwsem_optimistic_spin() implies ACQUIRE on success */
> 1029                 return sem;
> 1030         }
> 1031
> 1032         /*
> 1033          * Optimistic spinning failed, proceed to the slowpath
> 1034          * and block until we can acquire the sem.
> 1035          */
> 1036         waiter.task = current;
> 1037         waiter.type = RWSEM_WAITING_FOR_WRITE;
> 1038         waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
> 1039
> 1040         raw_spin_lock_irq(&sem->wait_lock);
> 1041
> 1042         /* account for this before adding a new element to the list */
> 1043         wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
> 1044
> 1045         list_add_tail(&waiter.list, &sem->wait_list);
> 1046
> 1047         /* we're now waiting on the lock */
> 1048         if (wstate == WRITER_NOT_FIRST) {
> 1049                 count = atomic_long_read(&sem->count);
> 1050
> 1051                 /*
> 1052                  * If there were already threads queued before us and:
> 1053                  * 1) there are no active locks, wake the front
> 1054                  *    queued process(es) as the handoff bit might be set.
> 1055                  * 2) there are no active writers and some readers, the lock
> 1056                  *    must be read owned; so we try to wake any read lock
> 1057                  *    waiters that were queued ahead of us.
> 1058                  */
> 1059                 if (count & RWSEM_WRITER_MASK)
> 1060                         goto wait;
> 1061
> 1062                 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
> 1063                                         ? RWSEM_WAKE_READERS
> 1064                                         : RWSEM_WAKE_ANY, &wake_q);
> 1065
> 1066                 if (!wake_q_empty(&wake_q)) {
> 1067                         /*
> 1068                          * We want to minimize wait_lock hold time especially
> 1069                          * when a large number of readers are to be woken up.
> 1070                          */
> 1071                         raw_spin_unlock_irq(&sem->wait_lock);
> 1072                         wake_up_q(&wake_q);
> 1073                         wake_q_init(&wake_q);   /* Used again, reinit */
> 1074                         raw_spin_lock_irq(&sem->wait_lock);
> 1075                 }
> 1076         } else {
> 1077                 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
> 1078         }
> 1079
> 1080 wait:
> 1081         /* wait until we successfully acquire the lock */
> 1082         set_current_state(state);
> 1083         for (;;) {
> 1084                 if (rwsem_try_write_lock(sem, wstate)) {
> 1085                         /* rwsem_try_write_lock() implies ACQUIRE on success */
> 1086                         break;
> 1087                 }
> 1088
> 1089                 raw_spin_unlock_irq(&sem->wait_lock);
> 1090
> 1091                 /*
> 1092                  * After setting the handoff bit and failing to acquire
> 1093                  * the lock, attempt to spin on owner to accelerate lock
> 1094                  * transfer. If the previous owner is a on-cpu writer and it
> 1095                  * has just released the lock, OWNER_NULL will be returned.
> 1096                  * In this case, we attempt to acquire the lock again
> 1097                  * without sleeping.
> 1098                  */
> 1099                 if (wstate == WRITER_HANDOFF) {
> 1100                         preempt_disable();
> 1101                         owner_state = rwsem_spin_on_owner(sem);
> 1102                         preempt_enable();
> 1103                         if (owner_state == OWNER_NULL)
> 1104                                 goto trylock_again;
> 1105                 }
> 1106
> 1107                 /* Block until there are no active lockers. */
> 1108                 for (;;) {
> 1109                         if (signal_pending_state(state, current))
> 1110                                 goto out_nolock;
> 1111
> 1112                         schedule();
> 1113                         lockevent_inc(rwsem_sleep_writer);
> 1114                         set_current_state(state);
> 1115                         /*
> 1116                          * If HANDOFF bit is set, unconditionally do
> 1117                          * a trylock.
> 1118                          */
> 1119                         if (wstate == WRITER_HANDOFF)
> 1120                                 break;
> 1121
> 1122                         if ((wstate == WRITER_NOT_FIRST) &&
> 1123                             (rwsem_first_waiter(sem) == &waiter))
> 1124                                 wstate = WRITER_FIRST;
> 1125
> 1126                         count = atomic_long_read(&sem->count);
> 1127                         if (!(count & RWSEM_LOCK_MASK))
> 1128                                 break;
> 1129
> 1130                         /*
> 1131                          * The setting of the handoff bit is deferred
> 1132                          * until rwsem_try_write_lock() is called.
> 1133                          */
> 1134                         if ((wstate == WRITER_FIRST) && (rt_task(current) ||
> 1135                             time_after(jiffies, waiter.timeout))) {
> 1136                                 wstate = WRITER_HANDOFF;
> 1137                                 lockevent_inc(rwsem_wlock_handoff);
> 1138                                 break;
> 1139                         }
> 1140                 }
> 1141 trylock_again:
> 1142                 raw_spin_lock_irq(&sem->wait_lock);
> 1143         }
> 1144         __set_current_state(TASK_RUNNING);
> 1145         list_del(&waiter.list);
> 1146         raw_spin_unlock_irq(&sem->wait_lock);
> 1147         lockevent_inc(rwsem_wlock);
> 1148
> 1149         return ret;
> 1150
> 1151 out_nolock:
> 1152         __set_current_state(TASK_RUNNING);
> 1153         raw_spin_lock_irq(&sem->wait_lock);
> 1154         list_del(&waiter.list);
> 1155
> 1156         if (unlikely(wstate == WRITER_HANDOFF))
> 1157                 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
> 1158
> 1159         if (list_empty(&sem->wait_list))
> 1160                 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
> 1161         else
> 1162                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
> 1163         raw_spin_unlock_irq(&sem->wait_lock);
> 1164         wake_up_q(&wake_q);
> 1165         lockevent_inc(rwsem_wlock_fail);
> 1166
> 1167         return ERR_PTR(-EINTR);
> 1168 }
> 1169
>
> ---
> 0-DAY CI Kernel Test Service, Intel Corporation
> https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
>