Message-ID: <202601300057.iTAe6Gxb-lkp@intel.com>
Date: Fri, 30 Jan 2026 00:23:24 +0800
From: kernel test robot <lkp@...el.com>
To: Nathan Chancellor <nathan@...nel.org>
Cc: oe-kbuild-all@...ts.linux.dev, linux-kernel@...r.kernel.org,
Paul Walmsley <pjw@...nel.org>
Subject: arch/riscv/kvm/vcpu_sbi_sta.c:59:13: sparse: sparse: cast to
restricted __le32
tree: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: 8dfce8991b95d8625d0a1d2896e42f93b9d7f68d
commit: bdce162f2e57a969803e5e9375999a3e0546905f riscv: Use 64-bit variable for output in __get_user_asm
date: 13 days ago
config: riscv-randconfig-r111-20260129 (https://download.01.org/0day-ci/archive/20260130/202601300057.iTAe6Gxb-lkp@intel.com/config)
compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260130/202601300057.iTAe6Gxb-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202601300057.iTAe6Gxb-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> arch/riscv/kvm/vcpu_sbi_sta.c:59:13: sparse: sparse: cast to restricted __le32
>> arch/riscv/kvm/vcpu_sbi_sta.c:68:14: sparse: sparse: cast to restricted __le64
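
For context on this warning class: __le32 and __le64 are declared
__bitwise, so sparse treats them as types distinct from plain integers
and flags any conversion between the two that is not annotated with
__force. The blamed commit makes __get_user_asm stage the loaded value
in a 64-bit plain integer, so the cast back to the __le32/__le64
destinations at lines 59 and 68 is presumably what sparse now reports.
A minimal standalone sketch of the rule (illustrative only, not the
kernel's uaccess code; compile with gcc, check with `sparse demo.c`):

#ifdef __CHECKER__			/* sparse defines __CHECKER__ */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else					/* plain compilers see no annotations */
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise __le32;

static __le32 demo_bad(unsigned long tmp)
{
	return (__le32)tmp;		/* sparse: cast to restricted __le32 */
}

static __le32 demo_ok(unsigned long tmp)
{
	return (__force __le32)tmp;	/* __force: bit-for-bit copy intended */
}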
vim +59 arch/riscv/kvm/vcpu_sbi_sta.c
38b3390ee48801 Andrew Jones 2023-12-20 24
2a1f6bf079700f Andrew Jones 2023-12-20 25 void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
2a1f6bf079700f Andrew Jones 2023-12-20 26 {
38b3390ee48801 Andrew Jones 2023-12-20 27 gpa_t shmem = vcpu->arch.sta.shmem;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 28 u64 last_steal = vcpu->arch.sta.last_steal;
f072b272aa27d5 Andrew Jones 2024-01-31 29 __le32 __user *sequence_ptr;
f072b272aa27d5 Andrew Jones 2024-01-31 30 __le64 __user *steal_ptr;
f072b272aa27d5 Andrew Jones 2024-01-31 31 __le32 sequence_le;
f072b272aa27d5 Andrew Jones 2024-01-31 32 __le64 steal_le;
f072b272aa27d5 Andrew Jones 2024-01-31 33 u32 sequence;
f072b272aa27d5 Andrew Jones 2024-01-31 34 u64 steal;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 35 unsigned long hva;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 36 gfn_t gfn;
38b3390ee48801 Andrew Jones 2023-12-20 37
38b3390ee48801 Andrew Jones 2023-12-20 38 if (shmem == INVALID_GPA)
38b3390ee48801 Andrew Jones 2023-12-20 39 return;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 40
e9f12b5fff8ad0 Andrew Jones 2023-12-20 41 /*
e9f12b5fff8ad0 Andrew Jones 2023-12-20 42 * shmem is 64-byte aligned (see the enforcement in
e9f12b5fff8ad0 Andrew Jones 2023-12-20 43 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
e9f12b5fff8ad0 Andrew Jones 2023-12-20 44 * is 64 bytes, so we know all its offsets are in the same page.
e9f12b5fff8ad0 Andrew Jones 2023-12-20 45 */
e9f12b5fff8ad0 Andrew Jones 2023-12-20 46 gfn = shmem >> PAGE_SHIFT;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 47 hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
e9f12b5fff8ad0 Andrew Jones 2023-12-20 48
e9f12b5fff8ad0 Andrew Jones 2023-12-20 49 if (WARN_ON(kvm_is_error_hva(hva))) {
e9f12b5fff8ad0 Andrew Jones 2023-12-20 50 vcpu->arch.sta.shmem = INVALID_GPA;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 51 return;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 52 }
e9f12b5fff8ad0 Andrew Jones 2023-12-20 53
f072b272aa27d5 Andrew Jones 2024-01-31 54 sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
e9f12b5fff8ad0 Andrew Jones 2023-12-20 55 offsetof(struct sbi_sta_struct, sequence));
f072b272aa27d5 Andrew Jones 2024-01-31 56 steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
e9f12b5fff8ad0 Andrew Jones 2023-12-20 57 offsetof(struct sbi_sta_struct, steal));
e9f12b5fff8ad0 Andrew Jones 2023-12-20 58
f072b272aa27d5 Andrew Jones 2024-01-31 @59 if (WARN_ON(get_user(sequence_le, sequence_ptr)))
e9f12b5fff8ad0 Andrew Jones 2023-12-20 60 return;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 61
f072b272aa27d5 Andrew Jones 2024-01-31 62 sequence = le32_to_cpu(sequence_le);
e9f12b5fff8ad0 Andrew Jones 2023-12-20 63 sequence += 1;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 64
e9f12b5fff8ad0 Andrew Jones 2023-12-20 65 if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
e9f12b5fff8ad0 Andrew Jones 2023-12-20 66 return;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 67
f072b272aa27d5 Andrew Jones 2024-01-31 @68 if (!WARN_ON(get_user(steal_le, steal_ptr))) {
f072b272aa27d5 Andrew Jones 2024-01-31 69 steal = le64_to_cpu(steal_le);
e9f12b5fff8ad0 Andrew Jones 2023-12-20 70 vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
e9f12b5fff8ad0 Andrew Jones 2023-12-20 71 steal += vcpu->arch.sta.last_steal - last_steal;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 72 WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
e9f12b5fff8ad0 Andrew Jones 2023-12-20 73 }
e9f12b5fff8ad0 Andrew Jones 2023-12-20 74
e9f12b5fff8ad0 Andrew Jones 2023-12-20 75 sequence += 1;
e9f12b5fff8ad0 Andrew Jones 2023-12-20 76 WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
e9f12b5fff8ad0 Andrew Jones 2023-12-20 77
e9f12b5fff8ad0 Andrew Jones 2023-12-20 78 kvm_vcpu_mark_page_dirty(vcpu, gfn);
2a1f6bf079700f Andrew Jones 2023-12-20 79 }
2a1f6bf079700f Andrew Jones 2023-12-20 80
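If the report does trace back to the 64-bit staging variable, the
conventional way to keep such a macro sparse-clean is to annotate the
cast back to the destination type with __force, since only a
bit-for-bit copy is intended. A hedged sketch of that shape, reusing
the __CHECKER__ defines from the sketch above (names such as
demo_get_user_asm and __gu_tmp are illustrative, not the actual
arch/riscv/include/asm/uaccess.h macro):

#define demo_get_user_asm(x, ptr)				\
do {								\
	unsigned long __gu_tmp = 0;				\
	/* the real macro loads *(ptr) into __gu_tmp via inline asm */ \
	(x) = (__force __typeof__(x))__gu_tmp;			\
} while (0)

With x declared __le32 (as sequence_le is above), the __force cast
tells sparse the conversion from the plain integer staging variable is
deliberate, which would silence the warnings at lines 59 and 68.
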
:::::: The code at line 59 was first introduced by commit
:::::: f072b272aa27d57cf7fe6fdedb30fb50f391974e RISC-V: KVM: Use correct restricted types
:::::: TO: Andrew Jones <ajones@...tanamicro.com>
:::::: CC: Anup Patel <anup@...infault.org>
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki