diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index 780acd3dff22..80252bbaf0d8 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -285,7 +285,7 @@ void amd_brs_drain(void)
         struct perf_branch_entry *br = cpuc->lbr_entries;
         union amd_debug_extn_cfg cfg;
         u32 i, nr = 0, num, tos, start;
-        u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+        u32 shift = 64 - x86_virt_bits();

         /*
          * BRS event forced on PMC0,
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index eb31f850841a..bf89d47607e7 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -89,7 +89,7 @@ static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)

 static __always_inline u64 sign_ext_branch_ip(u64 ip)
 {
-        u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+        u32 shift = 64 - x86_virt_bits();

         return (u64)(((s64)ip << shift) >> shift);
 }
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 8e2a12235e62..ff04bfee1ec2 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1453,8 +1453,8 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
                          * canonical addresses does not affect the result of the
                          * address filter.
                          */
-                        msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
-                        msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+                        msr_a = clamp_to_ge_canonical_addr(a, x86_virt_bits());
+                        msr_b = clamp_to_le_canonical_addr(b, x86_virt_bits());
                         if (msr_b < msr_a)
                                 msr_a = msr_b = 0;
                 }
diff --git a/arch/x86/include/asm/kmsan.h b/arch/x86/include/asm/kmsan.h
index 8fa6ac0e2d76..608a3311dde6 100644
--- a/arch/x86/include/asm/kmsan.h
+++ b/arch/x86/include/asm/kmsan.h
@@ -52,7 +52,7 @@ static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
 static inline bool kmsan_phys_addr_valid(unsigned long addr)
 {
         if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
-                return !(addr >> boot_cpu_data.x86_phys_bits);
+                return !(addr >> x86_phys_bits());
         else
                 return true;
 }
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index de3118305838..ac80c32bccf1 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -89,7 +89,7 @@
 #define MCI_MISC_ADDR_GENERIC	7	/* generic */

 /* MCi_ADDR register defines */
-#define MCI_ADDR_PHYSADDR	GENMASK_ULL(boot_cpu_data.x86_phys_bits - 1, 0)
+#define MCI_ADDR_PHYSADDR	GENMASK_ULL(x86_phys_bits() - 1, 0)

 /* CTL2 register defines */
 #define MCI_CTL2_CMCI_EN	BIT_ULL(30)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 26620d7642a9..54673015a602 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -767,4 +767,21 @@ static inline void weak_wrmsr_fence(void)
         alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
 }

+static inline u8 x86_phys_bits(void)
+{
+        WARN_ON_ONCE(!boot_cpu_data.x86_phys_bits);
+        return boot_cpu_data.x86_phys_bits;
+}
+
+static inline u8 x86_virt_bits(void)
+{
+        WARN_ON_ONCE(!boot_cpu_data.x86_virt_bits);
+        return boot_cpu_data.x86_virt_bits;
+}
+
+static inline u8 x86_clflush_size(void)
+{
+        WARN_ON_ONCE(!boot_cpu_data.x86_clflush_size);
+        return boot_cpu_data.x86_clflush_size;
+}
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 345f7d905db6..1fb061ea15a2 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -62,7 +62,7 @@ static void init_c3(struct cpuinfo_x86 *c)
                 set_cpu_cap(c, X86_FEATURE_3DNOW);
 #endif
         if (c->x86 == 0x6 && c->x86_model >= 0xf) {
-                c->x86_cache_alignment = c->x86_clflush_size * 2;
+                c->x86_cache_alignment = x86_clflush_size() * 2;
                 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
         }

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0b97bcde70c6..fd5d746d7d16 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -954,8 +954,9 @@ void cpu_detect(struct cpuinfo_x86 *c)
                         c->x86_stepping = x86_stepping(tfms);

                 if (cap0 & (1<<19)) {
-                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-                        c->x86_cache_alignment = c->x86_clflush_size;
+                        if (c == &boot_cpu_data)
+                                c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+                        c->x86_cache_alignment = x86_clflush_size();
                 }
         }
 }
@@ -1123,7 +1124,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
                 }
         }
         c->x86_cache_bits = c->x86_phys_bits;
-        c->x86_cache_alignment = c->x86_clflush_size;
+        c->x86_cache_alignment = x86_clflush_size();
 }

 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -1822,16 +1823,19 @@ static void identify_cpu(struct cpuinfo_x86 *c)
         c->topo.llc_id = BAD_APICID;
         c->topo.l2c_id = BAD_APICID;
 #ifdef CONFIG_X86_64
-        c->x86_clflush_size = 64;
-        c->x86_phys_bits = 36;
-        c->x86_virt_bits = 48;
+        if (WARN_ON_ONCE(!c->x86_clflush_size))
+                c->x86_clflush_size = 64;
+        if (WARN_ON_ONCE(!c->x86_phys_bits))
+                c->x86_phys_bits = 36;
+        if (WARN_ON_ONCE(!c->x86_virt_bits))
+                c->x86_virt_bits = 48;
 #else
         c->cpuid_level = -1;	/* CPUID not detected */
         c->x86_clflush_size = 32;
         c->x86_phys_bits = 32;
         c->x86_virt_bits = 32;
 #endif
-        c->x86_cache_alignment = c->x86_clflush_size;
+        c->x86_cache_alignment = x86_clflush_size();
         memset(&c->x86_capability, 0, sizeof(c->x86_capability));
 #ifdef CONFIG_X86_VMX_FEATURE_NAMES
         memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a927a8fc9624..e97626a8e993 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -653,7 +653,7 @@ static void init_intel(struct cpuinfo_x86 *c)

 #ifdef CONFIG_X86_64
         if (c->x86 == 15)
-                c->x86_cache_alignment = c->x86_clflush_size * 2;
+                c->x86_cache_alignment = x86_clflush_size() * 2;
         if (c->x86 == 6)
                 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 #else
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 18cf79d6e2c5..3d91219ae13f 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -170,7 +170,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
                 return;
         }

-        mask = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
+        mask = (1ULL << x86_phys_bits()) - 1;
         mask &= ~((((u64)sizek) << 10) - 1);
         base = ((u64)basek) << 10;

diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index d3524778a545..bf29b4170df8 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -660,7 +660,7 @@ static void __init print_mtrr_state(void)
         }
         pr_info("MTRR variable ranges %sabled:\n",
                 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
"en" : "dis"); - high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4; + high_width = (x86_phys_bits() - (32 - PAGE_SHIFT) + 3) / 4; for (i = 0; i < num_var_ranges; ++i) { if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V) diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 767bf1c71aad..50b1a6c318e6 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -253,7 +253,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, } if ((base | (base + size - 1)) >> - (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) { + (x86_phys_bits() - PAGE_SHIFT)) { pr_warn("base or size exceeds the MTRR width\n"); return -EINVAL; } @@ -556,7 +556,7 @@ void __init mtrr_bp_init(void) const char *why = "(not available)"; unsigned long config, dummy; - phys_hi_rsvd = GENMASK(31, boot_cpu_data.x86_phys_bits - 32); + phys_hi_rsvd = GENMASK(31, x86_phys_bits() - 32); if (!generic_mtrrs && mtrr_state.enabled) { /* diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index e65fae63660e..259bb5e74532 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -130,10 +130,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (c->x86_tlbsize > 0) seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); #endif - seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); + seq_printf(m, "clflush size\t: %u\n", x86_clflush_size()); seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", - c->x86_phys_bits, c->x86_virt_bits); + x86_phys_bits(), x86_virt_bits()); seq_puts(m, "power management:"); for (i = 0; i < 32; i++) { diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 84201071dfac..6206e0fcf3d3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -813,7 +813,14 @@ void __init setup_arch(char **cmdline_p) */ early_reserve_memory(); - iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; + /* + * This was too big before. It ended up getting MAX_PHYSMEM_BITS + * even if .x86_phys_bits was eventually lowered below that. + * But that was evidently harmless, so leave it too big, but + * set it explicitly to MAX_PHYSMEM_BITS instead of taking a + * trip through .x86_phys_bits. + */ + iomem_resource.end = (1ULL << MAX_PHYSMEM_BITS) - 1; e820__memory_setup(); parse_setup_data(); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index adba49afb5fe..5426284fcd53 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1236,7 +1236,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) * the HPAs do not affect GPAs. */ if (!tdp_enabled) - g_phys_as = boot_cpu_data.x86_phys_bits; + g_phys_as = x86_phys_bits(); else if (!g_phys_as) g_phys_as = phys_as; diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 60f21bb4c27b..8437f1ee7ae9 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -84,10 +84,10 @@ static inline gfn_t kvm_mmu_max_gfn(void) static inline u8 kvm_get_shadow_phys_bits(void) { /* - * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected - * in CPU detection code, but the processor treats those reduced bits as - * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at - * the physical address bits reported by CPUID. 
+         * The value returned by x86_phys_bits() is reduced when MKTME or SME
+         * are detected in CPU detection code, but the processor treats those
+         * reduced bits as 'keyID' thus they are not reserved bits. Therefore
+         * KVM needs to look at the physical address bits reported by CPUID.
          */
         if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                 return cpuid_eax(0x80000008) & 0xff;
@@ -97,7 +97,7 @@ static inline u8 kvm_get_shadow_phys_bits(void)
          * custom CPUID. Proceed with whatever the kernel found since these features
          * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
          */
-        return boot_cpu_data.x86_phys_bits;
+        return x86_phys_bits();
 }

 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 4a599130e9c9..d04e041a54a7 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -468,7 +468,7 @@ void kvm_mmu_reset_all_pte_masks(void)
          * the most significant bits of legal physical address space.
          */
         shadow_nonpresent_or_rsvd_mask = 0;
-        low_phys_bits = boot_cpu_data.x86_phys_bits;
+        low_phys_bits = x86_phys_bits();
         if (boot_cpu_has_bug(X86_BUG_L1TF) &&
             !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                           52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index e90b429c84f1..7605560b57ac 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5054,7 +5054,7 @@ static __init void svm_adjust_mmio_mask(void)
                 return;

         enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
-        mask_bit = boot_cpu_data.x86_phys_bits;
+        mask_bit = x86_phys_bits();

         /* Increment the mask bit if it is the same as the encryption bit */
         if (enc_bit == mask_bit)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e262bc2ba4e5..f16023979ad1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8444,14 +8444,14 @@ static void __init vmx_setup_me_spte_mask(void)
          * kvm_get_shadow_phys_bits() returns shadow_phys_bits. Use
          * the former to avoid exposing shadow_phys_bits.
          *
-         * On pre-MKTME system, boot_cpu_data.x86_phys_bits equals to
+         * On pre-MKTME systems, x86_phys_bits() equals
          * shadow_phys_bits. On MKTME and/or TDX capable systems,
-         * boot_cpu_data.x86_phys_bits holds the actual physical address
-         * w/o the KeyID bits, and shadow_phys_bits equals to MAXPHYADDR
+         * x86_phys_bits() holds the actual physical address w/o the
+         * KeyID bits, and shadow_phys_bits equals the MAXPHYADDR
          * reported by CPUID. Those bits between are KeyID bits.
          */
-        if (boot_cpu_data.x86_phys_bits != kvm_get_shadow_phys_bits())
-                me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
+        if (x86_phys_bits() != kvm_get_shadow_phys_bits())
+                me_mask = rsvd_bits(x86_phys_bits(),
                         kvm_get_shadow_phys_bits() - 1);
         /*
          * Unlike SME, host kernel doesn't support setting up any
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index e3b0985bb74a..405579ff89c5 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -721,7 +721,7 @@ static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
         if (!enable_ept)
                 return true;

-        return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+        return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < x86_phys_bits();
 }

 static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index e9251b89a9e9..e8e6fdc94771 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -27,13 +27,12 @@
  */
 static void clean_cache_range(void *addr, size_t size)
 {
-        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
-        unsigned long clflush_mask = x86_clflush_size - 1;
+        unsigned long clflush_mask = x86_clflush_size() - 1;
         void *vend = addr + size;
         void *p;

         for (p = (void *)((unsigned long)addr & ~clflush_mask);
-             p < vend; p += x86_clflush_size)
+             p < vend; p += x86_clflush_size())
                 clwb(p);
 }

@@ -65,7 +64,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
                 clean_cache_range(dst, size);
         } else {
                 if (!IS_ALIGNED(dest, 8)) {
-                        dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+                        dest = ALIGN(dest, x86_clflush_size());
                         clean_cache_range(dst, 1);
                 }

diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
index 6993f026adec..63d9b110060e 100644
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -19,11 +19,14 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
          * Allow everything during early boot before 'x86_virt_bits'
          * is initialized. Needed for instruction decoding in early
          * exception handlers.
+         *
+         * CHECKME: Is this now done early enough that we can remove
+         * this???
          */
-        if (!boot_cpu_data.x86_virt_bits)
+        if (!x86_virt_bits())
                 return true;

-        return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+        return __is_canonical_address(vaddr, x86_virt_bits());
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index e9b448d1b1b7..8c3c63381a4f 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -314,7 +314,7 @@ static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)

 static void clflush_cache_range_opt(void *vaddr, unsigned int size)
 {
-        const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
+        const unsigned long clflush_size = x86_clflush_size();
         void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
         void *vend = vaddr + size;

diff --git a/arch/x86/mm/physaddr.h b/arch/x86/mm/physaddr.h
index 9f6419cafc32..c2d3e1243ab6 100644
--- a/arch/x86/mm/physaddr.h
+++ b/arch/x86/mm/physaddr.h
@@ -4,7 +4,7 @@
 static inline int phys_addr_valid(resource_size_t addr)
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
-        return !(addr >> boot_cpu_data.x86_phys_bits);
+        return !(addr >> x86_phys_bits());
 #else
         return 1;
 #endif
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index ddb798603201..66b210b07a92 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -480,22 +480,9 @@ void pcibios_scan_root(int busnum)

 void __init pcibios_set_cache_line_size(void)
 {
-        struct cpuinfo_x86 *c = &boot_cpu_data;
-
-        /*
-         * Set PCI cacheline size to that of the CPU if the CPU has reported it.
-         * (For older CPUs that don't support cpuid, we se it to 32 bytes
-         * It's also good for 386/486s (which actually have 16)
-         * as quite a few PCI devices do not support smaller values.
-         */
-        if (c->x86_clflush_size > 0) {
-                pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
-                printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
-                        pci_dfl_cache_line_size << 2);
-        } else {
-                pci_dfl_cache_line_size = 32 >> 2;
-                printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
-        }
+        pci_dfl_cache_line_size = boot_cpu_data.x86_clflush_size >> 2;
+        printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
+               pci_dfl_cache_line_size << 2);
 }

 int __init pcibios_init(void)
diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
index 271092f2700a..c6fc0e19eddd 100644
--- a/drivers/acpi/acpi_fpdt.c
+++ b/drivers/acpi/acpi_fpdt.c
@@ -151,7 +151,7 @@ static bool fpdt_address_valid(u64 address)
          * On some systems the table contains invalid addresses
          * with unsuppored high address bits set, check for this.
          */
-        return !(address >> boot_cpu_data.x86_phys_bits);
+        return !(address >> x86_phys_bits());
 }
 #else
 static bool fpdt_address_valid(u64 address)
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 7051c9c909c2..1d193e87d901 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -52,7 +52,7 @@ drm_clflush_page(struct page *page)
 {
         uint8_t *page_virtual;
         unsigned int i;
-        const int size = boot_cpu_data.x86_clflush_size;
+        const int size = x86_clflush_size();

         if (unlikely(page == NULL))
                 return;
@@ -160,7 +160,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
         if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
-                const int size = boot_cpu_data.x86_clflush_size;
+                const int size = x86_clflush_size();
                 void *end = addr + length;

                 addr = (void *)(((unsigned long)addr) & -size);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 2905df83e180..5c8385a69f37 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1203,8 +1203,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                  */
                 remain = length;
                 if (dst_needs_clflush & CLFLUSH_BEFORE)
-                        remain = round_up(remain,
-                                          boot_cpu_data.x86_clflush_size);
+                        remain = round_up(remain, x86_clflush_size());

                 ptr = dst;
                 x = offset_in_page(offset);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 92758b6b41f0..4a7f753d81f3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -696,7 +696,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
          */
         partial_cacheline_write = 0;
         if (needs_clflush & CLFLUSH_BEFORE)
-                partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
+                partial_cacheline_write = x86_clflush_size() - 1;

         user_data = u64_to_user_ptr(args->data_ptr);
         remain = args->size;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 074cb785eafc..a5b804cb9f7a 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1229,7 +1229,7 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
          */
 #ifdef CONFIG_X86
         if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
-            likely(boot_cpu_data.x86_clflush_size == 64) &&
+            likely(x86_clflush_size() == 64) &&
             likely(size >= 768)) {
                 do {
                         memcpy((void *)dest, (void *)source, 64);