Message-ID: <2024091245-showdown-chance-de30@gregkh>
Date: Thu, 12 Sep 2024 13:48:45 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
akpm@...ux-foundation.org,
torvalds@...ux-foundation.org,
stable@...r.kernel.org
Cc: lwn@....net,
jslaby@...e.cz,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: Linux 6.6.51
diff --git a/MAINTAINERS b/MAINTAINERS
index f09415b2b3c5..ae4c0cec5073 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13702,7 +13702,7 @@ M: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
M: "Paul E. McKenney" <paulmck@...nel.org>
L: linux-kernel@...r.kernel.org
S: Supported
-F: arch/powerpc/include/asm/membarrier.h
+F: arch/*/include/asm/membarrier.h
F: include/uapi/linux/membarrier.h
F: kernel/sched/membarrier.c
diff --git a/Makefile b/Makefile
index f7efbb59c986..6dea0c216368 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 50
+SUBLEVEL = 51
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 16b02f44c7d3..d657b84b6bf7 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -151,6 +151,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+#define pgdp_get(pgpd) READ_ONCE(*pgdp)
+
#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 6792a1f83f2a..a407f9cd549e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
return acpi_cpu_get_madt_gicc(cpu)->uid;
}
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (acpi_cpu_get_madt_gicc(cpu) &&
+ uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
+
+ return -EINVAL;
+}
+
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
int apei_claim_sea(struct pt_regs *regs);
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index ccbff21ce1fa..2465f291c7e1 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu)
return acpi_early_node_map[cpu];
}
-static inline int get_cpu_for_acpi_id(u32 uid)
-{
- int cpu;
-
- for (cpu = 0; cpu < nr_cpu_ids; cpu++)
- if (uid == get_acpi_id_for_cpu(cpu))
- return cpu;
-
- return -EINVAL;
-}
-
static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
const unsigned long end)
{
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 1acfa704c8d0..0eddd4a66b87 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
+#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -170,7 +171,7 @@ unsigned long __init relocate_kernel(void)
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
- char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
@@ -182,6 +183,7 @@ unsigned long __init relocate_kernel(void)
random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+ early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 368e8475870f..5f6e9e2ebbdb 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -303,13 +303,6 @@ int r4k_clockevent_init(void)
if (!c0_compare_int_usable())
return -ENXIO;
- /*
- * With vectored interrupts things are getting platform specific.
- * get_c0_compare_int is a hook to allow a platform to return the
- * interrupt number of its liking.
- */
- irq = get_c0_compare_int();
-
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
@@ -320,7 +313,6 @@ int r4k_clockevent_init(void)
min_delta = calculate_min_delta();
cd->rating = 300;
- cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
cd->event_handler = mips_event_handler;
@@ -332,6 +324,13 @@ int r4k_clockevent_init(void)
cp0_timer_irq_installed = 1;
+ /*
+ * With vectored interrupts things are getting platform specific.
+ * get_c0_compare_int is a hook to allow a platform to return the
+ * interrupt number of its liking.
+ */
+ irq = get_c0_compare_int();
+
if (request_irq(irq, c0_compare_interrupt, flags, "timer",
c0_compare_interrupt))
pr_err("Failed to request irq %d (timer)\n", irq);
diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h
index 6ddced0415cb..7dc24b8632d7 100644
--- a/arch/powerpc/include/asm/nohash/mmu-e500.h
+++ b/arch/powerpc/include/asm/nohash/mmu-e500.h
@@ -303,8 +303,7 @@ extern unsigned long linear_map_top;
extern int book3e_htw_mode;
#define PPC_HTW_NONE 0
-#define PPC_HTW_IBM 1
-#define PPC_HTW_E6500 2
+#define PPC_HTW_E6500 1
/*
* 64-bit booke platforms don't load the tlb in the tlb miss handler code.
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 426e1ccc6971..8f57107000a2 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -74,6 +74,8 @@ SECTIONS
.got : { *(.got) } :text
.plt : { *(.plt) }
+ .rela.dyn : { *(.rela .rela*) }
+
_end = .;
__end = .;
PROVIDE(end = .);
@@ -87,7 +89,7 @@ SECTIONS
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.got1 .glink .iplt .rela*)
+ *(.got1 .glink .iplt)
}
}
diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index bda6c8cdd459..400819258c06 100644
--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -69,7 +69,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.gcc_except_table : { *(.gcc_except_table) }
- .rela.dyn ALIGN(8) : { *(.rela.dyn) }
+ .rela.dyn ALIGN(8) : { *(.rela .rela*) }
.got ALIGN(8) : { *(.got .toc) }
@@ -86,7 +86,7 @@ SECTIONS
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
*(.opd)
- *(.glink .iplt .plt .rela*)
+ *(.glink .iplt .plt)
}
}
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 6dd2f46bd3ef..8830267789c9 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -715,7 +715,15 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
}
release:
- qnodesp->count--; /* release the node */
+ /*
+ * Clear the lock before releasing the node, as another CPU might see stale
+ * values if an interrupt occurs after we increment qnodesp->count
+ * but before node->lock is initialized. The barrier ensures that
+ * there are no further stores to the node after it has been released.
+ */
+ node->lock = NULL;
+ barrier();
+ qnodesp->count--;
}
void queued_spin_lock_slowpath(struct qspinlock *lock)
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index f3894e79d5f7..24b445a5fcac 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -3,7 +3,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y += mmu_context.o tlb.o tlb_low.o kup.o
-obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
+obj-$(CONFIG_PPC_BOOK3E_64) += tlb_64e.o tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 5ffa0af4328a..f57dc721d063 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -110,28 +110,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
};
#endif
-/* The variables below are currently only used on 64-bit Book3E
- * though this will probably be made common with other nohash
- * implementations at some point
- */
-#ifdef CONFIG_PPC64
-
-int mmu_pte_psize; /* Page size used for PTE pages */
-int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
-int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
-unsigned long linear_map_top; /* Top of linear mapping */
-
-
-/*
- * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
- * exceptions. This is used for bolted and e6500 TLB miss handlers which
- * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
- * this is set to zero.
- */
-int extlb_level_exc;
-
-#endif /* CONFIG_PPC64 */
-
#ifdef CONFIG_PPC_E500
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
@@ -358,381 +336,7 @@ void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
-/*
- * Below are functions specific to the 64-bit variant of Book3E though that
- * may change in the future
- */
-
-#ifdef CONFIG_PPC64
-
-/*
- * Handling of virtual linear page tables or indirect TLB entries
- * flushing when PTE pages are freed
- */
-void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
-{
- int tsize = mmu_psize_defs[mmu_pte_psize].enc;
-
- if (book3e_htw_mode != PPC_HTW_NONE) {
- unsigned long start = address & PMD_MASK;
- unsigned long end = address + PMD_SIZE;
- unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
-
- /* This isn't the most optimal, ideally we would factor out the
- * while preempt & CPU mask mucking around, or even the IPI but
- * it will do for now
- */
- while (start < end) {
- __flush_tlb_page(tlb->mm, start, tsize, 1);
- start += size;
- }
- } else {
- unsigned long rmask = 0xf000000000000000ul;
- unsigned long rid = (address & rmask) | 0x1000000000000000ul;
- unsigned long vpte = address & ~rmask;
-
- vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
- vpte |= rid;
- __flush_tlb_page(tlb->mm, vpte, tsize, 0);
- }
-}
-
-static void __init setup_page_sizes(void)
-{
- unsigned int tlb0cfg;
- unsigned int tlb0ps;
- unsigned int eptcfg;
- int i, psize;
-
-#ifdef CONFIG_PPC_E500
- unsigned int mmucfg = mfspr(SPRN_MMUCFG);
- int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
-
- if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
- unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
- unsigned int min_pg, max_pg;
-
- min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
- max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
-
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
- struct mmu_psize_def *def;
- unsigned int shift;
-
- def = &mmu_psize_defs[psize];
- shift = def->shift;
-
- if (shift == 0 || shift & 1)
- continue;
-
- /* adjust to be in terms of 4^shift Kb */
- shift = (shift - 10) >> 1;
-
- if ((shift >= min_pg) && (shift <= max_pg))
- def->flags |= MMU_PAGE_SIZE_DIRECT;
- }
-
- goto out;
- }
-
- if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
- u32 tlb1cfg, tlb1ps;
-
- tlb0cfg = mfspr(SPRN_TLB0CFG);
- tlb1cfg = mfspr(SPRN_TLB1CFG);
- tlb1ps = mfspr(SPRN_TLB1PS);
- eptcfg = mfspr(SPRN_EPTCFG);
-
- if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
- book3e_htw_mode = PPC_HTW_E6500;
-
- /*
- * We expect 4K subpage size and unrestricted indirect size.
- * The lack of a restriction on indirect size is a Freescale
- * extension, indicated by PSn = 0 but SPSn != 0.
- */
- if (eptcfg != 2)
- book3e_htw_mode = PPC_HTW_NONE;
-
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
- struct mmu_psize_def *def = &mmu_psize_defs[psize];
-
- if (!def->shift)
- continue;
-
- if (tlb1ps & (1U << (def->shift - 10))) {
- def->flags |= MMU_PAGE_SIZE_DIRECT;
-
- if (book3e_htw_mode && psize == MMU_PAGE_2M)
- def->flags |= MMU_PAGE_SIZE_INDIRECT;
- }
- }
-
- goto out;
- }
-#endif
-
- tlb0cfg = mfspr(SPRN_TLB0CFG);
- tlb0ps = mfspr(SPRN_TLB0PS);
- eptcfg = mfspr(SPRN_EPTCFG);
-
- /* Look for supported direct sizes */
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
- struct mmu_psize_def *def = &mmu_psize_defs[psize];
-
- if (tlb0ps & (1U << (def->shift - 10)))
- def->flags |= MMU_PAGE_SIZE_DIRECT;
- }
-
- /* Indirect page sizes supported ? */
- if ((tlb0cfg & TLBnCFG_IND) == 0 ||
- (tlb0cfg & TLBnCFG_PT) == 0)
- goto out;
-
- book3e_htw_mode = PPC_HTW_IBM;
-
- /* Now, we only deal with one IND page size for each
- * direct size. Hopefully all implementations today are
- * unambiguous, but we might want to be careful in the
- * future.
- */
- for (i = 0; i < 3; i++) {
- unsigned int ps, sps;
-
- sps = eptcfg & 0x1f;
- eptcfg >>= 5;
- ps = eptcfg & 0x1f;
- eptcfg >>= 5;
- if (!ps || !sps)
- continue;
- for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
- struct mmu_psize_def *def = &mmu_psize_defs[psize];
-
- if (ps == (def->shift - 10))
- def->flags |= MMU_PAGE_SIZE_INDIRECT;
- if (sps == (def->shift - 10))
- def->ind = ps + 10;
- }
- }
-
-out:
- /* Cleanup array and print summary */
- pr_info("MMU: Supported page sizes\n");
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
- struct mmu_psize_def *def = &mmu_psize_defs[psize];
- const char *__page_type_names[] = {
- "unsupported",
- "direct",
- "indirect",
- "direct & indirect"
- };
- if (def->flags == 0) {
- def->shift = 0;
- continue;
- }
- pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
- __page_type_names[def->flags & 0x3]);
- }
-}
-
-static void __init setup_mmu_htw(void)
-{
- /*
- * If we want to use HW tablewalk, enable it by patching the TLB miss
- * handlers to branch to the one dedicated to it.
- */
-
- switch (book3e_htw_mode) {
- case PPC_HTW_IBM:
- patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
- patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
- break;
-#ifdef CONFIG_PPC_E500
- case PPC_HTW_E6500:
- extlb_level_exc = EX_TLB_SIZE;
- patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
- patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
- break;
-#endif
- }
- pr_info("MMU: Book3E HW tablewalk %s\n",
- book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
-}
-
-/*
- * Early initialization of the MMU TLB code
- */
-static void early_init_this_mmu(void)
-{
- unsigned int mas4;
-
- /* Set MAS4 based on page table setting */
-
- mas4 = 0x4 << MAS4_WIMGED_SHIFT;
- switch (book3e_htw_mode) {
- case PPC_HTW_E6500:
- mas4 |= MAS4_INDD;
- mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
- mas4 |= MAS4_TLBSELD(1);
- mmu_pte_psize = MMU_PAGE_2M;
- break;
-
- case PPC_HTW_IBM:
- mas4 |= MAS4_INDD;
- mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
- mmu_pte_psize = MMU_PAGE_1M;
- break;
-
- case PPC_HTW_NONE:
- mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
- mmu_pte_psize = mmu_virtual_psize;
- break;
- }
- mtspr(SPRN_MAS4, mas4);
-
-#ifdef CONFIG_PPC_E500
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
- unsigned int num_cams;
- bool map = true;
-
- /* use a quarter of the TLBCAM for bolted linear map */
- num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
-
- /*
- * Only do the mapping once per core, or else the
- * transient mapping would cause problems.
- */
-#ifdef CONFIG_SMP
- if (hweight32(get_tensr()) > 1)
- map = false;
-#endif
-
- if (map)
- linear_map_top = map_mem_in_cams(linear_map_top,
- num_cams, false, true);
- }
-#endif
-
- /* A sync won't hurt us after mucking around with
- * the MMU configuration
- */
- mb();
-}
-
-static void __init early_init_mmu_global(void)
-{
- /* XXX This should be decided at runtime based on supported
- * page sizes in the TLB, but for now let's assume 16M is
- * always there and a good fit (which it probably is)
- *
- * Freescale booke only supports 4K pages in TLB0, so use that.
- */
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
- mmu_vmemmap_psize = MMU_PAGE_4K;
- else
- mmu_vmemmap_psize = MMU_PAGE_16M;
-
- /* XXX This code only checks for TLB 0 capabilities and doesn't
- * check what page size combos are supported by the HW. It
- * also doesn't handle the case where a separate array holds
- * the IND entries from the array loaded by the PT.
- */
- /* Look for supported page sizes */
- setup_page_sizes();
-
- /* Look for HW tablewalk support */
- setup_mmu_htw();
-
-#ifdef CONFIG_PPC_E500
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
- if (book3e_htw_mode == PPC_HTW_NONE) {
- extlb_level_exc = EX_TLB_SIZE;
- patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
- patch_exception(0x1e0,
- exc_instruction_tlb_miss_bolted_book3e);
- }
- }
-#endif
-
- /* Set the global containing the top of the linear mapping
- * for use by the TLB miss code
- */
- linear_map_top = memblock_end_of_DRAM();
-
- ioremap_bot = IOREMAP_BASE;
-}
-
-static void __init early_mmu_set_memory_limit(void)
-{
-#ifdef CONFIG_PPC_E500
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
- /*
- * Limit memory so we dont have linear faults.
- * Unlike memblock_set_current_limit, which limits
- * memory available during early boot, this permanently
- * reduces the memory available to Linux. We need to
- * do this because highmem is not supported on 64-bit.
- */
- memblock_enforce_memory_limit(linear_map_top);
- }
-#endif
-
- memblock_set_current_limit(linear_map_top);
-}
-
-/* boot cpu only */
-void __init early_init_mmu(void)
-{
- early_init_mmu_global();
- early_init_this_mmu();
- early_mmu_set_memory_limit();
-}
-
-void early_init_mmu_secondary(void)
-{
- early_init_this_mmu();
-}
-
-void setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size)
-{
- /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
- * the bolted TLB entry. We know for now that only 1G
- * entries are supported though that may eventually
- * change.
- *
- * on FSL Embedded 64-bit, usually all RAM is bolted, but with
- * unusual memory sizes it's possible for some RAM to not be mapped
- * (such RAM is not used at all by Linux, since we don't support
- * highmem on 64-bit). We limit ppc64_rma_size to what would be
- * mappable if this memblock is the only one. Additional memblocks
- * can only increase, not decrease, the amount that ends up getting
- * mapped. We still limit max to 1G even if we'll eventually map
- * more. This is due to what the early init code is set up to do.
- *
- * We crop it to the size of the first MEMBLOCK to
- * avoid going over total available memory just in case...
- */
-#ifdef CONFIG_PPC_E500
- if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
- unsigned long linear_sz;
- unsigned int num_cams;
-
- /* use a quarter of the TLBCAM for bolted linear map */
- num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
-
- linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
- true, true);
-
- ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
- } else
-#endif
- ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
-
- /* Finally limit subsequent allocations */
- memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
-}
-#else /* ! CONFIG_PPC64 */
+#ifndef CONFIG_PPC64
void __init early_init_mmu(void)
{
unsigned long root = of_get_flat_dt_root();
diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c
new file mode 100644
index 000000000000..b6af3ec4d001
--- /dev/null
+++ b/arch/powerpc/mm/nohash/tlb_64e.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2008,2009 Ben Herrenschmidt <benh@...nel.crashing.org>
+ * IBM Corp.
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@...uxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@...anu.edu.au)
+ * and Cort Dougan (PReP) (cort@...nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+#include <asm/code-patching.h>
+#include <asm/cputhreads.h>
+
+#include <mm/mmu_decl.h>
+
+/* The variables below are currently only used on 64-bit Book3E
+ * though this will probably be made common with other nohash
+ * implementations at some point
+ */
+static int mmu_pte_psize; /* Page size used for PTE pages */
+int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
+int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
+unsigned long linear_map_top; /* Top of linear mapping */
+
+
+/*
+ * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
+ * exceptions. This is used for bolted and e6500 TLB miss handlers which
+ * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
+ * this is set to zero.
+ */
+int extlb_level_exc;
+
+/*
+ * Handling of virtual linear page tables or indirect TLB entries
+ * flushing when PTE pages are freed
+ */
+void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+ int tsize = mmu_psize_defs[mmu_pte_psize].enc;
+
+ if (book3e_htw_mode != PPC_HTW_NONE) {
+ unsigned long start = address & PMD_MASK;
+ unsigned long end = address + PMD_SIZE;
+ unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
+
+ /* This isn't the most optimal, ideally we would factor out the
+ * while preempt & CPU mask mucking around, or even the IPI but
+ * it will do for now
+ */
+ while (start < end) {
+ __flush_tlb_page(tlb->mm, start, tsize, 1);
+ start += size;
+ }
+ } else {
+ unsigned long rmask = 0xf000000000000000ul;
+ unsigned long rid = (address & rmask) | 0x1000000000000000ul;
+ unsigned long vpte = address & ~rmask;
+
+ vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
+ vpte |= rid;
+ __flush_tlb_page(tlb->mm, vpte, tsize, 0);
+ }
+}
+
+static void __init setup_page_sizes(void)
+{
+ unsigned int tlb0cfg;
+ unsigned int eptcfg;
+ int psize;
+
+#ifdef CONFIG_PPC_E500
+ unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+ int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
+
+ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+ unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
+ unsigned int min_pg, max_pg;
+
+ min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
+ max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+ struct mmu_psize_def *def;
+ unsigned int shift;
+
+ def = &mmu_psize_defs[psize];
+ shift = def->shift;
+
+ if (shift == 0 || shift & 1)
+ continue;
+
+ /* adjust to be in terms of 4^shift Kb */
+ shift = (shift - 10) >> 1;
+
+ if ((shift >= min_pg) && (shift <= max_pg))
+ def->flags |= MMU_PAGE_SIZE_DIRECT;
+ }
+
+ goto out;
+ }
+
+ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+ u32 tlb1cfg, tlb1ps;
+
+ tlb0cfg = mfspr(SPRN_TLB0CFG);
+ tlb1cfg = mfspr(SPRN_TLB1CFG);
+ tlb1ps = mfspr(SPRN_TLB1PS);
+ eptcfg = mfspr(SPRN_EPTCFG);
+
+ if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+ book3e_htw_mode = PPC_HTW_E6500;
+
+ /*
+ * We expect 4K subpage size and unrestricted indirect size.
+ * The lack of a restriction on indirect size is a Freescale
+ * extension, indicated by PSn = 0 but SPSn != 0.
+ */
+ if (eptcfg != 2)
+ book3e_htw_mode = PPC_HTW_NONE;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+ struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+ if (!def->shift)
+ continue;
+
+ if (tlb1ps & (1U << (def->shift - 10))) {
+ def->flags |= MMU_PAGE_SIZE_DIRECT;
+
+ if (book3e_htw_mode && psize == MMU_PAGE_2M)
+ def->flags |= MMU_PAGE_SIZE_INDIRECT;
+ }
+ }
+
+ goto out;
+ }
+#endif
+out:
+ /* Cleanup array and print summary */
+ pr_info("MMU: Supported page sizes\n");
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+ struct mmu_psize_def *def = &mmu_psize_defs[psize];
+ const char *__page_type_names[] = {
+ "unsupported",
+ "direct",
+ "indirect",
+ "direct & indirect"
+ };
+ if (def->flags == 0) {
+ def->shift = 0;
+ continue;
+ }
+ pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
+ __page_type_names[def->flags & 0x3]);
+ }
+}
+
+static void __init setup_mmu_htw(void)
+{
+ /*
+ * If we want to use HW tablewalk, enable it by patching the TLB miss
+ * handlers to branch to the one dedicated to it.
+ */
+
+ switch (book3e_htw_mode) {
+#ifdef CONFIG_PPC_E500
+ case PPC_HTW_E6500:
+ extlb_level_exc = EX_TLB_SIZE;
+ patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+ patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+ break;
+#endif
+ }
+ pr_info("MMU: Book3E HW tablewalk %s\n",
+ book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
+}
+
+/*
+ * Early initialization of the MMU TLB code
+ */
+static void early_init_this_mmu(void)
+{
+ unsigned int mas4;
+
+ /* Set MAS4 based on page table setting */
+
+ mas4 = 0x4 << MAS4_WIMGED_SHIFT;
+ switch (book3e_htw_mode) {
+ case PPC_HTW_E6500:
+ mas4 |= MAS4_INDD;
+ mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+ mas4 |= MAS4_TLBSELD(1);
+ mmu_pte_psize = MMU_PAGE_2M;
+ break;
+
+ case PPC_HTW_NONE:
+ mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
+ mmu_pte_psize = mmu_virtual_psize;
+ break;
+ }
+ mtspr(SPRN_MAS4, mas4);
+
+#ifdef CONFIG_PPC_E500
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ unsigned int num_cams;
+ bool map = true;
+
+ /* use a quarter of the TLBCAM for bolted linear map */
+ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+
+ /*
+ * Only do the mapping once per core, or else the
+ * transient mapping would cause problems.
+ */
+#ifdef CONFIG_SMP
+ if (hweight32(get_tensr()) > 1)
+ map = false;
+#endif
+
+ if (map)
+ linear_map_top = map_mem_in_cams(linear_map_top,
+ num_cams, false, true);
+ }
+#endif
+
+ /* A sync won't hurt us after mucking around with
+ * the MMU configuration
+ */
+ mb();
+}
+
+static void __init early_init_mmu_global(void)
+{
+ /* XXX This should be decided at runtime based on supported
+ * page sizes in the TLB, but for now let's assume 16M is
+ * always there and a good fit (which it probably is)
+ *
+ * Freescale booke only supports 4K pages in TLB0, so use that.
+ */
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+ mmu_vmemmap_psize = MMU_PAGE_4K;
+ else
+ mmu_vmemmap_psize = MMU_PAGE_16M;
+
+ /* XXX This code only checks for TLB 0 capabilities and doesn't
+ * check what page size combos are supported by the HW. It
+ * also doesn't handle the case where a separate array holds
+ * the IND entries from the array loaded by the PT.
+ */
+ /* Look for supported page sizes */
+ setup_page_sizes();
+
+ /* Look for HW tablewalk support */
+ setup_mmu_htw();
+
+#ifdef CONFIG_PPC_E500
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ if (book3e_htw_mode == PPC_HTW_NONE) {
+ extlb_level_exc = EX_TLB_SIZE;
+ patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+ patch_exception(0x1e0,
+ exc_instruction_tlb_miss_bolted_book3e);
+ }
+ }
+#endif
+
+ /* Set the global containing the top of the linear mapping
+ * for use by the TLB miss code
+ */
+ linear_map_top = memblock_end_of_DRAM();
+
+ ioremap_bot = IOREMAP_BASE;
+}
+
+static void __init early_mmu_set_memory_limit(void)
+{
+#ifdef CONFIG_PPC_E500
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ /*
+ * Limit memory so we dont have linear faults.
+ * Unlike memblock_set_current_limit, which limits
+ * memory available during early boot, this permanently
+ * reduces the memory available to Linux. We need to
+ * do this because highmem is not supported on 64-bit.
+ */
+ memblock_enforce_memory_limit(linear_map_top);
+ }
+#endif
+
+ memblock_set_current_limit(linear_map_top);
+}
+
+/* boot cpu only */
+void __init early_init_mmu(void)
+{
+ early_init_mmu_global();
+ early_init_this_mmu();
+ early_mmu_set_memory_limit();
+}
+
+void early_init_mmu_secondary(void)
+{
+ early_init_this_mmu();
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ phys_addr_t first_memblock_size)
+{
+ /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
+ * the bolted TLB entry. We know for now that only 1G
+ * entries are supported though that may eventually
+ * change.
+ *
+ * on FSL Embedded 64-bit, usually all RAM is bolted, but with
+ * unusual memory sizes it's possible for some RAM to not be mapped
+ * (such RAM is not used at all by Linux, since we don't support
+ * highmem on 64-bit). We limit ppc64_rma_size to what would be
+ * mappable if this memblock is the only one. Additional memblocks
+ * can only increase, not decrease, the amount that ends up getting
+ * mapped. We still limit max to 1G even if we'll eventually map
+ * more. This is due to what the early init code is set up to do.
+ *
+ * We crop it to the size of the first MEMBLOCK to
+ * avoid going over total available memory just in case...
+ */
+#ifdef CONFIG_PPC_E500
+ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ unsigned long linear_sz;
+ unsigned int num_cams;
+
+ /* use a quarter of the TLBCAM for bolted linear map */
+ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+
+ linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
+ true, true);
+
+ ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
+ } else
+#endif
+ ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+ /* Finally limit subsequent allocations */
+ memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+}
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 7e0b8fe1c279..b0eb3f7eaed1 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -893,201 +893,6 @@ virt_page_table_tlb_miss_whacko_fault:
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
-
-/**************************************************************
- * *
- * TLB miss handling for Book3E with hw page table support *
- * *
- **************************************************************/
-
-
-/* Data TLB miss */
- START_EXCEPTION(data_tlb_miss_htw)
- TLB_MISS_PROLOG
-
- /* Now we handle the fault proper. We only save DEAR in normal
- * fault case since that's the only interesting values here.
- * We could probably also optimize by not saving SRR0/1 in the
- * linear mapping case but I'll leave that for later
- */
- mfspr r14,SPRN_ESR
- mfspr r16,SPRN_DEAR /* get faulting address */
- srdi r11,r16,44 /* get region */
- xoris r11,r11,0xc
- cmpldi cr0,r11,0 /* linear mapping ? */
- beq tlb_load_linear /* yes -> go to linear map load */
- cmpldi cr1,r11,1 /* vmalloc mapping ? */
-
- /* We do the user/kernel test for the PID here along with the RW test
- */
- srdi. r11,r16,60 /* Check for user region */
- ld r15,PACAPGD(r13) /* Load user pgdir */
- beq htw_tlb_miss
-
- /* XXX replace the RMW cycles with immediate loads + writes */
-1: mfspr r10,SPRN_MAS1
- rlwinm r10,r10,0,16,1 /* Clear TID */
- mtspr SPRN_MAS1,r10
- ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
- beq+ cr1,htw_tlb_miss
-
- /* We got a crappy address, just fault with whatever DEAR and ESR
- * are here
- */
- TLB_MISS_EPILOG_ERROR
- b exc_data_storage_book3e
-
-/* Instruction TLB miss */
- START_EXCEPTION(instruction_tlb_miss_htw)
- TLB_MISS_PROLOG
-
- /* If we take a recursive fault, the second level handler may need
- * to know whether we are handling a data or instruction fault in
- * order to get to the right store fault handler. We provide that
- * info by keeping a crazy value for ESR in r14
- */
- li r14,-1 /* store to exception frame is done later */
-
- /* Now we handle the fault proper. We only save DEAR in the non
- * linear mapping case since we know the linear mapping case will
- * not re-enter. We could indeed optimize and also not save SRR0/1
- * in the linear mapping case but I'll leave that for later
- *
- * Faulting address is SRR0 which is already in r16
- */
- srdi r11,r16,44 /* get region */
- xoris r11,r11,0xc
- cmpldi cr0,r11,0 /* linear mapping ? */
- beq tlb_load_linear /* yes -> go to linear map load */
- cmpldi cr1,r11,1 /* vmalloc mapping ? */
-
- /* We do the user/kernel test for the PID here along with the RW test
- */
- srdi. r11,r16,60 /* Check for user region */
- ld r15,PACAPGD(r13) /* Load user pgdir */
- beq htw_tlb_miss
-
- /* XXX replace the RMW cycles with immediate loads + writes */
-1: mfspr r10,SPRN_MAS1
- rlwinm r10,r10,0,16,1 /* Clear TID */
- mtspr SPRN_MAS1,r10
- ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
- beq+ htw_tlb_miss
-
- /* We got a crappy address, just fault */
- TLB_MISS_EPILOG_ERROR
- b exc_instruction_storage_book3e
-
-
-/*
- * This is the guts of the second-level TLB miss handler for direct
- * misses. We are entered with:
- *
- * r16 = virtual page table faulting address
- * r15 = PGD pointer
- * r14 = ESR
- * r13 = PACA
- * r12 = TLB exception frame in PACA
- * r11 = crap (free to use)
- * r10 = crap (free to use)
- *
- * It can be re-entered by the linear mapping miss handler. However, to
- * avoid too much complication, it will save/restore things for us
- */
-htw_tlb_miss:
-#ifdef CONFIG_PPC_KUAP
- mfspr r10,SPRN_MAS1
- rlwinm. r10,r10,0,0x3fff0000
- beq- htw_tlb_miss_fault /* KUAP fault */
-#endif
- /* Search if we already have a TLB entry for that virtual address, and
- * if we do, bail out.
- *
- * MAS1:IND should be already set based on MAS4
- */
- PPC_TLBSRX_DOT(0,R16)
- beq htw_tlb_miss_done
-
- /* Now, we need to walk the page tables. First check if we are in
- * range.
- */
- rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
- bne- htw_tlb_miss_fault
-
- /* Get the PGD pointer */
- cmpldi cr0,r15,0
- beq- htw_tlb_miss_fault
-
- /* Get to PGD entry */
- rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
- clrrdi r10,r11,3
- ldx r15,r10,r15
- cmpdi cr0,r15,0
- bge htw_tlb_miss_fault
-
- /* Get to PUD entry */
- rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
- clrrdi r10,r11,3
- ldx r15,r10,r15
- cmpdi cr0,r15,0
- bge htw_tlb_miss_fault
-
- /* Get to PMD entry */
- rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
- clrrdi r10,r11,3
- ldx r15,r10,r15
- cmpdi cr0,r15,0
- bge htw_tlb_miss_fault
-
- /* Ok, we're all right, we can now create an indirect entry for
- * a 1M or 256M page.
- *
- * The last trick is now that because we use "half" pages for
- * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
- * for an added LSB bit to the RPN. For 64K pages, there is no
- * problem as we already use 32K arrays (half PTE pages), but for
- * 4K page we need to extract a bit from the virtual address and
- * insert it into the "PA52" bit of the RPN.
- */
- rlwimi r15,r16,32-9,20,20
- /* Now we build the MAS:
- *
- * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
- * MAS 1 : Almost fully setup
- * - PID already updated by caller if necessary
- * - TSIZE for now is base ind page size always
- * MAS 2 : Use defaults
- * MAS 3+7 : Needs to be done
- */
- ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-
- srdi r16,r10,32
- mtspr SPRN_MAS3,r10
- mtspr SPRN_MAS7,r16
-
- tlbwe
-
-htw_tlb_miss_done:
- /* We don't bother with restoring DEAR or ESR since we know we are
- * level 0 and just going back to userland. They are only needed
- * if you are going to take an access fault
- */
- TLB_MISS_EPILOG_SUCCESS
- rfi
-
-htw_tlb_miss_fault:
- /* We need to check if it was an instruction miss. We know this
- * though because r14 would contain -1
- */
- cmpdi cr0,r14,-1
- beq 1f
- mtspr SPRN_DEAR,r16
- mtspr SPRN_ESR,r14
- TLB_MISS_EPILOG_ERROR
- b exc_data_storage_book3e
-1: TLB_MISS_EPILOG_ERROR
- b exc_instruction_storage_book3e
-
/*
* This is the guts of "any" level TLB miss handler for kernel linear
* mapping misses. We are entered with:
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c785a0200573..fe30b24d52e1 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -27,6 +27,7 @@ config RISCV
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MMIOWB
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PMEM_API
@@ -489,8 +490,8 @@ config RISCV_ISA_SVPBMT
config TOOLCHAIN_HAS_V
bool
default y
- depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv)
- depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv)
+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv)
+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv)
depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800
depends on AS_HAS_OPTION_ARCH
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
index 0bbffd528096..7388edd88986 100644
--- a/arch/riscv/include/asm/kfence.h
+++ b/arch/riscv/include/asm/kfence.h
@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
pte_t *pte = virt_to_kpte(addr);
if (protect)
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
else
- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h
new file mode 100644
index 000000000000..6c016ebb5020
--- /dev/null
+++ b/arch/riscv/include/asm/membarrier.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_RISCV_MEMBARRIER_H
+#define _ASM_RISCV_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ /*
+ * Only need the full barrier when switching between processes.
+ * Barrier when switching from kernel to userspace is not
+ * required here, given that it is implied by mmdrop(). Barrier
+ * when switching from userspace to kernel is not needed after
+ * store to rq->curr.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ likely(!(atomic_read(&next->membarrier_state) &
+ (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+ return;
+
+ /*
+ * The membarrier system call requires a full memory barrier
+ * after storing to rq->curr, before going back to user-space.
+ * Matches a full barrier in the proximity of the membarrier
+ * system call entry.
+ */
+ smp_mb();
+}
+
+#endif /* _ASM_RISCV_MEMBARRIER_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 7a5097202e15..3272ca7a5270 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -198,7 +198,7 @@ static inline int pud_user(pud_t pud)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
- *pudp = pud;
+ WRITE_ONCE(*pudp, pud);
}
static inline void pud_clear(pud_t *pudp)
@@ -274,7 +274,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
if (pgtable_l4_enabled)
- *p4dp = p4d;
+ WRITE_ONCE(*p4dp, p4d);
else
set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
}
@@ -336,18 +336,12 @@ static inline struct page *p4d_page(p4d_t p4d)
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset pud_offset
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
- if (pgtable_l4_enabled)
- return p4d_pgtable(*p4d) + pud_index(address);
-
- return (pud_t *)p4d;
-}
+pud_t *pud_offset(p4d_t *p4d, unsigned long address);
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
if (pgtable_l5_enabled)
- *pgdp = pgd;
+ WRITE_ONCE(*pgdp, pgd);
else
set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
}
@@ -400,12 +394,6 @@ static inline struct page *pgd_page(pgd_t pgd)
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
#define p4d_offset p4d_offset
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
-{
- if (pgtable_l5_enabled)
- return pgd_pgtable(*pgd) + p4d_index(address);
-
- return (p4d_t *)pgd;
-}
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
#endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 719c3041ae1c..37829dab4a0a 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -515,7 +515,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
*/
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
- *ptep = pteval;
+ WRITE_ONCE(*ptep, pteval);
}
void flush_icache_pte(pte_t pte);
@@ -549,19 +549,12 @@ static inline void pte_clear(struct mm_struct *mm,
__set_pte_at(ptep, __pte(0));
}
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty)
-{
- if (!pte_same(*ptep, entry))
- __set_pte_at(ptep, entry);
- /*
- * update_mmu_cache will unconditionally execute, handling both
- * the case that the PTE changed and the spurious fault case.
- */
- return true;
-}
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
@@ -574,16 +567,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
-{
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
-}
-
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
index aa6209a74c83..b64bf1624a05 100644
--- a/arch/riscv/kernel/efi.c
+++ b/arch/riscv/kernel/efi.c
@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
unsigned long val;
if (md->attribute & EFI_MEMORY_RO) {
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 0097c145385f..9691fa8f2faa 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -305,6 +305,9 @@ clear_bss_done:
#else
mv a0, a1
#endif /* CONFIG_BUILTIN_DTB */
+ /* Set trap vector to spin forever to help debug */
+ la a3, .Lsecondary_park
+ csrw CSR_TVEC, a3
call setup_vm
#ifdef CONFIG_MMU
la a0, early_pg_dir
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 2f08c14a933d..fecbbcf40ac3 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -28,9 +28,8 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
p->ainsn.api.restore = (unsigned long)p->addr + offset;
- patch_text(p->ainsn.api.insn, &p->opcode, 1);
- patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
- &insn, 1);
+ patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
+ patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 068c74593871..a9e2fd7245e1 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
*ptep_level = current_level;
ptep = (pte_t *)kvm->arch.pgd;
ptep = &ptep[gstage_pte_index(addr, current_level)];
- while (ptep && pte_val(*ptep)) {
+ while (ptep && pte_val(ptep_get(ptep))) {
if (gstage_pte_leaf(ptep)) {
*ptep_level = current_level;
*ptepp = ptep;
@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
if (current_level) {
current_level--;
*ptep_level = current_level;
- ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
ptep = &ptep[gstage_pte_index(addr, current_level)];
} else {
ptep = NULL;
@@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
if (gstage_pte_leaf(ptep))
return -EEXIST;
- if (!pte_val(*ptep)) {
+ if (!pte_val(ptep_get(ptep))) {
if (!pcache)
return -ENOMEM;
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
- *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
- __pgprot(_PAGE_TABLE));
+ set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+ __pgprot(_PAGE_TABLE)));
} else {
if (gstage_pte_leaf(ptep))
return -EEXIST;
- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
}
current_level--;
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}
- *ptep = *new_pte;
+ set_pte(ptep, *new_pte);
if (gstage_pte_leaf(ptep))
gstage_remote_tlb_flush(kvm, current_level, addr);
@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
BUG_ON(addr & (page_size - 1));
- if (!pte_val(*ptep))
+ if (!pte_val(ptep_get(ptep)))
return;
if (ptep_level && !gstage_pte_leaf(ptep)) {
- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
next_ptep_level = ptep_level - 1;
ret = gstage_level_to_page_size(next_ptep_level,
&next_page_size);
@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
- set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
+ set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}
@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
&ptep, &ptep_level))
return false;
- return pte_young(*ptep);
+ return pte_young(ptep_get(ptep));
}
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 3a4dfc8babcf..2c869f8026a8 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,10 +13,9 @@ endif
KCOV_INSTRUMENT_init.o := n
obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
obj-y += cacheflush.o
obj-y += context.o
-obj-y += pgtable.o
obj-y += pmem.o
ifeq ($(CONFIG_MMU),y)
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 217fd4de6134..ba8eb3944687 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (unlikely(prev == next))
return;
+ membarrier_arch_switch_mm(prev, next, task);
+
/*
* Mark the current MM context as inactive, and the next as
* active. This is at least used by the icache flushing
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 655b2b1bb529..8960f4c84497 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -137,24 +137,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;
- if (!pgd_present(*pgd_k)) {
+ if (!pgd_present(pgdp_get(pgd_k))) {
no_context(regs, addr);
return;
}
- set_pgd(pgd, *pgd_k);
+ set_pgd(pgd, pgdp_get(pgd_k));
p4d_k = p4d_offset(pgd_k, addr);
- if (!p4d_present(*p4d_k)) {
+ if (!p4d_present(p4dp_get(p4d_k))) {
no_context(regs, addr);
return;
}
pud_k = pud_offset(p4d_k, addr);
- if (!pud_present(*pud_k)) {
+ if (!pud_present(pudp_get(pud_k))) {
no_context(regs, addr);
return;
}
- if (pud_leaf(*pud_k))
+ if (pud_leaf(pudp_get(pud_k)))
goto flush_tlb;
/*
@@ -162,11 +162,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* to copy individual PTEs
*/
pmd_k = pmd_offset(pud_k, addr);
- if (!pmd_present(*pmd_k)) {
+ if (!pmd_present(pmdp_get(pmd_k))) {
no_context(regs, addr);
return;
}
- if (pmd_leaf(*pmd_k))
+ if (pmd_leaf(pmdp_get(pmd_k)))
goto flush_tlb;
/*
@@ -176,7 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, addr);
- if (!pte_present(*pte_k)) {
+ if (!pte_present(ptep_get(pte_k))) {
no_context(regs, addr);
return;
}
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index fbe918801667..5ef2a6891158 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
}
if (sz == PMD_SIZE) {
- if (want_pmd_share(vma, addr) && pud_none(*pud))
+ if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
pte = huge_pmd_share(mm, vma, addr, pud);
else
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- if (!pgd_present(*pgd))
+ if (!pgd_present(pgdp_get(pgd)))
return NULL;
p4d = p4d_offset(pgd, addr);
- if (!p4d_present(*p4d))
+ if (!p4d_present(p4dp_get(p4d)))
return NULL;
pud = pud_offset(p4d, addr);
@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pud huge, non-present or none */
return (pte_t *)pud;
- if (!pud_present(*pud))
+ if (!pud_present(pudp_get(pud)))
return NULL;
pmd = pmd_offset(pud, addr);
@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pmd huge, non-present or none */
return (pte_t *)pmd;
- if (!pmd_present(*pmd))
+ if (!pmd_present(pmdp_get(pmd)))
return NULL;
for_each_napot_order(order) {
@@ -351,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
int i, pte_num;
if (!pte_napot(pte)) {
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index abe7a7a7686c..3245bb525212 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -235,7 +235,7 @@ static void __init setup_bootmem(void)
* The size of the linear page mapping may restrict the amount of
* usable RAM.
*/
- if (IS_ENABLED(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
memblock_cap_memory_range(phys_ram_base,
max_mapped_addr - phys_ram_base);
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 5e39dcf23fdb..e96251853037 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
phys_addr_t phys_addr;
pte_t *ptep, *p;
- if (pmd_none(*pmd)) {
+ if (pmd_none(pmdp_get(pmd))) {
p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
ptep = pte_offset_kernel(pmd, vaddr);
do {
- if (pte_none(*ptep)) {
+ if (pte_none(ptep_get(ptep))) {
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
pmd_t *pmdp, *p;
unsigned long next;
- if (pud_none(*pud)) {
+ if (pud_none(pudp_get(pud))) {
p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
do {
next = pmd_addr_end(vaddr, end);
- if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+ if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
+ (next - vaddr) >= PMD_SIZE) {
phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
if (phys_addr) {
set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
pud_t *pudp, *p;
unsigned long next;
- if (p4d_none(*p4d)) {
+ if (p4d_none(p4dp_get(p4d))) {
p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+ (next - vaddr) >= PUD_SIZE) {
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
if (phys_addr) {
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
p4d_t *p4dp, *p;
unsigned long next;
- if (pgd_none(*pgd)) {
+ if (pgd_none(pgdp_get(pgd))) {
p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+ (next - vaddr) >= P4D_SIZE) {
phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
if (phys_addr) {
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
if (phys_addr) {
@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
(next - vaddr) >= P4D_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pud_k)) {
+ if (pud_none(pudp_get(pud_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4d_k)) {
+ if (p4d_none(p4dp_get(p4d_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgd_k)) {
+ if (pgd_none(pgdp_get(pgd_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -451,7 +454,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last p4d since it is shared with the kernel mapping. */
if (pgtable_l5_enabled) {
- ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+ ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
@@ -462,7 +465,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last pud since it is shared with the kernel mapping. */
if (pgtable_l4_enabled) {
- ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+ ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index f61b2f8291e3..271d01a5ba4d 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- p4d_t val = READ_ONCE(*p4d);
+ p4d_t val = p4dp_get(p4d);
if (p4d_leaf(val)) {
val = __p4d(set_pageattr_masks(p4d_val(val), walk));
@@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pud_t val = READ_ONCE(*pud);
+ pud_t val = pudp_get(pud);
if (pud_leaf(val)) {
val = __pud(set_pageattr_masks(pud_val(val), walk));
@@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pmd_t val = READ_ONCE(*pmd);
+ pmd_t val = pmdp_get(pmd);
if (pmd_leaf(val)) {
val = __pmd(set_pageattr_masks(pmd_val(val), walk));
@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pte_t val = READ_ONCE(*pte);
+ pte_t val = ptep_get(pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
set_pte(pte, val);
@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
vaddr <= (vaddr & PMD_MASK) && end >= next)
continue;
- if (pmd_leaf(*pmdp)) {
+ if (pmd_leaf(pmdp_get(pmdp))) {
struct page *pte_page;
- unsigned long pfn = _pmd_pfn(*pmdp);
- pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
+ pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
pte_t *ptep_new;
int i;
@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
vaddr <= (vaddr & PUD_MASK) && end >= next)
continue;
- if (pud_leaf(*pudp)) {
+ if (pud_leaf(pudp_get(pudp))) {
struct page *pmd_page;
- unsigned long pfn = _pud_pfn(*pudp);
- pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _pud_pfn(pudp_get(pudp));
+ pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
pmd_t *pmdp_new;
int i;
@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
vaddr <= (vaddr & P4D_MASK) && end >= next)
continue;
- if (p4d_leaf(*p4dp)) {
+ if (p4d_leaf(p4dp_get(p4dp))) {
struct page *pud_page;
- unsigned long pfn = _p4d_pfn(*p4dp);
- pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
+ pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
pud_t *pudp_new;
int i;
@@ -427,29 +427,29 @@ bool kernel_page_present(struct page *page)
pte_t *pte;
pgd = pgd_offset_k(addr);
- if (!pgd_present(*pgd))
+ if (!pgd_present(pgdp_get(pgd)))
return false;
- if (pgd_leaf(*pgd))
+ if (pgd_leaf(pgdp_get(pgd)))
return true;
p4d = p4d_offset(pgd, addr);
- if (!p4d_present(*p4d))
+ if (!p4d_present(p4dp_get(p4d)))
return false;
- if (p4d_leaf(*p4d))
+ if (p4d_leaf(p4dp_get(p4d)))
return true;
pud = pud_offset(p4d, addr);
- if (!pud_present(*pud))
+ if (!pud_present(pudp_get(pud)))
return false;
- if (pud_leaf(*pud))
+ if (pud_leaf(pudp_get(pud)))
return true;
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
+ if (!pmd_present(pmdp_get(pmd)))
return false;
- if (pmd_leaf(*pmd))
+ if (pmd_leaf(pmdp_get(pmd)))
return true;
pte = pte_offset_kernel(pmd, addr);
- return pte_present(*pte);
+ return pte_present(ptep_get(pte));
}
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index fef4e7328e49..ef887efcb679 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -5,6 +5,47 @@
#include <linux/kernel.h>
#include <linux/pgtable.h>
+int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(ptep_get(ptep), entry))
+ __set_pte_at(ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(ptep_get(ptep)))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
+
+#ifdef CONFIG_64BIT
+pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if (pgtable_l4_enabled)
+ return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
+
+ return (pud_t *)p4d;
+}
+
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ if (pgtable_l5_enabled)
+ return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
+
+ return (p4d_t *)pgd;
+}
+#endif
+
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
- if (!pud_leaf(READ_ONCE(*pud)))
+ if (!pud_leaf(pudp_get(pud)))
return 0;
pud_clear(pud);
return 1;
@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd = pud_pgtable(*pud);
+ pmd_t *pmd = pud_pgtable(pudp_get(pud));
int i;
pud_clear(pud);
@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
- if (!pmd_leaf(READ_ONCE(*pmd)))
+ if (!pmd_leaf(pmdp_get(pmd)))
return 0;
pmd_clear(pmd);
return 1;
@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
- pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
pmd_clear(pmd);
@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(pmd_trans_huge(*pmdp));
+ VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
/*
* When leaf PTE entries (regular pages) are collapsed into a leaf
* PMD entry (huge page), a valid non-leaf PTE is converted into a
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 2ae201ebf90b..de5f9f623f5b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -71,6 +71,15 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
+ .data.rel.ro : {
+ *(.data.rel.ro .data.rel.ro.*)
+ }
+ .got : {
+ __got_start = .;
+ *(.got)
+ __got_end = .;
+ }
+
RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
BOOT_DATA_PRESERVED
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 375200e9aba9..2ba4e0d4e26b 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init,
parse_chan_pair(NULL, line, n, opts, error_out);
err = 0;
}
+ *error_out = "configured as 'none'";
} else {
char *new = kstrdup(init, GFP_KERNEL);
if (!new) {
@@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init,
}
}
if (err) {
+ *error_out = "failed to parse channel pair";
line->init_str = NULL;
line->valid = 0;
kfree(new);
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index f3c75809fed2..006041fbb65f 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -362,7 +362,6 @@ static bool mmio_read(int size, unsigned long addr, unsigned long *val)
.r12 = size,
.r13 = EPT_READ,
.r14 = addr,
- .r15 = *val,
};
if (__tdx_hypercall_ret(&args))
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index bc4fcf0d9405..688550e336ce 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4465,6 +4465,25 @@ static u8 adl_get_hybrid_cpu_type(void)
return hybrid_big;
}
+static inline bool erratum_hsw11(struct perf_event *event)
+{
+ return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+ X86_CONFIG(.event=0xc0, .umask=0x01);
+}
+
+/*
+ * The HSW11 erratum requires a period larger than 100, the same as BDM11.
+ * A minimum period of 128 is enforced as well for INST_RETIRED.ALL.
+ *
+ * The message 'interrupt took too long' can be observed on any counter which
+ * was armed with a period < 32 and two events expired in the same NMI.
+ * A minimum period of 32 is enforced for the rest of the events.
+ */
+static void hsw_limit_period(struct perf_event *event, s64 *left)
+{
+ *left = max(*left, erratum_hsw11(event) ? 128 : 32);
+}
+
/*
* Broadwell:
*
@@ -4482,8 +4501,7 @@ static u8 adl_get_hybrid_cpu_type(void)
*/
static void bdw_limit_period(struct perf_event *event, s64 *left)
{
- if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
- X86_CONFIG(.event=0xc0, .umask=0x01)) {
+ if (erratum_hsw11(event)) {
if (*left < 128)
*left = 128;
*left &= ~0x3fULL;
@@ -6392,6 +6410,7 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
+ x86_pmu.limit_period = hsw_limit_period;
x86_pmu.lbr_double_abort = true;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index eb810074f1e7..fd5fb43d920b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -589,6 +589,13 @@ struct fpu_state_config {
* even without XSAVE support, i.e. legacy features FP + SSE
*/
u64 legacy_features;
+ /*
+ * @independent_features:
+ *
+ * Features that are supported by XSAVES, but not managed as part of
+ * the FPU core, such as LBR
+ */
+ u64 independent_features;
};
/* FPU state configuration information */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index cc6b8e087192..9dab85aba7af 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -17,6 +17,7 @@ extern unsigned long phys_base;
extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
+extern unsigned long physmem_end;
static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 38b54b992f32..35c416f06155 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -140,6 +140,10 @@ extern unsigned int ptrs_per_p4d;
# define VMEMMAP_START __VMEMMAP_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
+#ifdef CONFIG_RANDOMIZE_MEMORY
+# define PHYSMEM_END physmem_end
+#endif
+
/*
* End of the region for which vmalloc page tables are pre-allocated.
* For non-KMSAN builds, this is the same as VMALLOC_END.
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 06d5a7eeee81..f2e605ee75d9 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1812,12 +1812,9 @@ static __init void apic_set_fixmap(bool read_apic);
static __init void x2apic_disable(void)
{
- u32 x2apic_id, state = x2apic_state;
+ u32 x2apic_id;
- x2apic_mode = 0;
- x2apic_state = X2APIC_DISABLED;
-
- if (state != X2APIC_ON)
+ if (x2apic_state < X2APIC_ON)
return;
x2apic_id = read_apic_id();
@@ -1830,6 +1827,10 @@ static __init void x2apic_disable(void)
}
__x2apic_disable();
+
+ x2apic_mode = 0;
+ x2apic_state = X2APIC_DISABLED;
+
/*
* Don't reread the APIC ID as it was already done from
* check_x2apic() and the APIC driver still is a x2APIC variant,
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 571a43b3105d..255ff8f6c527 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -788,6 +788,9 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
goto out_disable;
}
+ fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features &
+ XFEATURE_MASK_INDEPENDENT;
+
/*
* Clear XSAVE features that are disabled in the normal CPUID.
*/
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 19ca623ffa2a..544224611e23 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -64,9 +64,9 @@ static inline u64 xfeatures_mask_supervisor(void)
static inline u64 xfeatures_mask_independent(void)
{
if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
- return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;
+ return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR;
- return XFEATURE_MASK_INDEPENDENT;
+ return fpu_kernel_cfg.independent_features;
}
/* XSAVE/XRSTOR wrapper functions */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index e3c2acc1adc7..413f1f2aadd1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2869,6 +2869,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_CSTAR:
msr_info->data = svm->vmcb01.ptr->save.cstar;
break;
+ case MSR_GS_BASE:
+ msr_info->data = svm->vmcb01.ptr->save.gs.base;
+ break;
+ case MSR_FS_BASE:
+ msr_info->data = svm->vmcb01.ptr->save.fs.base;
+ break;
case MSR_KERNEL_GS_BASE:
msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
break;
@@ -3090,6 +3096,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_CSTAR:
svm->vmcb01.ptr->save.cstar = data;
break;
+ case MSR_GS_BASE:
+ svm->vmcb01.ptr->save.gs.base = data;
+ break;
+ case MSR_FS_BASE:
+ svm->vmcb01.ptr->save.fs.base = data;
+ break;
case MSR_KERNEL_GS_BASE:
svm->vmcb01.ptr->save.kernel_gs_base = data;
break;
@@ -5166,6 +5178,9 @@ static __init void svm_set_cpu_caps(void)
/* CPUID 0x8000001F (SME/SEV features) */
sev_set_cpu_caps();
+
+ /* Don't advertise Bus Lock Detect to guest if SVM support is absent */
+ kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
}
static __init int svm_hardware_setup(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c7e7ab1593d5..50cc822e1290 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5829,7 +5829,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
break;
+ kvm_vcpu_srcu_read_lock(vcpu);
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
+ kvm_vcpu_srcu_read_unlock(vcpu);
break;
}
case KVM_GET_DEBUGREGS: {
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index e0411a3774d4..5eecb45d05d5 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
{
+ const void *orig_to = to;
+ const size_t orig_n = n;
+
if (unlikely(!n))
return;
@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
}
rep_movs(to, (const void *)from, n);
/* KMSAN must treat values read from devices as initialized. */
- kmsan_unpoison_memory(to, n);
+ kmsan_unpoison_memory(orig_to, orig_n);
}
static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 19d209b412d7..aa69353da49f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -950,8 +950,12 @@ static void update_end_of_memory_vars(u64 start, u64 size)
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
struct mhp_params *params)
{
+ unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
int ret;
+ if (WARN_ON_ONCE(end > PHYSMEM_END))
+ return -ERANGE;
+
ret = __add_pages(nid, start_pfn, nr_pages, params);
WARN_ON_ONCE(ret);
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 37db264866b6..230f1dee4f09 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -47,13 +47,24 @@ static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
*/
static __initdata struct kaslr_memory_region {
unsigned long *base;
+ unsigned long *end;
unsigned long size_tb;
} kaslr_regions[] = {
- { &page_offset_base, 0 },
- { &vmalloc_base, 0 },
- { &vmemmap_base, 0 },
+ {
+ .base = &page_offset_base,
+ .end = &physmem_end,
+ },
+ {
+ .base = &vmalloc_base,
+ },
+ {
+ .base = &vmemmap_base,
+ },
};
+/* The end of the possible address space for physical memory */
+unsigned long physmem_end __ro_after_init;
+
/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
@@ -82,6 +93,8 @@ void __init kernel_randomize_memory(void)
BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+ /* Preset the end of the possible address space for physical memory */
+ physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1);
if (!kaslr_memory_enabled())
return;
@@ -128,11 +141,18 @@ void __init kernel_randomize_memory(void)
vaddr += entropy;
*kaslr_regions[i].base = vaddr;
+ /* Calculate the end of the region */
+ vaddr += get_padding(&kaslr_regions[i]);
/*
- * Jump the region and add a minimum padding based on
- * randomization alignment.
+ * KASLR trims the maximum possible size of the
+ * direct-map. Update the physmem_end boundary.
+ * No rounding required as the region starts
+ * PUD aligned and size is in units of TB.
*/
- vaddr += get_padding(&kaslr_regions[i]);
+ if (kaslr_regions[i].end)
+ *kaslr_regions[i].end = __pa_nodebug(vaddr - 1);
+
+ /* Add a minimum padding based on randomization alignment. */
vaddr = round_up(vaddr + 1, PUD_SIZE);
remain_entropy -= entropy;
}
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 41d8c8f475a7..83a6bdf0b498 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
*
* Returns a pointer to a PTE on success, or NULL on failure.
*/
-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
pmd_t *pmd;
@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
if (!pmd)
return NULL;
- /* We can't do anything sensible if we hit a large mapping. */
+ /* Large PMD mapping found */
if (pmd_large(*pmd)) {
- WARN_ON(1);
- return NULL;
+ /* Clear the PMD if we hit a large mapping from the first round */
+ if (late_text) {
+ set_pmd(pmd, __pmd(0));
+ } else {
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
}
if (pmd_none(*pmd)) {
@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
return;
- target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+ target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
if (WARN_ON(!target_pte))
return;
@@ -301,7 +306,7 @@ enum pti_clone_level {
static void
pti_clone_pgtable(unsigned long start, unsigned long end,
- enum pti_clone_level level)
+ enum pti_clone_level level, bool late_text)
{
unsigned long addr;
@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
return;
/* Allocate PTE in the user page-table */
- target_pte = pti_user_pagetable_walk_pte(addr);
+ target_pte = pti_user_pagetable_walk_pte(addr, late_text);
if (WARN_ON(!target_pte))
return;
@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
pte_t *target_pte;
- target_pte = pti_user_pagetable_walk_pte(va);
+ target_pte = pti_user_pagetable_walk_pte(va, false);
if (WARN_ON(!target_pte))
return;
@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
start = CPU_ENTRY_AREA_BASE;
end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
- pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+ pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
}
#endif /* CONFIG_X86_64 */
@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
/*
* Clone the populated PMDs of the entry text and force it RO.
*/
-static void pti_clone_entry_text(void)
+static void pti_clone_entry_text(bool late)
{
pti_clone_pgtable((unsigned long) __entry_text_start,
(unsigned long) __entry_text_end,
- PTI_LEVEL_KERNEL_IMAGE);
+ PTI_LEVEL_KERNEL_IMAGE, late);
}
/*
@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
* pti_set_kernel_image_nonglobal() did to clear the
* global bit.
*/
- pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+ pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
/*
* pti_clone_pgtable() will set the global bit in any PMDs
@@ -638,8 +643,15 @@ void __init pti_init(void)
/* Undo all global bits from the init pagetables in head_64.S: */
pti_set_kernel_image_nonglobal();
+
/* Replace some of the global bits just for shared entry text: */
- pti_clone_entry_text();
+ /*
+ * This is very early in boot. Device and Late initcalls can do
+ * modprobe before free_initmem() and mark_readonly(). This
+ * pti_clone_entry_text() allows those user-mode-helpers to function,
+ * but notably the text is still RW.
+ */
+ pti_clone_entry_text(false);
pti_setup_espfix64();
pti_setup_vsyscall();
}
@@ -656,10 +668,11 @@ void pti_finalize(void)
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
/*
- * We need to clone everything (again) that maps parts of the
- * kernel image.
+ * This is after free_initmem() (all initcalls are done) and we've done
+ * mark_readonly(). Text is now NX which might've split some PMDs
+ * relative to the early clone.
*/
- pti_clone_entry_text();
+ pti_clone_entry_text(true);
pti_clone_kernel_text();
debug_checkwx_user();
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
index 908710524dc9..493e556cd31b 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
@@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
mmDCORE0_EDMA0_CORE_CTX_IDX,
mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
+ mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND,
mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
mmDCORE0_EDMA0_QM_CQ_CFG0_0,
mmDCORE0_EDMA0_QM_CQ_CFG0_1,
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0f5218e361df..7053f1b9fc1d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -415,7 +415,7 @@ static int acpi_processor_add(struct acpi_device *device,
result = acpi_processor_get_info(device);
if (result) /* Processor is not physically present or unavailable */
- return 0;
+ goto err_clear_driver_data;
BUG_ON(pr->id >= nr_cpu_ids);
@@ -430,7 +430,7 @@ static int acpi_processor_add(struct acpi_device *device,
"BIOS reported wrong ACPI id %d for the processor\n",
pr->id);
/* Give up, but do not abort the namespace scan. */
- goto err;
+ goto err_clear_driver_data;
}
/*
* processor_device_array is not cleared on errors to allow buggy BIOS
@@ -442,12 +442,12 @@ static int acpi_processor_add(struct acpi_device *device,
dev = get_cpu_device(pr->id);
if (!dev) {
result = -ENODEV;
- goto err;
+ goto err_clear_per_cpu;
}
result = acpi_bind_one(dev, device);
if (result)
- goto err;
+ goto err_clear_per_cpu;
pr->dev = dev;
@@ -458,10 +458,11 @@ static int acpi_processor_add(struct acpi_device *device,
dev_err(dev, "Processor driver could not be attached\n");
acpi_unbind_one(dev);
- err:
- free_cpumask_var(pr->throttling.shared_cpu_map);
- device->driver_data = NULL;
+ err_clear_per_cpu:
per_cpu(processors, pr->id) = NULL;
+ err_clear_driver_data:
+ device->driver_data = NULL;
+ free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
kfree(pr);
return result;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d3b9da75a815..d6934ba7a315 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1196,6 +1196,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}
+/**
+ * cppc_get_highest_perf - Get the highest performance register value.
+ * @cpunum: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
+{
+ return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+}
+EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
+
/**
* cppc_get_epp_perf - Get the epp register value.
* @cpunum: CPU from which to get epp preference value.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index ccaceedbc2c0..94f10c6eb336 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3342,6 +3342,7 @@ static void binder_transaction(struct binder_proc *proc,
*/
copy_size = object_offset - user_offset;
if (copy_size && (user_offset > object_offset ||
+ object_offset > tr->data_size ||
binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer, user_offset,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 373d23af1d9a..4ed90d46a017 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5593,8 +5593,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
}
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
- if (!dr)
+ if (!dr) {
+ kfree(host);
goto err_out;
+ }
devres_add(dev, dr);
dev_set_drvdata(dev, host);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 277bf0e8ed09..c91f8746289f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -242,10 +242,17 @@ void ata_scsi_set_sense_information(struct ata_device *dev,
*/
static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
{
+ struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't set ATA PT sense fields\n");
+ return;
+ }
+
if ((sb[0] & 0x7f) >= 0x72) {
unsigned char *desc;
u8 len;
@@ -924,12 +931,16 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*/
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
+ struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
u8 sense_key, asc, ascq;
- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't generate ATA PT sense data\n");
+ return;
+ }
/*
* Use ata_to_sense_error() to map status register bits
@@ -976,14 +987,19 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
u64 block;
u8 sense_key, asc, ascq;
- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
-
if (ata_dev_disabled(dev)) {
/* Device disabled after error recovery */
/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
return;
}
+
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(dev,
+ "missing result TF: can't generate sense data\n");
+ return;
+ }
+
/* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 17f6ccee53c7..ffbb2e8591ce 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -541,7 +541,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
while (sg_len) {
/* table overflow should never happen */
- BUG_ON (pi++ >= MAX_DCMDS);
+ if (WARN_ON_ONCE(pi >= MAX_DCMDS))
+ return AC_ERR_SYSTEM;
len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
@@ -553,11 +554,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
addr += len;
sg_len -= len;
++table;
+ ++pi;
}
}
/* Should never happen according to Tejun */
- BUG_ON(!pi);
+ if (WARN_ON_ONCE(!pi))
+ return AC_ERR_SYSTEM;
/* Convert the last command to an input/output */
table--;
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 8d709dbd4e0c..e9b0d94aeabd 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
grp->id = grp;
if (id)
grp->id = id;
+ grp->color = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 55999a50ccc0..0b6c2277128b 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -110,7 +110,8 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, min, max);
unsigned long *entry, *lower, *upper;
- unsigned long lower_index, lower_last;
+ /* initialized to work around false-positive -Wuninitialized warning */
+ unsigned long lower_index = 0, lower_last = 0;
unsigned long upper_index, upper_last;
int ret = 0;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index f4e0573c4711..bf7f68e90953 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2603,6 +2603,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
mutex_lock(&ub->mutex);
if (!ublk_can_use_recovery(ub))
goto out_unlock;
+ if (!ub->nr_queues_ready)
+ goto out_unlock;
/*
* START_RECOVERY is only allowed after:
*
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 814dd966b1a4..5ee9a8b8dcfd 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1326,8 +1326,10 @@ static int btnxpuart_close(struct hci_dev *hdev)
serdev_device_close(nxpdev->serdev);
skb_queue_purge(&nxpdev->txq);
- kfree_skb(nxpdev->rx_skb);
- nxpdev->rx_skb = NULL;
+ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
+ }
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
@@ -1342,8 +1344,10 @@ static int btnxpuart_flush(struct hci_dev *hdev)
cancel_work_sync(&nxpdev->tx_work);
- kfree_skb(nxpdev->rx_skb);
- nxpdev->rx_skb = NULL;
+ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
+ }
return 0;
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9082456d80fb..7a552387129e 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1090,6 +1090,7 @@ static void qca_controller_memdump(struct work_struct *work)
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
cancel_delayed_work(&qca->ctrl_memdump_timeout);
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ clear_bit(QCA_IBS_DISABLED, &qca->flags);
mutex_unlock(&qca->hci_memdump_lock);
return;
}
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 85aa089650ea..1701cce74df7 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -40,7 +40,7 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
-# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
# define PLL_VCO_SHIFT 20
@@ -1478,8 +1478,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
}
return regmap_update_bits(regmap, PLL_USER_CTL(pll),
- PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
- val << PLL_POST_DIV_SHIFT);
+ PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
+ val << pll->post_div_shift);
}
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index e6d84c8c7989..84c497f361bc 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -176,6 +176,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
extern const struct clk_ops clk_dp_ops;
struct clk_rcg_dfs_data {
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b9f2a29be927..461f54fe5e4f 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1182,6 +1182,36 @@ const struct clk_ops clk_rcg2_shared_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /*
+ * Read the config register so that the parent is properly mapped at
+ * registration time.
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
+
+ return 0;
+}
+
+/*
+ * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
+ * unchanged at registration time.
+ */
+const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
+ .init = clk_rcg2_shared_no_init_park,
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_shared_get_parent,
+ .set_parent = clk_rcg2_shared_set_parent,
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
+
/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
struct freq_tbl *f)
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index f8b9a1e93bef..cdbbf2cc9c5d 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -65,7 +65,7 @@ static const struct clk_parent_data gcc_sleep_clk_data[] = {
static struct clk_alpha_pll gpll0_main = {
.offset = 0x20000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(0),
@@ -93,7 +93,7 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x20000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll0",
@@ -107,7 +107,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
static struct clk_alpha_pll gpll4_main = {
.offset = 0x22000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(2),
@@ -122,7 +122,7 @@ static struct clk_alpha_pll gpll4_main = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x22000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll4",
@@ -136,7 +136,7 @@ static struct clk_alpha_pll_postdiv gpll4 = {
static struct clk_alpha_pll gpll2_main = {
.offset = 0x21000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(1),
@@ -151,7 +151,7 @@ static struct clk_alpha_pll gpll2_main = {
static struct clk_alpha_pll_postdiv gpll2 = {
.offset = 0x21000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll2",
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index b883dffe5f7a..eb3765c57b65 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -536,7 +536,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -551,7 +551,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -566,7 +566,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -581,7 +581,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -596,7 +596,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -611,7 +611,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -626,7 +626,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -641,7 +641,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -656,7 +656,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -700,7 +700,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -717,7 +717,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -750,7 +750,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -767,7 +767,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -784,7 +784,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -801,7 +801,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -818,7 +818,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -835,7 +835,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -852,7 +852,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -869,7 +869,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -886,7 +886,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@@ -903,7 +903,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -920,7 +920,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -937,7 +937,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -975,7 +975,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@@ -992,7 +992,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
},
};
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index 3884eff9fe93..58ef7c6d69cc 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -385,6 +385,32 @@ int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
}
EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
+/*
+ * This clock notifier is called when the rate of the PLL0 clock is to be changed.
+ * The cpu_root clock should save the current parent clock and switch its parent
+ * clock to osc before the PLL0 rate is changed, then switch its parent clock
+ * back after the PLL0 rate change has completed.
+ */
+static int jh7110_pll0_clk_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct jh71x0_clk_priv *priv = container_of(nb, struct jh71x0_clk_priv, pll_clk_nb);
+ struct clk *cpu_root = priv->reg[JH7110_SYSCLK_CPU_ROOT].hw.clk;
+ int ret = 0;
+
+ if (action == PRE_RATE_CHANGE) {
+ struct clk *osc = clk_get(priv->dev, "osc");
+
+ priv->original_clk = clk_get_parent(cpu_root);
+ ret = clk_set_parent(cpu_root, osc);
+ clk_put(osc);
+ } else if (action == POST_RATE_CHANGE) {
+ ret = clk_set_parent(cpu_root, priv->original_clk);
+ }
+
+ return notifier_from_errno(ret);
+}
+
static int __init jh7110_syscrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -413,7 +439,10 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
if (IS_ERR(priv->pll[0]))
return PTR_ERR(priv->pll[0]);
} else {
- clk_put(pllclk);
+ priv->pll_clk_nb.notifier_call = jh7110_pll0_clk_notifier_cb;
+ ret = clk_notifier_register(pllclk, &priv->pll_clk_nb);
+ if (ret)
+ return ret;
priv->pll[0] = NULL;
}
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
index 34bb11c72eb7..ebc55b9ef837 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -114,6 +114,8 @@ struct jh71x0_clk_priv {
spinlock_t rmw_lock;
struct device *dev;
void __iomem *base;
+ struct clk *original_clk;
+ struct notifier_block pll_clk_nb;
struct clk_hw *pll[3];
struct jh71x0_clk reg[];
};
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index bd64a8a8427f..92c025b70eb6 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- unsigned long next, now;
+ unsigned long next, prev, now;
- next = tpm_read_counter();
- next += delta;
+ prev = tpm_read_counter();
+ next = prev + delta;
writel(next, timer_base + TPM_C0V);
now = tpm_read_counter();
+ /*
+ * Need to wait for CNT to increase by at least 1 cycle to make sure
+ * the C0V value has been updated into the HW.
+ */
+ if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
+ while (now == tpm_read_counter())
+ ;
+
/*
* NOTE: We observed in a very small probability, the bus fabric
* contention between GPU and A7 may results a few cycles delay
* of writing CNT registers which may cause the min_delta event got
* missed, so we need add a ETIME check here in case it happened.
*/
- return (int)(next - now) <= 0 ? -ETIME : 0;
+ return (now - prev) >= delta ? -ETIME : 0;
}
static int tpm_set_state_oneshot(struct clock_event_device *evt)
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index c3f54d9912be..420202bf76e4 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
struct clock_event_device *clkevt = &to->clkevt;
- if (of_irq->percpu)
- free_percpu_irq(of_irq->irq, clkevt);
- else
- free_irq(of_irq->irq, clkevt);
+ free_irq(of_irq->irq, clkevt);
}
/**
@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
* - Get interrupt number by name
* - Get interrupt number by index
*
- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
- * otherwise 'request_irq()' is used.
- *
* Returns 0 on success, < 0 otherwise
*/
static __init int timer_of_irq_init(struct device_node *np,
@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np,
return -EINVAL;
}
- ret = of_irq->percpu ?
- request_percpu_irq(of_irq->irq, of_irq->handler,
- np->full_name, clkevt) :
- request_irq(of_irq->irq, of_irq->handler,
- of_irq->flags ? of_irq->flags : IRQF_TIMER,
- np->full_name, clkevt);
+ ret = request_irq(of_irq->irq, of_irq->handler,
+ of_irq->flags ? of_irq->flags : IRQF_TIMER,
+ np->full_name, clkevt);
if (ret) {
pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
return ret;
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index a5478f3e8589..01a2c6b7db06 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -11,7 +11,6 @@
struct of_timer_irq {
int irq;
int index;
- int percpu;
const char *name;
unsigned long flags;
irq_handler_t handler;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 23c74e9f04c4..f461f99eb040 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/amd-pstate.h>
+#include <linux/topology.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
@@ -49,6 +50,8 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
+#define CPPC_HIGHEST_PERF_PERFORMANCE 196
+#define CPPC_HIGHEST_PERF_DEFAULT 166
/*
* TODO: We need more time to fine tune processors with shared memory solution
@@ -64,6 +67,7 @@ static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
+static bool amd_pstate_prefcore = true;
/*
* AMD Energy Preference Performance (EPP)
@@ -310,6 +314,21 @@ static inline int amd_pstate_enable(bool enable)
return static_call(amd_pstate_enable)(enable);
}
+static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ /*
+ * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
+ * the highest performance level is set to 196.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218759
+ */
+ if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
+ return CPPC_HIGHEST_PERF_PERFORMANCE;
+
+ return CPPC_HIGHEST_PERF_DEFAULT;
+}
+
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
@@ -320,13 +339,14 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- /*
- * TODO: Introduce AMD specific power feature.
- *
- * CPPC entry doesn't indicate the highest performance in some ASICs.
+ /* For platforms that do not support the preferred core feature, the
+ * highest_perf may be configured with 166 or 255, which would cause the max
+ * frequency to be calculated wrongly. We take the AMD_CPPC_HIGHEST_PERF(cap1)
+ * value as the default max perf.
*/
- highest_perf = amd_get_highest_perf();
- if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+ if (cpudata->hw_prefcore)
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
+ else
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -347,8 +367,9 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- highest_perf = amd_get_highest_perf();
- if (highest_perf > cppc_perf.highest_perf)
+ if (cpudata->hw_prefcore)
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
+ else
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -709,6 +730,80 @@ static void amd_perf_ctl_reset(unsigned int cpu)
wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
+/*
+ * Setting amd-pstate preferred core enable can't be done directly from cpufreq
+ * callbacks due to locking, so queue the work for later.
+ */
+static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
+
+/*
+ * Get the highest performance register value.
+ * @cpu: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
+{
+ int ret;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 cap1;
+
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ if (ret)
+ return ret;
+ WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ } else {
+ u64 cppc_highest_perf;
+
+ ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
+ if (ret)
+ return ret;
+ WRITE_ONCE(*highest_perf, cppc_highest_perf);
+ }
+
+ return (ret);
+}
+
+#define CPPC_MAX_PERF U8_MAX
+
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+{
+ int ret, prio;
+ u32 highest_perf;
+
+ ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
+ if (ret)
+ return;
+
+ cpudata->hw_prefcore = true;
+ /* Check if the CPPC preferred core feature is enabled */
+ if (highest_perf < CPPC_MAX_PERF)
+ prio = (int)highest_perf;
+ else {
+ pr_debug("AMD CPPC preferred core is unsupported!\n");
+ cpudata->hw_prefcore = false;
+ return;
+ }
+
+ if (!amd_pstate_prefcore)
+ return;
+
+ /*
+ * The priorities can be set regardless of whether or not
+ * sched_set_itmt_support(true) has been called and it is valid to
+ * update them at any time after it has been called.
+ */
+ sched_set_itmt_core_prio(prio, cpudata->cpu);
+
+ schedule_work(&sched_prefcore_work);
+}
+
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -730,6 +825,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
@@ -880,6 +977,17 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
return sysfs_emit(buf, "%u\n", perf);
}
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+ char *buf)
+{
+ bool hw_prefcore;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+}
+
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
@@ -1077,18 +1185,27 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
return ret < 0 ? ret : count;
}
+static ssize_t prefcore_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
+}
+
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_hw_prefcore,
NULL,
};
@@ -1096,6 +1213,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_hw_prefcore,
&energy_performance_preference,
&energy_performance_available_preferences,
NULL,
@@ -1103,6 +1221,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
static struct attribute *pstate_global_attributes[] = {
&dev_attr_status.attr,
+ &dev_attr_prefcore.attr,
NULL
};
@@ -1154,6 +1273,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
cpudata->epp_policy = 0;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
@@ -1577,7 +1698,17 @@ static int __init amd_pstate_param(char *str)
return amd_pstate_set_driver(mode_idx);
}
+
+static int __init amd_prefcore_param(char *str)
+{
+ if (!strcmp(str, "disable"))
+ amd_pstate_prefcore = false;
+
+ return 0;
+}
+
early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
MODULE_AUTHOR("Huang Rui <ray.huang@....com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
index 70ef11963938..43af81fcab86 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
@@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
- errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+ /* Update only section of errmsk3 related to VF2PF */
+ errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
/* Return the sources of the (new) interrupt(s) */
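
As a side note, a minimal standalone sketch of the read-modify-write pattern this hunk (and the similar dh895xcc one below) switches to: clear only the VF2PF field of the cached register value, then OR in the new source bits, so everything outside that field keeps its previous value. The field position and width here are illustrative, not the device's actual layout.

#include <stdint.h>
#include <stdio.h>

#define VF2PF_SHIFT	9			/* illustrative field position */
#define VF2PF_MASK	(0xFFu << VF2PF_SHIFT)
#define VF2PF(bits)	(((bits) << VF2PF_SHIFT) & VF2PF_MASK)

static uint32_t update_vf2pf_field(uint32_t errmsk, uint32_t sources)
{
	errmsk &= ~VF2PF_MASK;		/* clear only the VF2PF section */
	errmsk |= VF2PF(sources);	/* insert the new source bits */
	return errmsk;			/* every other bit is preserved */
}

int main(void)
{
	uint32_t errmsk = 0xA5000000u | VF2PF(0x3);

	printf("0x%08X\n", update_vf2pf_field(errmsk, 0x5));	/* 0xA5000A00 */
	return 0;
}
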
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 09551f949126..0e40897cc983 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -191,8 +191,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
- errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
- errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+ /* Update only section of errmsk3 and errmsk5 related to VF2PF */
+ errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+ errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+
+ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index fe011d50473d..607f70292b21 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -30,6 +30,7 @@
#define MAX_KEY_SIZE SHA512_BLOCK_SIZE
#define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE
#define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE
+#define STARFIVE_RSA_MAX_KEYSZ 256
union starfive_aes_csr {
u32 v;
@@ -212,12 +213,11 @@ struct starfive_cryp_request_ctx {
struct scatterlist *out_sg;
struct ahash_request ahash_fbk_req;
size_t total;
- size_t nents;
unsigned int blksize;
unsigned int digsize;
unsigned long in_sg_len;
unsigned char *adata;
- u8 rsa_data[] __aligned(sizeof(u32));
+ u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32));
};
struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index f31bbd825f88..1db9a3d02848 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -37,7 +37,6 @@
// A * A * R mod N ==> A
#define CRYPTO_CMD_AARN 0x7
-#define STARFIVE_RSA_MAX_KEYSZ 256
#define STARFIVE_RSA_RESET 0x2
static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
@@ -91,7 +90,7 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
{
struct starfive_cryp_dev *cryp = ctx->cryp;
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- int count = rctx->total / sizeof(u32) - 1;
+ int count = (ALIGN(rctx->total, 4) / 4) - 1;
int loop;
u32 temp;
u8 opsize;
@@ -274,12 +273,17 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
struct starfive_cryp_dev *cryp = ctx->cryp;
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
struct starfive_rsa_key *key = &ctx->rsa_key;
- int ret = 0;
+ int ret = 0, shift = 0;
writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents,
- rctx->rsa_data, rctx->total);
+ if (!IS_ALIGNED(rctx->total, sizeof(u32))) {
+ shift = sizeof(u32) - (rctx->total & 0x3);
+ memset(rctx->rsa_data, 0, shift);
+ }
+
+ rctx->total = sg_copy_to_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
+ rctx->rsa_data + shift, rctx->total);
if (enc) {
key->bitlen = key->e_bitlen;
@@ -329,7 +333,6 @@ static int starfive_rsa_enc(struct akcipher_request *req)
rctx->in_sg = req->src;
rctx->out_sg = req->dst;
rctx->total = req->src_len;
- rctx->nents = sg_nents(rctx->in_sg);
ctx->rctx = rctx;
return starfive_rsa_enc_core(ctx, 1);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index bc5a43897d57..5060d9802795 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1528,10 +1528,13 @@ static int cxl_region_attach_position(struct cxl_region *cxlr,
const struct cxl_dport *dport, int pos)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
+ struct cxl_decoder *cxld = &cxlsd->cxld;
+ int iw = cxld->interleave_ways;
struct cxl_port *iter;
int rc;
- if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+ if (dport != cxlrd->cxlsd.target[pos % iw]) {
dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
dev_name(&cxlrd->cxlsd.cxld.dev));
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index a1da7581adb0..e62ffffe5fb8 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -796,6 +796,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
lockdep_assert_held(&ctl->dsp->pwr_lock);
+ if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE))
+ return -EPERM;
+
if (len + off * sizeof(u32) > ctl->len)
return -EINVAL;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index b35b9604413f..caeb3bdc78f8 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
return -ENODEV;
pctldev = of_pinctrl_get(pctlnp);
+ of_node_put(pctlnp);
if (!pctldev)
return -EPROBE_DEFER;
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index a0d69387c153..2f3c9ebfa78d 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = {
{ .compatible = "xlnx,zynqmp-gpio-modepin", },
{ }
};
+MODULE_DEVICE_TABLE(of, modepin_platform_id);
static struct platform_driver modepin_platform_driver = {
.driver = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 61668a784315..e361dc37a089 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1096,6 +1096,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
unsigned int i;
int r;
+ /*
+ * We can't use gang submit with reserved VMIDs when the VM changes
+ * can't be invalidated by more than one engine at the same time.
+ */
+ if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+ for (i = 0; i < p->gang_size; ++i) {
+ struct drm_sched_entity *entity = p->entities[i];
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
+ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+ return -EINVAL;
+ }
+ }
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 578aeba49ea8..5fbb9caa7415 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -909,8 +909,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
u64 micro_tile_mode;
- /* Zero swizzle mode means linear */
- if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+ if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
return 0;
micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
@@ -1034,6 +1033,30 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
block_width = 256 / format_info->cpp[i];
block_height = 1;
block_size_log2 = 8;
+ } else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
+ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+ switch (swizzle) {
+ case AMD_FMT_MOD_TILE_GFX12_256B_2D:
+ block_size_log2 = 8;
+ break;
+ case AMD_FMT_MOD_TILE_GFX12_4K_2D:
+ block_size_log2 = 12;
+ break;
+ case AMD_FMT_MOD_TILE_GFX12_64K_2D:
+ block_size_log2 = 16;
+ break;
+ case AMD_FMT_MOD_TILE_GFX12_256K_2D:
+ block_size_log2 = 18;
+ break;
+ default:
+ drm_dbg_kms(rfb->base.dev,
+ "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
+ return -EINVAL;
+ }
+
+ get_block_dimensions(block_size_log2, format_info->cpp[i],
+ &block_width, &block_height);
} else {
int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
@@ -1069,7 +1092,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
return ret;
}
- if (AMD_FMT_MOD_GET(DCC, modifier)) {
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
+ AMD_FMT_MOD_GET(DCC, modifier)) {
if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
block_size_log2 = get_dcc_block_size(modifier, false, false);
get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index ff1ea99292fb..69dfc699d78b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
+ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -459,6 +459,19 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
return r;
}
+/*
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+ return vm->reserved_vmid[vmhub] ||
+ (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+}
+
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index fa8c42c83d5d..240fa6751260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub);
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index d9dc675b46ae..22575422ca7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -137,8 +137,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
if (virt->ops && virt->ops->req_full_gpu) {
r = virt->ops->req_full_gpu(adev, init);
- if (r)
+ if (r) {
+ adev->no_hw_access = true;
return r;
+ }
adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c81e98f0d17f..c813cd7b015e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4269,11 +4269,11 @@ static int gfx_v11_0_hw_init(void *handle)
/* RLC autoload sequence 1: Program rlc ram */
if (adev->gfx.imu.funcs->program_rlc_ram)
adev->gfx.imu.funcs->program_rlc_ram(adev);
+ /* rlc autoload firmware */
+ r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
+ if (r)
+ return r;
}
- /* rlc autoload firmware */
- r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
- if (r)
- return r;
} else {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index f432dc72df6a..725b1a585088 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+
+ if (enable) {
+ /* Unset the CLEAR_OVERFLOW bit to make sure the next step
+ * is switching the bit from 0 to 1
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Clear RB_OVERFLOW bit */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ }
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 44c155683824..f0ebf686b06f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6937,7 +6937,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
}
- if (j == dc_state->stream_count)
+ if (j == dc_state->stream_count || pbn_div == 0)
continue;
slot_num = DIV_ROUND_UP(pbn, pbn_div);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 2c366866f570..33bb96f770b8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -629,14 +629,14 @@ static bool construct_phy(struct dc_link *link,
link->link_enc =
link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
- DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
- DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
-
if (!link->link_enc) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
+ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+
/* Update link encoder tracking variables. These are used for the dynamic
* assignment of link encoders to streams.
*/
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 1ddb4f5eac8e..93c0455766dd 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -433,17 +433,20 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
}
if (status == MOD_HDCP_STATUS_SUCCESS)
- mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
&input->bstatus_read, &status,
- hdcp, "bstatus_read");
+ hdcp, "bstatus_read"))
+ goto out;
if (status == MOD_HDCP_STATUS_SUCCESS)
- mod_hdcp_execute_and_set(check_link_integrity_dp,
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
&input->link_integrity_check, &status,
- hdcp, "link_integrity_check");
+ hdcp, "link_integrity_check"))
+ goto out;
if (status == MOD_HDCP_STATUS_SUCCESS)
- mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
&input->reauth_request_check, &status,
- hdcp, "reauth_request_check");
+ hdcp, "reauth_request_check"))
+ goto out;
out:
return status;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index c9a9c758531b..c0d7fe5102e6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1883,7 +1883,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
smu_dpm_ctx->dpm_level = level;
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
@@ -1962,7 +1963,8 @@ static int smu_switch_power_profile(void *handle,
workload[0] = smu->workload_setting[index];
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
return 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index 0d3b22a74365..e251e061d1ad 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -304,7 +304,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- if (!intel_uc_fw_is_loadable(&gsc->fw))
+ if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
return;
if (intel_gsc_uc_fw_init_done(gsc))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 9a431726c8d5..ac7b3aad2222 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
}
+static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw)
+{
+ return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0;
+}
+
static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
{
return uc_fw->user_overridden;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 8a9aad523eec..1d4cc91c0e40 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
debug_object_init(fence, &i915_sw_fence_debug_descr);
}
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}
@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
debug_object_free(fence, &i915_sw_fence_debug_descr);
smp_wmb(); /* flush the change in state before reallocation */
@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}
@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
index 705b52337068..81f3024b7b1b 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
@@ -171,11 +171,13 @@ int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data)
void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
{
int i;
+ struct amdtp_hid_data *hid_data;
for (i = 0; i < cli_data->num_hid_devices; ++i) {
if (cli_data->hid_sensor_hubs[i]) {
- kfree(cli_data->hid_sensor_hubs[i]->driver_data);
+ hid_data = cli_data->hid_sensor_hubs[i]->driver_data;
hid_destroy_device(cli_data->hid_sensor_hubs[i]);
+ kfree(hid_data);
cli_data->hid_sensor_hubs[i] = NULL;
}
}
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
index cb8bd8aae15b..0fa785f52707 100644
--- a/drivers/hid/hid-cougar.c
+++ b/drivers/hid/hid-cougar.c
@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void)
static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
(rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
hid_info(hdev,
"usage count exceeds max: fixing up report descriptor\n");
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index edbb38f6956b..756aebf32473 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1962,6 +1962,7 @@ void vmbus_device_unregister(struct hv_device *device_obj)
*/
device_unregister(&device_obj->device);
}
+EXPORT_SYMBOL_GPL(vmbus_device_unregister);
#ifdef CONFIG_ACPI
/*
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 46e3c8c50765..73fd96799847 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -176,7 +176,7 @@ static ssize_t adc128_in_store(struct device *dev,
mutex_lock(&data->update_lock);
/* 10 mV LSB on limit registers */
- regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
+ regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
data->in[index][nr] = regval << 4;
reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
i2c_smbus_write_byte_data(data->client, reg, regval);
@@ -214,7 +214,7 @@ static ssize_t adc128_temp_store(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
data->temp[index] = regval << 1;
i2c_smbus_write_byte_data(data->client,
index == 1 ? ADC128_REG_TEMP_MAX
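
A minimal standalone sketch of the ordering change above (and of the similar hwmon hunks that follow): rounding division adds half of the divisor before dividing, so feeding it an unclamped user value near the type limits can overflow and produce a bogus result; clamping the raw value to its representable range first keeps the arithmetic safe. The 0..2550 / 10 constants mirror the 10 mV-per-step limit register in the hunk, but are otherwise illustrative.

#include <limits.h>
#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Rounding division in the style of DIV_ROUND_CLOSEST() for positive divisors. */
static long div_round_closest(long x, long d)
{
	return (x + d / 2) / d;	/* the "+ d / 2" overflows for x near LONG_MAX */
}

int main(void)
{
	long val = LONG_MAX;	/* an absurd sysfs write */

	/* old order (left commented out): the rounding add may overflow before the clamp
	 * long limit = clamp_long(div_round_closest(val, 10), 0, 255);
	 */

	/* new order: clamp the millivolt value first, then scale to register steps */
	long limit = div_round_closest(clamp_long(val, 0, 2550), 10);

	printf("%ld\n", limit);	/* 255 */
	return 0;
}
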
diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c
index b5325d0e72b9..dfa1d6926dea 100644
--- a/drivers/hwmon/hp-wmi-sensors.c
+++ b/drivers/hwmon/hp-wmi-sensors.c
@@ -1637,6 +1637,8 @@ static void hp_wmi_notify(u32 value, void *context)
goto out_unlock;
wobj = out.pointer;
+ if (!wobj)
+ goto out_unlock;
err = populate_event_from_wobj(dev, &event, wobj);
if (err) {
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 67b9d7636ee4..37e8e9679aeb 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
+ 1000);
mutex_lock(&data->update_lock);
data->tcrit2[index] = val;
@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
mutex_lock(&data->update_lock);
data->tcrit1[index] = val;
@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
if (ret < 0)
return ret;
- val = DIV_ROUND_CLOSEST(val, 1000);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
mutex_lock(&data->update_lock);
@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
return ret;
/* Accuracy is 1/2 degrees C */
- val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
mutex_lock(&data->update_lock);
data->toffset[index] = val;
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index f3bf2e4701c3..8da7aa1614d7 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -2262,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = val;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index fe960c0a624f..7d7d70afde65 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
@@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
return err;
/* Limit the temp to 0C - 15C */
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 337c95d43f3f..edc3a69bfe31 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -291,7 +291,10 @@ static int hci_dma_init(struct i3c_hci *hci)
rh->ibi_chunk_sz = dma_get_cache_alignment();
rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
- BUG_ON(rh->ibi_chunk_sz > 256);
+ if (rh->ibi_chunk_sz > 256) {
+ ret = -EINVAL;
+ goto err_out;
+ }
ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index ccd0c4680be2..acc937275c18 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -1037,29 +1037,59 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
u8 *in, const u8 *out, unsigned int xfer_len,
unsigned int *actual_len, bool continued)
{
+ int retry = 2;
u32 reg;
int ret;
/* clean SVC_I3C_MINT_IBIWON w1c bits */
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
- writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
- xfer_type |
- SVC_I3C_MCTRL_IBIRESP_NACK |
- SVC_I3C_MCTRL_DIR(rnw) |
- SVC_I3C_MCTRL_ADDR(addr) |
- SVC_I3C_MCTRL_RDTERM(*actual_len),
- master->regs + SVC_I3C_MCTRL);
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ while (retry--) {
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(rnw) |
+ SVC_I3C_MCTRL_ADDR(addr) |
+ SVC_I3C_MCTRL_RDTERM(*actual_len),
+ master->regs + SVC_I3C_MCTRL);
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
- if (ret)
- goto emit_stop;
+ if (ret)
+ goto emit_stop;
- if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
- ret = -ENXIO;
- *actual_len = 0;
- goto emit_stop;
+ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
+ /*
+ * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
+ * If the Controller chooses to start an I3C Message with an I3C Dynamic
+ * Address, then special provisions shall be made because that same I3C
+ * Target may be initiating an IBI or a Controller Role Request. So, one of
+ * three things may happen: (skip 1, 2)
+ *
+ * 3. The Addresses match and the RnW bits also match, and so neither
+ * Controller nor Target will ACK since both are expecting the other side to
+ * provide ACK. As a result, each side might think it had "won" arbitration,
+ * but neither side would continue, as each would subsequently see that the
+ * other did not provide ACK.
+ * ...
+ * For either value of RnW: Due to the NACK, the Controller shall defer the
+ * Private Write or Private Read, and should typically transmit the Target
+ * Address again after a Repeated START (i.e., the next one or any one prior
+ * to a STOP in the Frame). Since the Address Header following a Repeated
+ * START is not arbitrated, the Controller will always win (see Section
+ * 5.1.2.2.4).
+ */
+ if (retry && addr != 0x7e) {
+ writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
+ } else {
+ ret = -ENXIO;
+ *actual_len = 0;
+ goto emit_stop;
+ }
+ } else {
+ break;
+ }
}
/*
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index b9b206fcd748..2976c62b58c0 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -146,15 +146,18 @@ struct ad7124_chip_info {
struct ad7124_channel_config {
bool live;
unsigned int cfg_slot;
- enum ad7124_ref_sel refsel;
- bool bipolar;
- bool buf_positive;
- bool buf_negative;
- unsigned int vref_mv;
- unsigned int pga_bits;
- unsigned int odr;
- unsigned int odr_sel_bits;
- unsigned int filter_type;
+ /* Following fields are used to compare equality. */
+ struct_group(config_props,
+ enum ad7124_ref_sel refsel;
+ bool bipolar;
+ bool buf_positive;
+ bool buf_negative;
+ unsigned int vref_mv;
+ unsigned int pga_bits;
+ unsigned int odr;
+ unsigned int odr_sel_bits;
+ unsigned int filter_type;
+ );
};
struct ad7124_channel {
@@ -333,11 +336,12 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
ptrdiff_t cmp_size;
int i;
- cmp_size = (u8 *)&cfg->live - (u8 *)cfg;
+ cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
- if (cfg_aux->live && !memcmp(cfg, cfg_aux, cmp_size))
+ if (cfg_aux->live &&
+ !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
return cfg_aux;
}
@@ -761,6 +765,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
if (ret < 0)
return ret;
+ fsleep(200);
timeout = 100;
do {
ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 1928d9ae5bcf..1c08c0921ee7 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -49,7 +49,7 @@ static const unsigned int ad7616_oversampling_avail[8] = {
1, 2, 4, 8, 16, 32, 64, 128,
};
-static int ad7606_reset(struct ad7606_state *st)
+int ad7606_reset(struct ad7606_state *st)
{
if (st->gpio_reset) {
gpiod_set_value(st->gpio_reset, 1);
@@ -60,6 +60,7 @@ static int ad7606_reset(struct ad7606_state *st)
return -ENODEV;
}
+EXPORT_SYMBOL_NS_GPL(ad7606_reset, IIO_AD7606);
static int ad7606_reg_access(struct iio_dev *indio_dev,
unsigned int reg,
@@ -88,31 +89,6 @@ static int ad7606_read_samples(struct ad7606_state *st)
{
unsigned int num = st->chip_info->num_channels - 1;
u16 *data = st->data;
- int ret;
-
- /*
- * The frstdata signal is set to high while and after reading the sample
- * of the first channel and low for all other channels. This can be used
- * to check that the incoming data is correctly aligned. During normal
- * operation the data should never become unaligned, but some glitch or
- * electrostatic discharge might cause an extra read or clock cycle.
- * Monitoring the frstdata signal allows to recover from such failure
- * situations.
- */
-
- if (st->gpio_frstdata) {
- ret = st->bops->read_block(st->dev, 1, data);
- if (ret)
- return ret;
-
- if (!gpiod_get_value(st->gpio_frstdata)) {
- ad7606_reset(st);
- return -EIO;
- }
-
- data++;
- num--;
- }
return st->bops->read_block(st->dev, num, data);
}
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 0c6a88cc4695..6649e84d25de 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -151,6 +151,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
const char *name, unsigned int id,
const struct ad7606_bus_ops *bops);
+int ad7606_reset(struct ad7606_state *st);
+
enum ad7606_supported_device_ids {
ID_AD7605_4,
ID_AD7606_8,
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index d8408052262e..6bc587b20f05 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -7,6 +7,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/err.h>
@@ -21,8 +22,29 @@ static int ad7606_par16_read_block(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- insw((unsigned long)st->base_address, buf, count);
+ /*
+ * On the parallel interface, the frstdata signal is set to high while
+ * and after reading the sample of the first channel and low for all
+ * other channels. This can be used to check that the incoming data is
+ * correctly aligned. During normal operation the data should never
+ * become unaligned, but some glitch or electrostatic discharge might
+ * cause an extra read or clock cycle. Monitoring the frstdata signal
+ * allows to recover from such failure situations.
+ */
+ int num = count;
+ u16 *_buf = buf;
+
+ if (st->gpio_frstdata) {
+ insw((unsigned long)st->base_address, _buf, 1);
+ if (!gpiod_get_value(st->gpio_frstdata)) {
+ ad7606_reset(st);
+ return -EIO;
+ }
+ _buf++;
+ num--;
+ }
+ insw((unsigned long)st->base_address, _buf, num);
return 0;
}
@@ -35,8 +57,28 @@ static int ad7606_par8_read_block(struct device *dev,
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
-
- insb((unsigned long)st->base_address, buf, count * 2);
+ /*
+ * On the parallel interface, the frstdata signal is set to high while
+ * and after reading the sample of the first channel and low for all
+ * other channels. This can be used to check that the incoming data is
+ * correctly aligned. During normal operation the data should never
+ * become unaligned, but some glitch or electrostatic discharge might
+ * cause an extra read or clock cycle. Monitoring the frstdata signal
+ * allows to recover from such failure situations.
+ */
+ int num = count;
+ u16 *_buf = buf;
+
+ if (st->gpio_frstdata) {
+ insb((unsigned long)st->base_address, _buf, 2);
+ if (!gpiod_get_value(st->gpio_frstdata)) {
+ ad7606_reset(st);
+ return -EIO;
+ }
+ _buf++;
+ num--;
+ }
+ insb((unsigned long)st->base_address, _buf, num * 2);
return 0;
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 0d53c0a07b0d..db5dbd60cf67 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -180,7 +180,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
ret = dma_get_slave_caps(chan, &caps);
if (ret < 0)
- goto err_free;
+ goto err_release;
/* Needs to be aligned to the maximum of the minimums */
if (caps.src_addr_widths)
@@ -206,6 +206,8 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
return &dmaengine_buffer->queue.buffer;
+err_release:
+ dma_release_channel(chan);
err_free:
kfree(dmaengine_buffer);
return ERR_PTR(ret);
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index b85556538475..80e1c45485c9 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -680,17 +680,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
break;
case IIO_VAL_INT_PLUS_MICRO:
if (scale_val2 < 0)
- *processed = -raw64 * scale_val;
+ *processed = -raw64 * scale_val * scale;
else
- *processed = raw64 * scale_val;
+ *processed = raw64 * scale_val * scale;
*processed += div_s64(raw64 * (s64)scale_val2 * scale,
1000000LL);
break;
case IIO_VAL_INT_PLUS_NANO:
if (scale_val2 < 0)
- *processed = -raw64 * scale_val;
+ *processed = -raw64 * scale_val * scale;
else
- *processed = raw64 * scale_val;
+ *processed = raw64 * scale_val * scale;
*processed += div_s64(raw64 * (s64)scale_val2 * scale,
1000000000LL);
break;
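
For the IIO_VAL_INT_PLUS_MICRO / _NANO hunks above, a small worked example of why the integer part of the channel scale must also be multiplied by the caller's scaling factor, not just the fractional part. The numbers are illustrative: a channel scale of 2.500000 and a caller asking for milli-units (scale = 1000).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t raw = 10;
	int scale_val = 2, scale_val2 = 500000;	/* channel scale = 2.500000 */
	int scale = 1000;			/* caller wants milli-units */

	int64_t frac = raw * scale_val2 * (int64_t)scale / 1000000;	/* 5000 */
	int64_t before = raw * scale_val + frac;		/* 5020: integer part left unscaled */
	int64_t after = raw * scale_val * scale + frac;		/* 25000 = 10 * 2.5 * 1000 */

	printf("before=%lld after=%lld\n", (long long)before, (long long)after);
	return 0;
}
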
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index d98212d55108..2c973f15cab7 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -417,6 +417,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
return -EINVAL;
}
+ /*
+ * Limit number of contacts to a reasonable value (100). This
+ * ensures that we need less than 2 pages for struct input_mt
+ * (we are not using in-kernel slot assignment so not going to
+ * allocate memory for the "red" table), and we should have no
+ * trouble getting this much memory.
+ */
+ if (code == ABS_MT_SLOT && max > 99) {
+ printk(KERN_DEBUG
+ "%s: unreasonably large number of slots requested: %d\n",
+ UINPUT_NAME, max);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index ae7ba0c419f5..6a77babcf722 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -597,7 +597,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
* once, copy them all into this buffer at the right locations, and then
* do all operations on this linear buffer.
*/
- fw_buf = kzalloc(SZ_64K, GFP_KERNEL);
+ fw_buf = kvmalloc(SZ_64K, GFP_KERNEL);
if (!fw_buf)
return -ENOMEM;
@@ -627,7 +627,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
return 0;
err_big:
- kfree(fw_buf);
+ kvfree(fw_buf);
return error;
}
@@ -870,7 +870,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
ili210x_hardware_reset(priv->reset_gpio);
dev_dbg(dev, "Firmware update ended, error=%i\n", error);
enable_irq(client->irq);
- kfree(fwbuf);
+ kvfree(fwbuf);
return error;
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 23cb80d62a9a..84f0459e503c 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1422,7 +1422,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
- while (qi->desc_status[wait_index] != QI_DONE) {
+ while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
/*
* We will leave the interrupts disabled, to prevent interrupt
* context to queue another cmd while a cmd is already submitted
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 74c5cb93e900..94bd7f25f6f2 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -449,6 +449,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
+ iommu_write(iommu, IOMMU_BYPASS_REG, 0);
iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a55528469278..91a42e2d7a13 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -566,6 +566,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
+ /* IRQs 0 and 1 cannot be mapped, they are handled internally */
+ if (hw <= 1)
+ return -EINVAL;
+
armada_370_xp_irq_mask(irq_get_irq_data(virq));
if (!is_percpu_irq(hw))
writel(hw, per_cpu_int_base +
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index f2ff4387870d..d83c2c85962c 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -438,12 +438,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
&res, 0);
- if (ret) {
- of_node_put(child);
+ if (ret)
break;
- }
}
+ if (ret && child)
+ of_node_put(child);
if (!ret)
ret = gicv2m_allocate_domains(parent);
if (ret)
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index 9d91f21842f2..afe9bff7c7c1 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi)
dev_err(dev, "Device must have exactly one LED sub-node.");
return -EINVAL;
}
- child = of_get_next_available_child(dev_of_node(dev), NULL);
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (!led)
@@ -107,11 +106,13 @@ static int spi_byte_probe(struct spi_device *spi)
led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
+ child = of_get_next_available_child(dev_of_node(dev), NULL);
state = of_get_property(child, "default-state", NULL);
if (state) {
if (!strcmp(state, "on")) {
led->ldev.brightness = led->ldev.max_brightness;
} else if (strcmp(state, "off")) {
+ of_node_put(child);
/* all other cases except "off" */
dev_err(dev, "default-state can only be 'on' or 'off'");
return -EINVAL;
@@ -122,9 +123,12 @@ static int spi_byte_probe(struct spi_device *spi)
ret = devm_led_classdev_register(&spi->dev, &led->ldev);
if (ret) {
+ of_node_put(child);
mutex_destroy(&led->mutex);
return ret;
}
+
+ of_node_put(child);
spi_set_drvdata(spi, led);
return 0;
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 2a71bcdba92d..b37bbe762500 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -212,8 +212,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid));
/* minor */
if (strlen(field[2])) {
- if (kstrtoull(field[2], 0, &dev->dmi.dev))
+ if (kstrtoull(field[2], 0, &dev->dmi.dev) ||
+ dev->dmi.dev >= (1 << MINORBITS))
return ERR_PTR(-EINVAL);
+ dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev);
dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG;
}
/* flags */
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index c6df862c79e3..8fa6750b5b42 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -1038,8 +1038,11 @@ static int camss_of_parse_endpoint_node(struct device *dev,
struct v4l2_mbus_config_mipi_csi2 *mipi_csi2;
struct v4l2_fwnode_endpoint vep = { { 0 } };
unsigned int i;
+ int ret;
- v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+ if (ret)
+ return ret;
csd->interface.csiphy_id = vep.base.port;
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 3a06df35a2d7..99325bfed643 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -106,8 +106,9 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
if (*nplanes != buffers)
return -EINVAL;
for (p = 0; p < buffers; p++) {
- if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
- dev->fmt_cap->data_offset[p])
+ if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
+ dev->fmt_cap->vdownsampling[p] +
+ dev->fmt_cap->data_offset[p])
return -EINVAL;
}
} else {
@@ -1556,8 +1557,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
return -EINVAL;
if (edid->blocks == 0) {
dev->edid_blocks = 0;
- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
+ if (dev->num_outputs) {
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
+ }
phys_addr = CEC_PHYS_ADDR_INVALID;
goto set_phys_addr;
}
@@ -1581,8 +1584,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
display_present |=
dev->display_present[i] << j++;
- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
+ if (dev->num_outputs) {
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
+ }
set_phys_addr:
/* TODO: a proper hotplug detect cycle should be emulated here */
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
index 184a6df2c29f..d05f547a587c 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
@@ -63,14 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
if (sizes[0] < size)
return -EINVAL;
for (p = 1; p < planes; p++) {
- if (sizes[p] < dev->bytesperline_out[p] * h +
- vfmt->data_offset[p])
+ if (sizes[p] < dev->bytesperline_out[p] * h /
+ vfmt->vdownsampling[p] +
+ vfmt->data_offset[p])
return -EINVAL;
}
} else {
for (p = 0; p < planes; p++)
- sizes[p] = p ? dev->bytesperline_out[p] * h +
- vfmt->data_offset[p] : size;
+ sizes[p] = p ? dev->bytesperline_out[p] * h /
+ vfmt->vdownsampling[p] +
+ vfmt->data_offset[p] : size;
}
if (vq->num_buffers + *nbuffers < 2)
@@ -127,7 +129,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
for (p = 0; p < planes; p++) {
if (p)
- size = dev->bytesperline_out[p] * h;
+ size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
size += vb->planes[p].data_offset;
if (vb2_get_plane_payload(vb, p) < size) {
@@ -334,8 +336,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
mp->plane_fmt[p].sizeimage =
- mp->plane_fmt[p].bytesperline * mp->height +
- fmt->data_offset[p];
+ mp->plane_fmt[p].bytesperline * mp->height /
+ fmt->vdownsampling[p] + fmt->data_offset[p];
}
for (p = fmt->buffers; p < fmt->planes; p++) {
unsigned stride = dev->bytesperline_out[p];
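
A quick numeric check of the per-plane size formula the vivid hunks above converge on (bytesperline * height / vdownsampling + data_offset), using illustrative values for a format whose second plane is vertically downsampled by 2:

#include <stdio.h>

int main(void)
{
	unsigned int bytesperline = 640, height = 480;
	unsigned int vdownsampling = 2;	/* this plane carries every other line */
	unsigned int data_offset = 0;

	unsigned int sizeimage = bytesperline * height / vdownsampling + data_offset;

	printf("%u\n", sizeimage);	/* 153600, half of the 307200-byte full plane */
	return 0;
}
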
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index f0626814d56b..4df0d7a0cd11 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1912,7 +1912,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
&args[0]);
if (err) {
dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
- goto err_invoke;
+ fastrpc_buf_free(buf);
+ return err;
}
/* update the buffer to be able to deallocate the memory on the DSP */
@@ -1950,8 +1951,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
err_assign:
fastrpc_req_munmap_impl(fl, buf);
-err_invoke:
- fastrpc_buf_free(buf);
return err;
}
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index 692daa9eff34..19c9d2cdd277 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource)
spin_lock(&vmci_resource_table.lock);
hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
- if (vmci_handle_is_equal(r->handle, resource->handle)) {
+ if (vmci_handle_is_equal(r->handle, resource->handle) &&
+ resource->type == r->type) {
hlist_del_init_rcu(&r->node);
break;
}
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index cca71867bc4a..92905fc46436 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -15,6 +15,19 @@
#include "card.h"
+static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
+ /*
+ * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
+ * This has so far only been observed on cards from 11/2019, while new
+ * cards from 2023/05 do not exhibit this behavior.
+ */
+ _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ END_FIXUP
+};
+
static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
#define INAND_CMD38_ARG_EXT_CSD 113
#define INAND_CMD38_ARG_ERASE 0x00
@@ -53,15 +66,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
- /*
- * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
- * This has so far only been observed on cards from 11/2019, while new
- * cards from 2023/05 do not exhibit this behavior.
- */
- _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
- 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
- MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
-
/*
* Some SD cards lockup while using CMD23 multiblock transfers.
*/
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c3e554344c99..240469a881a2 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -26,6 +26,7 @@
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "quirks.h"
#include "sd.h"
#include "sd_ops.h"
@@ -1475,6 +1476,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
}
+ /* Apply quirks prior to card setup */
+ mmc_fixup_device(card, mmc_sd_fixups);
+
err = mmc_sd_setup_card(host, card, oldcard != NULL);
if (err)
goto free_card;
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 41e94cd14109..fe7a4eac9595 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -612,7 +612,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cqhci_writel(cq_host, 0, CQHCI_CTL);
mmc->cqe_on = true;
pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
pr_err("%s: cqhci: CQE failed to exit halt state\n",
mmc_hostname(mmc));
}
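
The one-character cqhci fix above swaps a logical AND for a bitwise AND. A minimal sketch of the difference - with "&&" the condition is true for any nonzero register value, whether or not the halt bit is set (the bit value below is illustrative, not the controller's definition):

#include <stdio.h>

#define HALT_BIT 0x1u	/* illustrative halt flag */

int main(void)
{
	unsigned int ctl = 0x100;	/* some other bit set, halt bit clear */

	if (ctl && HALT_BIT)		/* logical: true for any nonzero ctl */
		printf("'&&' reports a halt that never happened\n");

	if (ctl & HALT_BIT)		/* bitwise: actually tests the halt bit */
		printf("'&' only fires when the bit is really set\n");

	return 0;
}
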
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 02bee7afab37..2f0bc79ef856 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2952,8 +2952,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535;
- mmc->max_seg_size = 0x1000;
- mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+ mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
+ mmc->max_seg_size = mmc->max_req_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 42d54532cabe..8379a0620c8f 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -510,6 +510,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
{ .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, },
{ }
};
+MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match);
static struct platform_driver aspeed_sdhci_driver = {
.driver = {
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 683203f87ae2..277493e41b07 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -82,7 +82,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
ipversion >>= 4;
@@ -92,7 +92,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -106,7 +106,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
} else {
@@ -122,7 +122,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
}
@@ -134,12 +134,12 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
if (!tun_dst) {
- bareudp->dev->stats.rx_dropped++;
+ DEV_STATS_INC(bareudp->dev, rx_dropped);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
@@ -165,8 +165,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
&((struct ipv6hdr *)oiph)->saddr);
}
if (err > 1) {
- ++bareudp->dev->stats.rx_frame_errors;
- ++bareudp->dev->stats.rx_errors;
+ DEV_STATS_INC(bareudp->dev, rx_frame_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
goto drop;
}
}
@@ -462,11 +462,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK;
}
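
The bareudp conversion above replaces plain "++" on dev->stats fields with DEV_STATS_INC(). The point is that a plain increment is a non-atomic read-modify-write, so concurrent datapath updates can lose counts, while the helper bumps the counter atomically. A generic standalone illustration of the two update styles (not the netdevice implementation):

#include <stdatomic.h>
#include <stdio.h>

struct toy_stats {
	unsigned long rx_dropped;		/* plain counter: increments can be lost */
	atomic_ulong rx_dropped_atomic;		/* atomic counter: each increment sticks */
};

static void note_drop(struct toy_stats *s)
{
	s->rx_dropped++;			/* racy read-modify-write under concurrency */
	atomic_fetch_add_explicit(&s->rx_dropped_atomic, 1, memory_order_relaxed);
}

int main(void)
{
	struct toy_stats s = { 0, 0 };

	note_drop(&s);
	printf("%lu %lu\n", s.rx_dropped,
	       (unsigned long)atomic_load(&s.rx_dropped_atomic));
	return 0;
}
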
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index a57005faa04f..c490b4ba065b 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -1580,23 +1580,15 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
return res;
}
-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
kvaser_pciefd_read_buffer(pcie, 0);
- /* Reset DMA buffer 0 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
kvaser_pciefd_read_buffer(pcie, 1);
- /* Reset DMA buffer 1 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
@@ -1605,6 +1597,7 @@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+ return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1631,27 +1624,31 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
- u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 srb_irq = 0;
+ u32 srb_release = 0;
int i;
- if (!(board_irq & irq_mask->all))
+ if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
- if (board_irq & irq_mask->kcan_rx0)
- kvaser_pciefd_receive_irq(pcie);
+ if (pci_irq & irq_mask->kcan_rx0)
+ srb_irq = kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
- if (!pcie->can[i]) {
- dev_err(&pcie->pci->dev,
- "IRQ mask points to unallocated controller\n");
- break;
- }
-
- /* Check that mask matches channel (i) IRQ mask */
- if (board_irq & irq_mask->kcan_tx[i])
+ if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
+
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
+
+ if (srb_release)
+ iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2395b1225cc8..fb77fd74de27 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1871,7 +1871,7 @@ static int m_can_open(struct net_device *dev)
/* start the m_can controller */
err = m_can_start(dev);
if (err)
- goto exit_irq_fail;
+ goto exit_start_fail;
if (!cdev->is_peripheral)
napi_enable(&cdev->napi);
@@ -1880,6 +1880,9 @@ static int m_can_open(struct net_device *dev)
return 0;
+exit_start_fail:
+ if (cdev->is_peripheral || dev->irq)
+ free_irq(dev->irq, dev);
exit_irq_fail:
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 79c4bab5f724..8c56f85e87c1 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -753,7 +753,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
- disable_irq(spi->irq);
+ disable_irq_nosync(spi->irq);
mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
/* Wait for oscillator startup timer after wake up */
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 1665f78abb5c..a9bafa96e2f9 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@...gutronix.de>
//
// Based on:
@@ -867,18 +867,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
- struct can_frame **cf, u32 *timestamp)
+ struct can_frame **cf, u32 *ts_raw)
{
struct sk_buff *skb;
int err;
- err = mcp251xfd_get_timestamp(priv, timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, ts_raw);
if (err)
return NULL;
skb = alloc_can_err_skb(priv->ndev, cf);
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw);
return skb;
}
@@ -889,7 +889,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
struct mcp251xfd_rx_ring *ring;
struct sk_buff *skb;
struct can_frame *cf;
- u32 timestamp, rxovif;
+ u32 ts_raw, rxovif;
int err, i;
stats->rx_over_errors++;
@@ -924,14 +924,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
return err;
}
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
if (!skb)
return 0;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -948,12 +948,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
- u32 bdiag1, timestamp;
+ u32 bdiag1, ts_raw;
struct sk_buff *skb;
struct can_frame *cf = NULL;
int err;
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
return err;
@@ -1035,8 +1035,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
if (!cf)
return 0;
- mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1049,7 +1049,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
struct sk_buff *skb;
struct can_frame *cf = NULL;
enum can_state new_state, rx_state, tx_state;
- u32 trec, timestamp;
+ u32 trec, ts_raw;
int err;
err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
@@ -1079,7 +1079,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
/* The skb allocation might fail, but can_change_state()
* handles cf == NULL.
*/
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1110,7 +1110,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
index 9e8e82cdba46..61b0d6fa52dd 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
if (ring) {
u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
- num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+ /* If the ring parameters have been configured in
+ * CAN-CC mode, but we are in CAN-FD mode now,
+ * they might be too big. Use the default CAN-FD values
+ * in this case.
+ */
+ num_rx = ring->rx_pending;
+ if (num_rx > layout->max_rx)
+ num_rx = layout->default_rx;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
/* The ethtool doc says:
* To disable coalescing, set usecs = 0 and max_frames = 1.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 4cb79a4f2461..f72582d4d3e8 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
int i, j;
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->last_valid = timecounter_read(&priv->tc);
rx_ring->head = 0;
rx_ring->tail = 0;
rx_ring->base = *base;
@@ -468,11 +469,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
/* switching from CAN-2.0 to CAN-FD mode or vice versa */
if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ const struct ethtool_coalesce ec = {
+ .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
+ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
+ .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
+ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
+ };
struct can_ram_layout layout;
- can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
- priv->rx_obj_num = layout.default_rx;
- tx_ring->obj_num = layout.default_tx;
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+
+ tx_ring->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
}
if (fd_mode) {
@@ -509,6 +524,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
+ ilog2(rx_obj_num);
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index ced8d9c81f8c..fe897f3e4c12 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@...gutronix.de>
//
// Based on:
@@ -16,23 +16,14 @@
#include "mcp251xfd.h"
-static inline int
-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head, bool *fifo_empty)
+static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta)
{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
- &fifo_sta);
- if (err)
- return err;
-
- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
- *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+}
- return 0;
+static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
}
static inline int
@@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
}
static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *len_p)
{
- u32 new_head;
- u8 chip_rx_head;
- bool fifo_empty;
+ const u8 shift = ring->obj_num_shift_to_u8;
+ u8 chip_head, tail, len;
+ u32 fifo_sta;
int err;
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
- &fifo_empty);
- if (err || fifo_empty)
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) {
+ *len_p = 0;
+ return 0;
+ }
+
+ if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) {
+ *len_p = ring->obj_num;
+ return 0;
+ }
+
+ chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_rx_tail(priv, ring);
+ if (err)
return err;
+ tail = mcp251xfd_get_rx_tail(ring);
- /* chip_rx_head, is the next RX-Object filled by the HW.
- * The new RX head must be >= the old head.
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference steady around the u8
+ * overflow. The right shift acts on len, which is a u8.
*/
- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
- if (new_head <= ring->head)
- new_head += ring->obj_num;
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len));
- ring->head = new_head;
+ len = (chip_head << shift) - (tail << shift);
+ *len_p = len >> shift;
- return mcp251xfd_check_rx_tail(priv, ring);
+ return 0;
}
static void
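As a minimal sketch of the obj_num_shift_to_u8 arithmetic above (not part of the patch; ring size and index values are made up), shifting both indices up to full u8 width makes the subtraction wrap at 256 instead of at the ring size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 32-object ring -> shift = BITS_PER_TYPE(u8) - ilog2(32) = 3 */
	const uint8_t shift = 3;
	uint8_t chip_head = 2;	/* hardware head, already wrapped */
	uint8_t tail = 30;	/* driver tail, not yet wrapped */
	uint8_t diff, len;

	diff = (chip_head << shift) - (tail << shift);	/* 16 - 240 wraps to 32 */
	len = diff >> shift;				/* 32 >> 3 = 4 objects */

	printf("%u objects pending\n", (unsigned)len);	/* matches (2 - 30) mod 32 */
	return 0;
}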
@@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
memcpy(cfd->data, hw_rx_obj->data, cfd->len);
-
- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}
static int
@@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
struct net_device_stats *stats = &priv->ndev->stats;
struct sk_buff *skb;
struct canfd_frame *cfd;
+ u64 timestamp;
int err;
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register (here: the RX FIFO head index)
+ * might be corrupted, and we might process past the RX FIFO's
+ * head into old CAN frames.
+ *
+ * Compare the timestamp of currently processed CAN frame with
+ * last valid frame received. Abort with -EBADMSG if an old
+ * CAN frame is detected.
+ */
+ timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
+ if (timestamp <= ring->last_valid) {
+ stats->rx_fifo_errors++;
+
+ return -EBADMSG;
+ }
+ ring->last_valid = timestamp;
+
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
skb = alloc_canfd_skb(priv->ndev, &cfd);
else
@@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
return 0;
}
+ mcp251xfd_skb_set_timestamp(skb, timestamp);
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
@@ -197,52 +225,81 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
return err;
}
+static int
+mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ u8 len)
+{
+ int offset;
+ int err;
+
+ if (!len)
+ return 0;
+
+ ring->head += len;
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+
+ return 0;
+}
+
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
struct mcp251xfd_rx_ring *ring)
{
struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
- u8 rx_tail, len;
+ u8 rx_tail, len, l;
int err, i;
- err = mcp251xfd_rx_ring_update(priv, ring);
+ err = mcp251xfd_get_rx_len(priv, ring, &len);
if (err)
return err;
- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- int offset;
-
+ while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
rx_tail = mcp251xfd_get_rx_tail(ring);
err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
- rx_tail, len);
+ rx_tail, l);
if (err)
return err;
- for (i = 0; i < len; i++) {
+ for (i = 0; i < l; i++) {
err = mcp251xfd_handle_rxif_one(priv, ring,
(void *)hw_rx_obj +
i * ring->obj_size);
- if (err)
+
+ /* -EBADMSG means we're affected by mcp2518fd
+ * erratum DS80000789E 6., i.e. the timestamp
+ * in the RX object is older than the last
+ * valid received CAN frame. Don't process any
+ * further and mark processed frames as good.
+ */
+ if (err == -EBADMSG)
+ return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
+ else if (err)
return err;
}
- /* Increment the RX FIFO tail pointer 'len' times in a
- * single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
+ err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
if (err)
return err;
- ring->tail += len;
+ len -= l;
}
return 0;
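The offset calculation carried over into mcp251xfd_handle_rxif_ring_uinc() always ends the burst on the final uinc_xfer entry, the only one with cs_change == 0. A standalone sketch with made-up numbers (UINC_XFER_ENTRIES stands in for ARRAY_SIZE(ring->uinc_xfer)):

#include <stdio.h>

#define UINC_XFER_ENTRIES 32	/* stand-in for ARRAY_SIZE(ring->uinc_xfer) */

int main(void)
{
	unsigned int len = 5;				/* tail increments to send */
	unsigned int offset = UINC_XFER_ENTRIES - len;	/* = 27 */

	/* Entries 27..31 go out in one spi_sync_transfer(); entry 31 has
	 * cs_change == 0, so chip select is released exactly at the end
	 * of the burst, as the comment in the driver requires.
	 */
	printf("send entries %u..%u\n", offset, UINC_XFER_ENTRIES - 1);
	return 0;
}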
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index 5b0c7890d4b4..3886476a8f8e 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -97,7 +97,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
tef_tail = mcp251xfd_get_tef_tail(priv);
skb = priv->can.echo_skb[tef_tail];
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
tef_tail, hw_tef_obj->ts,
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
index 712e09186987..1db99aabe85c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2021 Pengutronix,
+// Copyright (c) 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@...gutronix.de>
//
@@ -11,20 +11,20 @@
#include "mcp251xfd.h"
-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
+static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc)
{
const struct mcp251xfd_priv *priv;
- u32 timestamp = 0;
+ u32 ts_raw = 0;
int err;
priv = container_of(cc, struct mcp251xfd_priv, cc);
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
netdev_err(priv->ndev,
"Error %d while reading timestamp. HW timestamps may be inaccurate.",
err);
- return timestamp;
+ return ts_raw;
}
static void mcp251xfd_timestamp_work(struct work_struct *work)
@@ -39,21 +39,11 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp)
-{
- struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
- u64 ns;
-
- ns = timecounter_cyc2time(&priv->tc, timestamp);
- hwtstamps->hwtstamp = ns_to_ktime(ns);
-}
-
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
{
struct cyclecounter *cc = &priv->cc;
- cc->read = mcp251xfd_timestamp_read;
+ cc->read = mcp251xfd_timestamp_raw_read;
cc->mask = CYCLECOUNTER_MASK(32);
cc->shift = 1;
cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 4628bf847bc9..991662fbba42 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,7 +2,7 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
* Marc Kleine-Budde <kernel@...gutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@...tin.sperl.org>
*/
@@ -554,10 +554,14 @@ struct mcp251xfd_rx_ring {
unsigned int head;
unsigned int tail;
+ /* timestamp of the last valid received CAN frame */
+ u64 last_valid;
+
u16 base;
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
union mcp251xfd_write_reg_buf irq_enable_buf;
@@ -811,10 +815,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
return data;
}
-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
- u32 *timestamp)
+static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv,
+ u32 *ts_raw)
+{
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw);
+}
+
+static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static inline
+void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv,
+ struct sk_buff *skb, u32 ts_raw)
{
- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, ts_raw);
+ mcp251xfd_skb_set_timestamp(skb, ns);
}
static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
@@ -907,18 +928,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
-{
- return ring->head - ring->tail;
-}
-
static inline u8
-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_rx_len(ring);
-
return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
}
@@ -944,8 +956,6 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 23bd8b3f8993..a28bf5433ea7 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -34,7 +34,7 @@
#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
-#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
@@ -370,13 +370,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
break;
case VSC73XX_BLOCK_MII:
- case VSC73XX_BLOCK_CAPTURE:
case VSC73XX_BLOCK_ARBITER:
switch (subblock) {
case 0 ... 1:
return 1;
}
break;
+ case VSC73XX_BLOCK_CAPTURE:
+ switch (subblock) {
+ case 0 ... 4:
+ case 6 ... 7:
+ return 1;
+ }
+ break;
}
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index dcbc598b11c6..c6a3eefd83bf 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -931,14 +931,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
}
}
-static void dpaa_fq_setup(struct dpaa_priv *priv,
- const struct dpaa_fq_cbs *fq_cbs,
- struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
{
int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
const cpumask_t *affine_cpus = qman_affine_cpus();
- u16 channels[NR_CPUS];
struct dpaa_fq *fq;
+ u16 *channels;
+
+ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
channels[num_portals++] = qman_affine_channel(cpu);
@@ -997,6 +1001,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
break;
}
}
+
+ kfree(channels);
+
+ return 0;
}
static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -3416,7 +3424,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
*/
dpaa_eth_add_channel(priv->channel, &pdev->dev);
- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ if (err)
+ goto free_dpaa_bps;
/* Create a congestion group for this netdev, with
* dynamically-allocated CGR ID.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 5bd0b36d1feb..3f8cd4a7d845 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
struct netlink_ext_ack *extack)
{
const cpumask_t *cpus = qman_affine_cpus();
- bool needs_revert[NR_CPUS] = {false};
struct qman_portal *portal;
u32 period, prev_period;
u8 thresh, prev_thresh;
+ bool *needs_revert;
int cpu, res;
+ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+ if (!needs_revert)
+ return -ENOMEM;
+
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
needs_revert[cpu] = true;
}
+ kfree(needs_revert);
+
return 0;
revert_values:
@@ -498,6 +504,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
qman_dqrr_set_ithresh(portal, prev_thresh);
}
+ kfree(needs_revert);
+
return res;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 4d83c9a0c023..f9e94be36e97 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2573,7 +2573,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
(u16)(mac_reg & 0xFFFF));
hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
- FIELD_GET(E1000_RAH_AV, mac_reg));
+ (u16)((mac_reg & E1000_RAH_AV) >> 16));
}
e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
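The hunk above reverts an over-eager FIELD_GET() conversion: E1000_RAH_AV is a single bit (bit 31 of RAH), so FIELD_GET() collapses it to 0 or 1, while the BM_RAR_CTRL write needs that bit still sitting in the upper half of the 16-bit value. A standalone sketch, assuming E1000_RAH_AV == BIT(31) as in the e1000e headers:

#include <stdint.h>
#include <stdio.h>

#define E1000_RAH_AV	(1u << 31)	/* receive address valid bit */

int main(void)
{
	uint32_t mac_reg = 0x80001234;	/* sample RAH value with AV set */

	/* What FIELD_GET(E1000_RAH_AV, mac_reg) produced: just 0 or 1. */
	uint16_t as_field = (mac_reg & E1000_RAH_AV) >> 31;			/* 0x0001 */

	/* What the reverted code writes to BM_RAR_CTRL. */
	uint16_t as_reverted = (uint16_t)((mac_reg & E1000_RAH_AV) >> 16);	/* 0x8000 */

	printf("FIELD_GET-style 0x%04x vs reverted 0x%04x\n",
	       (unsigned)as_field, (unsigned)as_reverted);
	return 0;
}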
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index c7962f322db2..7b3ce30ba38f 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -313,6 +313,7 @@ enum ice_vsi_state {
ICE_VSI_UMAC_FLTR_CHANGED,
ICE_VSI_MMAC_FLTR_CHANGED,
ICE_VSI_PROMISC_CHANGED,
+ ICE_VSI_REBUILD_PENDING,
ICE_VSI_STATE_NBITS /* must be last */
};
@@ -409,6 +410,7 @@ struct ice_vsi {
struct ice_tx_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct mutex xdp_state_lock;
struct net_device **target_netdevs;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 13ca3342a0ce..b3010a53f1b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -459,6 +459,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
ice_vsi_free_stats(vsi);
ice_vsi_free_arrays(vsi);
+ mutex_destroy(&vsi->xdp_state_lock);
mutex_unlock(&pf->sw_mutex);
devm_kfree(dev, vsi);
}
@@ -660,6 +661,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
+ mutex_init(&vsi->xdp_state_lock);
+
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
@@ -3164,19 +3167,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
+ mutex_lock(&vsi->xdp_state_lock);
+
ret = ice_vsi_realloc_stat_arrays(vsi);
if (ret)
- goto err_vsi_cfg;
+ goto unlock;
ice_vsi_decfg(vsi);
ret = ice_vsi_cfg_def(vsi, &params);
if (ret)
- goto err_vsi_cfg;
+ goto unlock;
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
- if (!coalesce)
- return -ENOMEM;
+ if (!coalesce) {
+ ret = -ENOMEM;
+ goto decfg;
+ }
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
@@ -3184,22 +3191,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
- goto err_vsi_cfg_tc_lan;
+ goto free_coalesce;
}
- kfree(coalesce);
- return ice_schedule_reset(pf, ICE_RESET_PFR);
+ ret = ice_schedule_reset(pf, ICE_RESET_PFR);
+ goto free_coalesce;
}
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
- kfree(coalesce);
+ clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
- return 0;
-
-err_vsi_cfg_tc_lan:
- ice_vsi_decfg(vsi);
+free_coalesce:
kfree(coalesce);
-err_vsi_cfg:
+decfg:
+ if (ret)
+ ice_vsi_decfg(vsi);
+unlock:
+ mutex_unlock(&vsi->xdp_state_lock);
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b168a37a5dff..4d3a9fc79a6c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -606,11 +606,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
}
}
+
+ if (vsi->netdev)
+ netif_device_detach(vsi->netdev);
skip:
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
+ set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
@@ -2927,8 +2931,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
- bool if_running = netif_running(vsi->netdev);
int ret = 0, xdp_ring_err = 0;
+ bool if_running;
if (prog && !prog->aux->xdp_has_frags) {
if (frame_size > ice_max_xdp_frame_size(vsi)) {
@@ -2939,13 +2943,17 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/* hot swap progs and avoid toggling link */
- if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
+ if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
+ test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
}
+ if_running = netif_running(vsi->netdev) &&
+ !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
+
/* need to stop netdev while setting up the program for Rx rings */
- if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ if (if_running) {
ret = ice_down(vsi);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
@@ -3011,21 +3019,28 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_vsi *vsi = np->vsi;
+ int ret;
if (vsi->type != ICE_VSI_PF) {
NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
return -EINVAL;
}
+ mutex_lock(&vsi->xdp_state_lock);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
- return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ break;
case XDP_SETUP_XSK_POOL:
- return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
- xdp->xsk.queue_id);
+ ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+ mutex_unlock(&vsi->xdp_state_lock);
+ return ret;
}
/**
@@ -3979,13 +3994,17 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
ice_for_each_traffic_class(i) {
if (vsi->tc_cfg.ena_tc & BIT(i))
@@ -3996,6 +4015,11 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
}
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
+ goto done;
+
+rebuild_err:
+ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
+ err);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
return err;
@@ -7286,6 +7310,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
bool dvm;
@@ -7438,6 +7463,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild_arfs(pf);
}
+ if (vsi && vsi->netdev)
+ netif_device_attach(vsi->netdev);
+
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 67511153081a..9a9b8698881b 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -396,7 +396,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
goto failure;
}
- if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+ if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
+ ice_is_xdp_ena_vsi(vsi);
if (if_running) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8c8894ef3388..fa268d7bd1bc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6985,10 +6985,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
+ const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
+ TSINTR_TT0 | TSINTR_TT1 |
+ TSINTR_AUTT0 | TSINTR_AUTT1);
struct e1000_hw *hw = &adapter->hw;
u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
+ if (hw->mac.type == e1000_82580) {
+ /* 82580 has a hardware bug that requires an explicit
+ * write to clear the TimeSync interrupt cause.
+ */
+ wr32(E1000_TSICR, tsicr & mask);
+ }
+
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 21fb1a98ebca..da1018d83262 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7288,6 +7288,7 @@ static void igc_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(netdev)) {
if (igc_open(netdev)) {
+ rtnl_unlock();
netdev_err(netdev, "igc_open failed after reset\n");
return;
}
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index fe4e166de8a0..79276bc3d495 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -1442,18 +1442,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
rule->cookie, false);
- vcap_free_rule(rule);
-
- /* Check that the rule has been freed: tricky to access since this
- * memory should not be accessible anymore
- */
- KUNIT_EXPECT_PTR_NE(test, NULL, rule);
- ret = list_empty(&rule->keyfields);
- KUNIT_EXPECT_EQ(test, true, ret);
- ret = list_empty(&rule->actionfields);
- KUNIT_EXPECT_EQ(test, true, ret);
-
- vcap_del_rule(&test_vctrl, &test_netdev, id);
+ ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
+ KUNIT_EXPECT_EQ(test, 0, ret);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 765c5fc158f8..d8cce3771af2 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1858,10 +1858,12 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
napi = &apc->tx_qp[i].tx_cq.napi;
- napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
-
+ if (apc->tx_qp[i].txq.napi_initialized) {
+ napi_synchronize(napi);
+ napi_disable(napi);
+ netif_napi_del(napi);
+ apc->tx_qp[i].txq.napi_initialized = false;
+ }
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
@@ -1917,6 +1919,7 @@ static int mana_create_txq(struct mana_port_context *apc,
txq->ndev = net;
txq->net_txq = netdev_get_tx_queue(net, i);
txq->vp_offset = apc->tx_vp_offset;
+ txq->napi_initialized = false;
skb_queue_head_init(&txq->pending_skbs);
memset(&spec, 0, sizeof(spec));
@@ -1983,6 +1986,7 @@ static int mana_create_txq(struct mana_port_context *apc,
netif_napi_add_tx(net, &cq->napi, mana_poll);
napi_enable(&cq->napi);
+ txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
}
@@ -1994,7 +1998,7 @@ static int mana_create_txq(struct mana_port_context *apc,
}
static void mana_destroy_rxq(struct mana_port_context *apc,
- struct mana_rxq *rxq, bool validate_state)
+ struct mana_rxq *rxq, bool napi_initialized)
{
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -2009,15 +2013,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
napi = &rxq->rx_cq.napi;
- if (validate_state)
+ if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
+ napi_disable(napi);
+ netif_napi_del(napi);
+ }
xdp_rxq_info_unreg(&rxq->xdp_rxq);
- netif_napi_del(napi);
-
mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
mana_deinit_cq(apc, &rxq->rx_cq);
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index 5bf6fdff701c..346e6ad36054 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -91,8 +91,8 @@ static int next_chunk_len(struct mctp_serial *dev)
* will be those non-escaped bytes, and does not include the escaped
* byte.
*/
- for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) {
- if (needs_escape(dev->txbuf[dev->txpos + i + 1]))
+ for (i = 1; i + dev->txpos < dev->txlen; i++) {
+ if (needs_escape(dev->txbuf[dev->txpos + i]))
break;
}
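The one-byte shift in the loop bounds above is easiest to see on a short buffer. A self-contained sketch (needs_escape() below is a stand-in for the driver's test; 0x7e/0x7d are the MCTP serial framing and escape bytes):

#include <stdbool.h>
#include <stdio.h>

static bool needs_escape(unsigned char c)
{
	return c == 0x7e || c == 0x7d;
}

int main(void)
{
	unsigned char txbuf[] = { 0x01, 0x7e, 0x03 };
	int txpos = 0, txlen = 3, i;

	/* Old bound: the escape check started one byte too late, so the
	 * 0x7e at index 1 slipped into a two-byte "unescaped" chunk.
	 */
	for (i = 1; i + txpos + 1 < txlen; i++)
		if (needs_escape(txbuf[txpos + i + 1]))
			break;
	printf("old chunk length: %d\n", i);	/* 2: frames the 0x7e raw */

	/* Fixed bound: every byte after txpos is checked, so the chunk
	 * stops right before the byte that needs escaping.
	 */
	for (i = 1; i + txpos < txlen; i++)
		if (needs_escape(txbuf[txpos + i]))
			break;
	printf("new chunk length: %d\n", i);	/* 1: escape handled next */

	return 0;
}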
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c895cd178e6a..2e4bff6055e2 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3164,11 +3164,13 @@ static int of_phy_leds(struct phy_device *phydev)
err = of_phy_led(phydev, led);
if (err) {
of_node_put(led);
+ of_node_put(leds);
phy_leds_unregister(phydev);
return err;
}
}
+ of_node_put(leds);
return 0;
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 687d70cfc556..6eeef10edada 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -475,8 +475,8 @@ static int ipheth_close(struct net_device *net)
{
struct ipheth_device *dev = netdev_priv(net);
- cancel_delayed_work_sync(&dev->carrier_work);
netif_stop_queue(net);
+ cancel_delayed_work_sync(&dev->carrier_work);
return 0;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 127b34dcc5b3..ce19ebd180f1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5143,14 +5143,23 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
data = (u8 *)mac;
data += __le16_to_cpu(mac->fw_offset);
- generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data,
- type);
+ if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length,
+ data, type) < 0) {
+ dev_err(&tp->intf->dev, "Write %s fw fail\n",
+ type ? "PLA" : "USB");
+ return;
+ }
ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr),
__le16_to_cpu(mac->bp_ba_value));
- generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
- __le16_to_cpu(mac->bp_num) << 1, mac->bp, type);
+ if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
+ ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4),
+ mac->bp, type) < 0) {
+ dev_err(&tp->intf->dev, "Write %s bp fail\n",
+ type ? "PLA" : "USB");
+ return;
+ }
bp_en_addr = __le16_to_cpu(mac->bp_en_addr);
if (bp_en_addr)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2d14b0d78541..6cc1b56ddde2 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -61,9 +61,6 @@
/*-------------------------------------------------------------------------*/
-// randomly generated ethernet address
-static u8 node_id [ETH_ALEN];
-
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
@@ -1731,7 +1728,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->net = net;
strscpy(net->name, "usb%d", sizeof(net->name));
- eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes;
* bind() should set rx_urb_size in that case.
@@ -1805,9 +1801,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
goto out4;
}
- /* let userspace know we have a random address */
- if (ether_addr_equal(net->dev_addr, node_id))
- net->addr_assign_type = NET_ADDR_RANDOM;
+ /* this flags the device for user space */
+ if (!is_valid_ether_addr(net->dev_addr))
+ eth_hw_addr_random(net);
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
SET_NETDEV_DEVTYPE(net, &wlan_type);
@@ -2217,7 +2213,6 @@ static int __init usbnet_init(void)
BUILD_BUG_ON(
sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
- eth_random_addr(node_id);
return 0;
}
module_init(usbnet_init);
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index ba6fc27f4a1a..dd2a7c95517b 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1614,7 +1614,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
{
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
int i;
- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
+ u8 ampdu_factor, max_nss;
+ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u16 mcs_160_map, mcs_80_map;
bool support_160;
u16 v;
@@ -3355,6 +3357,11 @@ static int ath12k_station_assoc(struct ath12k *ar,
ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+ if (peer_arg.peer_nss < 1) {
+ ath12k_warn(ar->ab,
+ "invalid peer NSS %d\n", peer_arg.peer_nss);
+ return -EINVAL;
+ }
ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 543e93ec49d2..9ab669487de4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1086,6 +1086,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index c780e5ffcd59..bace9d01fd58 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1318,7 +1318,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
static inline struct ieee80211_bss_conf *
iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu)
{
- if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf),
+ "erroneous FW link ID: %d\n", link_id))
return NULL;
if (rcu)
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 7bdec6c62248..dc6b4cf616be 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1290,6 +1290,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
+ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
+ continue;
+
if ((adapter->priv[i]->bss_num == bss_num) &&
(adapter->priv[i]->bss_type == bss_type))
break;
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index efd0c2915a05..04a64afcbf8a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -742,7 +742,6 @@ static struct rtw_hci_ops rtw_usb_ops = {
static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
- int i;
rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
if (!rtwusb->rxwq) {
@@ -754,13 +753,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
+ return 0;
+}
+
+static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ int i;
+
for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
rtw_usb_rx_resubmit(rtwusb, rxcb);
}
-
- return 0;
}
static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
@@ -897,6 +902,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err_destroy_rxwq;
}
+ rtw_usb_setup_rx(rtwdev);
+
return 0;
err_destroy_rxwq:
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2c3f55877a11..7fc1ab4d9e7d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2471,6 +2471,12 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
{
+ if (!dev->ctrl.tagset) {
+ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
+ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
+ return;
+ }
+
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
/* free previously allocated queues that are no longer usable */
nvme_free_queues(dev, dev->online_queues);
@@ -2929,6 +2935,17 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
+ /*
+ * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
+ * because of high power consumption (> 2 Watt) in s2idle
+ * sleep. Only some boards with Intel CPU are affected.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
+ dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
+ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
}
/*
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c65a1f4421f6..bd142aed20f4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1859,8 +1859,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
}
queue->nr_cmds = sq->size * 2;
- if (nvmet_tcp_alloc_cmds(queue))
+ if (nvmet_tcp_alloc_cmds(queue)) {
+ queue->nr_cmds = 0;
return NVME_SC_INTERNAL;
+ }
return 0;
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 040dfa01fa12..e7fd1315d7ed 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1253,13 +1253,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
+ * devm_nvmem_device_get() - Get nvmem device of device from a given id
*
* @dev: Device that requests the nvmem device.
* @id: name id for the requested nvmem device.
*
- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
- * on success. The nvmem_cell will be freed by the automatically once the
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success. The nvmem_device will be freed automatically once the
* device is freed.
*/
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index c94203ce65bb..8fd63100ba8f 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -344,7 +344,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
struct device_node *p;
const __be32 *addr;
u32 intsize;
- int i, res;
+ int i, res, addr_len;
+ __be32 addr_buf[3] = { 0 };
pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
@@ -353,13 +354,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
return of_irq_parse_oldworld(device, index, out_irq);
/* Get the reg property (if any) */
- addr = of_get_property(device, "reg", NULL);
+ addr = of_get_property(device, "reg", &addr_len);
+
+ /* Prevent out-of-bounds read in case of longer interrupt parent address size */
+ if (addr_len > (3 * sizeof(__be32)))
+ addr_len = 3 * sizeof(__be32);
+ if (addr)
+ memcpy(addr_buf, addr, addr_len);
/* Try the new-style interrupts-extended first */
res = of_parse_phandle_with_args(device, "interrupts-extended",
"#interrupt-cells", index, out_irq);
if (!res)
- return of_irq_parse_raw(addr, out_irq);
+ return of_irq_parse_raw(addr_buf, out_irq);
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
@@ -389,7 +396,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
/* Check if there are any interrupt-map translations to process */
- res = of_irq_parse_raw(addr, out_irq);
+ res = of_irq_parse_raw(addr_buf, out_irq);
out:
of_node_put(p);
return res;
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 54a3c7f29f78..c1dedc83759c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -34,6 +34,11 @@
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -104,6 +109,8 @@
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -518,7 +525,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
@@ -530,6 +541,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
if (pci_is_root_bus(bus))
bridge = dev;
@@ -551,10 +567,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
+ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
+
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+ if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
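The revision check added above reduces to pulling bits 15:11 out of the application PID register and comparing against 0x15 (AM654 PG1.0 silicon). A standalone sketch of the same extraction; the sample register value is made up:

#include <stdint.h>
#include <stdio.h>

#define RTL			0xf800u	/* GENMASK(15, 11) */
#define RTL_SHIFT		11
#define AM6_PCI_PG1_RTL_VER	0x15

int main(void)
{
	uint32_t pid = 0x4e30a90a;			/* hypothetical PID readout */
	uint32_t rtl = (pid & RTL) >> RTL_SHIFT;	/* bits 15:11 -> 0x15 here */

	if (rtl == AM6_PCI_PG1_RTL_VER)
		printf("AM654 PG1.0: clamp MRRS to 128 bytes\n");
	else
		printf("RTL 0x%02x: no MRRS clamp needed\n", (unsigned)rtl);
	return 0;
}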
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 881d420637bf..092c9ac0d26d 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -39,7 +39,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
bool disable_device)
{
struct pci_dev *pdev = php_slot->pdev;
- int irq = php_slot->irq;
u16 ctrl;
if (php_slot->irq > 0) {
@@ -58,7 +57,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
php_slot->wq = NULL;
}
- if (disable_device || irq > 0) {
+ if (disable_device) {
if (pdev->msix_enabled)
pci_disable_msix(pdev);
else if (pdev->msi_enabled)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a0f961a380fa..53e9e9788bd5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5718,10 +5718,12 @@ static void pci_bus_lock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ pci_dev_lock(bus->self);
list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5733,8 +5735,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
}
/* Return 1 on successful lock, 0 on contention */
@@ -5742,15 +5746,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ if (!pci_dev_trylock(bus->self))
+ return 0;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
- if (!pci_bus_trylock(dev->subordinate)) {
- pci_dev_unlock(dev);
+ if (!pci_bus_trylock(dev->subordinate))
goto unlock;
- }
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5758,8 +5762,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
return 0;
}
@@ -5791,9 +5797,10 @@ static void pci_slot_lock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5819,14 +5826,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
if (!pci_bus_trylock(dev->subordinate)) {
pci_dev_unlock(dev);
goto unlock;
}
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5837,7 +5843,8 @@ static int pci_slot_trylock(struct pci_slot *slot)
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
return 0;
}
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 1365eaa20ff4..ff169124929c 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
start = PCIBIOS_MIN_CARDBUS_IO;
end = ~0U;
} else {
- unsigned long avail = root->end - root->start;
+ unsigned long avail = resource_size(root);
int i;
size = BRIDGE_MEM_MAX;
- if (size > avail/8) {
- size = (avail+1)/8;
+ if (size > (avail - 1) / 8) {
+ size = avail / 8;
/* round size down to next power of 2 */
i = 0;
while ((size /= 2) != 0)
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index 8c8b1ca31e4c..c72b52955a86 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -846,6 +846,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
phy_type = args->args[1];
phy_instance = args->args[2];
+ guard(mutex)(&gtr_phy->phy->mutex);
ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
if (ret < 0) {
dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index 86b95206cb1b..6fb538a13868 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -590,7 +590,10 @@ static int __init dell_smbios_init(void)
return 0;
fail_sysfs:
- free_group(platform_device);
+ if (!wmi)
+ exit_dell_smbios_wmi();
+ if (!smm)
+ exit_dell_smbios_smm();
fail_create_group:
platform_device_del(platform_device);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index a5a31dfa4512..ee2da8e49d4c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -166,7 +166,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
unsigned long flags;
pm8001_ha = sas_phy->ha->lldd_ha;
phy = &pm8001_ha->phy[phy_id];
- pm8001_ha->phy[phy_id].enable_completion = &completion;
if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
/*
@@ -190,6 +189,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
rates->maximum_linkrate;
}
if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
+ pm8001_ha->phy[phy_id].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -198,6 +198,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_HARD_RESET:
if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
+ pm8001_ha->phy[phy_id].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -206,6 +207,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_LINK_RESET:
if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
+ pm8001_ha->phy[phy_id].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 3c0f7dc9614d..f7e268cf9085 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -82,6 +82,10 @@
#define TCR_RXMSK BIT(19)
#define TCR_TXMSK BIT(18)
+struct fsl_lpspi_devtype_data {
+ u8 prescale_max;
+};
+
struct lpspi_config {
u8 bpw;
u8 chip_select;
@@ -119,10 +123,25 @@ struct fsl_lpspi_data {
bool usedma;
struct completion dma_rx_completion;
struct completion dma_tx_completion;
+
+ const struct fsl_lpspi_devtype_data *devtype_data;
+};
+
+/*
+ * ERR051608 fixed or not:
+ * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
+ */
+static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
+ .prescale_max = 1,
+};
+
+static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
+ .prescale_max = 7,
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
- { .compatible = "fsl,imx7ulp-spi", },
+ { .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
+ { .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
@@ -297,9 +316,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
struct lpspi_config config = fsl_lpspi->config;
unsigned int perclk_rate, scldiv, div;
+ u8 prescale_max;
u8 prescale;
perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+ prescale_max = fsl_lpspi->devtype_data->prescale_max;
if (!config.speed_hz) {
dev_err(fsl_lpspi->dev,
@@ -315,7 +336,7 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
- for (prescale = 0; prescale < 8; prescale++) {
+ for (prescale = 0; prescale <= prescale_max; prescale++) {
scldiv = div / (1 << prescale) - 2;
if (scldiv < 256) {
fsl_lpspi->config.prescale = prescale;
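A worked example of the prescaler search above with made-up clock numbers (editorial sketch, not driver code): SCK comes out to perclk / (2^prescale * (scldiv + 2)), and on i.MX93 the ERR051608 workaround caps prescale at 1, so very low rates that a 7-level prescaler could reach now fail cleanly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t perclk_rate = 24000000;	/* 24 MHz functional clock */
	uint32_t speed_hz = 5000;		/* requested SCK, deliberately slow */
	uint32_t div = (perclk_rate + speed_hz - 1) / speed_hz;	/* 4800 */
	uint8_t prescale_max = 1;		/* i.MX93 limit (ERR051608) */
	uint8_t prescale;

	for (prescale = 0; prescale <= prescale_max; prescale++) {
		uint32_t scldiv = div / (1u << prescale) - 2;

		if (scldiv < 256) {
			printf("prescale %u, scldiv %u -> SCK %u Hz\n",
			       (unsigned)prescale, scldiv,
			       perclk_rate / ((1u << prescale) * (scldiv + 2)));
			return 0;
		}
	}

	/* 4800 / 2 - 2 = 2398 >= 256, so with prescale capped at 1 the
	 * requested rate is unreachable and the driver returns -EINVAL
	 * instead of programming a divider the erratum forbids.
	 */
	printf("requested rate unreachable\n");
	return 1;
}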
@@ -822,6 +843,7 @@ static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
static int fsl_lpspi_probe(struct platform_device *pdev)
{
+ const struct fsl_lpspi_devtype_data *devtype_data;
struct fsl_lpspi_data *fsl_lpspi;
struct spi_controller *controller;
struct resource *res;
@@ -830,6 +852,10 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
u32 temp;
bool is_target;
+ devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!devtype_data)
+ return -ENODEV;
+
is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
if (is_target)
controller = devm_spi_alloc_target(&pdev->dev,
@@ -848,6 +874,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->is_target = is_target;
fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
"fsl,spi-only-use-cs1-sel");
+ fsl_lpspi->devtype_data = devtype_data;
init_completion(&fsl_lpspi->xfer_done);
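
The bit-rate search above now stops at a per-SoC prescaler limit (7 on i.MX7ULP, 1 on i.MX93 parts without the ERR051608 fix) instead of always trying eight steps. A stand-alone sketch of the same arithmetic; pick_prescale() is a made-up name for illustration:

#include <stdio.h>

/* Find the smallest prescaler for which the 8-bit SCK divider fits. */
static int pick_prescale(unsigned long perclk, unsigned long speed,
			 unsigned int prescale_max,
			 unsigned int *prescale, unsigned int *scldiv)
{
	unsigned long div = (perclk + speed - 1) / speed;	/* DIV_ROUND_UP */
	unsigned int p;

	for (p = 0; p <= prescale_max; p++) {
		unsigned long d = div / (1UL << p);

		if (d >= 2 && d - 2 < 256) {
			*prescale = p;
			*scldiv = d - 2;
			return 0;
		}
	}
	return -1;	/* requested rate not reachable within this prescaler range */
}

int main(void)
{
	unsigned int p, s;

	if (!pick_prescale(24000000, 1000000, 7, &p, &s))
		printf("prescale=%u scldiv=%u\n", p, s);
	return 0;
}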
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 6910b4d4c427..16054695bdb0 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -481,6 +481,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (host->max_speed_hz == 0)
+ return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n");
+
ret = device_property_read_u16(dev, "num-cs",
&host->num_chipselect);
if (ret)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 5b010094dace..1f374cf4d6f6 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -974,14 +974,16 @@ static int rockchip_spi_suspend(struct device *dev)
{
int ret;
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
ret = spi_controller_suspend(ctlr);
if (ret < 0)
return ret;
- clk_disable_unprepare(rs->spiclk);
- clk_disable_unprepare(rs->apb_pclk);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ spi_controller_resume(ctlr);
+ return ret;
+ }
pinctrl_pm_select_sleep_state(dev);
@@ -992,25 +994,14 @@ static int rockchip_spi_resume(struct device *dev)
{
int ret;
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
pinctrl_pm_select_default_state(dev);
- ret = clk_prepare_enable(rs->apb_pclk);
+ ret = pm_runtime_force_resume(dev);
if (ret < 0)
return ret;
- ret = clk_prepare_enable(rs->spiclk);
- if (ret < 0)
- clk_disable_unprepare(rs->apb_pclk);
-
- ret = spi_controller_resume(ctlr);
- if (ret < 0) {
- clk_disable_unprepare(rs->spiclk);
- clk_disable_unprepare(rs->apb_pclk);
- }
-
- return 0;
+ return spi_controller_resume(ctlr);
}
#endif /* CONFIG_PM_SLEEP */
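
The suspend/resume paths above stop toggling the clocks by hand and instead delegate to pm_runtime_force_suspend()/pm_runtime_force_resume(), unwinding the controller suspend if that step fails. A condensed sketch of the ordering, not the rockchip driver itself:

#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int my_spi_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(ctlr);	/* quiesce the queue first */
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);	/* runtime PM turns clocks off */
	if (ret)
		spi_controller_resume(ctlr);	/* undo step one on failure */

	return ret;
}

static int my_spi_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);	/* clocks back on */
	if (ret)
		return ret;

	return spi_controller_resume(ctlr);
}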
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 285df0e489a6..cfdfe66d74f1 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -114,7 +114,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
clk_freq = clk_get_rate(st->mclk);
- if (fout > (clk_freq / 2))
+ if (!clk_freq || fout > (clk_freq / 2))
return -EINVAL;
regval = ad9834_calc_freqreg(clk_freq, fout);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ad0ef5b6b8cf..ed59d2367a4e 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -10130,7 +10130,8 @@ void ufshcd_remove(struct ufs_hba *hba)
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
- scsi_remove_host(hba->host);
+ if (hba->scsi_host_added)
+ scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
@@ -10408,6 +10409,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "scsi_add_host failed\n");
goto out_disable;
}
+ hba->scsi_host_added = true;
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
@@ -10489,7 +10491,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
- scsi_remove_host(hba->host);
+ if (hba->scsi_host_added)
+ scsi_remove_host(hba->host);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 6be3462b109f..a2c7abf8c289 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context)
/*
* Callback from vmbus_event when channel is rescinded.
+ * It is meant for rescind of primary channels only.
*/
static void hv_uio_rescind(struct vmbus_channel *channel)
{
- struct hv_device *hv_dev = channel->primary_channel->device_obj;
+ struct hv_device *hv_dev = channel->device_obj;
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
/*
@@ -118,6 +119,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
/* Wake up reader */
uio_event_notify(&pdata->info);
+
+ /*
+ * With rescind callback registered, rescind path will not unregister the device
+ * from vmbus when the primary channel is rescinded.
+ * Without it, rescind handling is incomplete and next onoffer msg does not come.
+ * Unregister the device from vmbus here.
+ */
+ vmbus_device_unregister(channel->device_obj);
}
/* Sysfs API to allow mmap of the ring buffers
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 21ff6cbe5e49..9955091c5336 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1288,6 +1288,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /*
+ * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
+ * only. If the PM TIMER ECM is enabled through GUCTL2[19], the
+ * link compliance test (TD7.21) may fail. If the ECN is not
+ * enabled (GUCTL2[19] = 0), the controller will use the old timer
+ * value (5us), which is still acceptable for the link compliance
+ * test. Therefore, do not enable PM TIMER ECM in 3.20a by
+ * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0.
+ */
+ if (DWC3_VER_IS(DWC3, 320A)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+ reg &= ~DWC3_GUCTL2_LC_TIMER;
+ dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+ }
+
/*
* When configured in HOST mode, after issuing U3/L2 exit controller
* fails to send proper CRC checksum in CRC5 feild. Because of this
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 07b062c2f647..db1a793a9b13 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -408,6 +408,7 @@
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
+#define DWC3_GUCTL2_LC_TIMER BIT(19)
/* Global User Control Register 3 */
#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
@@ -1238,6 +1239,7 @@ struct dwc3 {
#define DWC3_REVISION_290A 0x5533290a
#define DWC3_REVISION_300A 0x5533300a
#define DWC3_REVISION_310A 0x5533310a
+#define DWC3_REVISION_320A 0x5533320a
#define DWC3_REVISION_330A 0x5533330a
#define DWC31_REVISION_ANY 0x0
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 579d90efc281..1a1da4a05701 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -287,6 +287,23 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async);
*
* Caller should handle locking. This function will issue @cmd with given
* @params to @dep and wait for its completion.
+ *
+ * According to the programming guide, if the link state is in L1/L2/U3,
+ * then sending the Start Transfer command may not complete. The
+ * programming guide suggests bringing the link state back to ON/U0 by
+ * performing remote wakeup prior to sending the command. However, don't
+ * initiate remote wakeup when the user/function has not sent a wakeup
+ * request via wakeup ops. Send the command when it's allowed.
+ *
+ * Notes:
+ * For L1 link state, issuing a command requires the clearing of
+ * GUSB2PHYCFG.SUSPENDUSB2, which turns on the signal required to complete
+ * the given command (usually within 50us). This should happen within the
+ * command timeout set by driver. No additional step is needed.
+ *
+ * For L2 or U3 link state, the gadget is in USB suspend. Care should be
+ * taken when sending Start Transfer command to ensure that it's done after
+ * USB resume.
*/
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
struct dwc3_gadget_ep_cmd_params *params)
@@ -327,30 +344,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
- if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
- int link_state;
-
- /*
- * Initiate remote wakeup if the link state is in U3 when
- * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
- * link state is in U1/U2, no remote wakeup is needed. The Start
- * Transfer command will initiate the link recovery.
- */
- link_state = dwc3_gadget_get_link_state(dwc);
- switch (link_state) {
- case DWC3_LINK_STATE_U2:
- if (dwc->gadget->speed >= USB_SPEED_SUPER)
- break;
-
- fallthrough;
- case DWC3_LINK_STATE_U3:
- ret = __dwc3_gadget_wakeup(dwc, false);
- dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
- ret);
- break;
- }
- }
-
/*
* For some commands such as Update Transfer command, DEPCMDPARn
* registers are reserved. Since the driver often sends Update Transfer
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
index fc2ead0fe621..4868286574a1 100644
--- a/drivers/usb/gadget/udc/aspeed_udc.c
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -1009,6 +1009,8 @@ static void ast_udc_getstatus(struct ast_udc_dev *udc)
break;
case USB_RECIP_ENDPOINT:
epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum >= AST_UDC_NUM_ENDPOINTS)
+ goto stall;
status = udc->ep[epnum].stopped;
break;
default:
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
index 0eed0e03842c..d394affb7072 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -2251,7 +2251,6 @@ static int cdns2_gadget_start(struct cdns2_device *pdev)
{
u32 max_speed;
void *buf;
- int val;
int ret;
pdev->usb_regs = pdev->regs;
@@ -2261,14 +2260,9 @@ static int cdns2_gadget_start(struct cdns2_device *pdev)
pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET;
/* Reset controller. */
- set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST);
-
- ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val,
- !(val & CPUCTRL_SW_RST), 1, 10000);
- if (ret) {
- dev_err(pdev->dev, "Error: reset controller timeout\n");
- return -EINVAL;
- }
+ writeb(CPUCTRL_SW_RST | CPUCTRL_UPCLK | CPUCTRL_WUEN,
+ &pdev->usb_regs->cpuctrl);
+ usleep_range(5, 10);
usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL);
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
index 71e2f62d653a..b5d5ec12e986 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
@@ -292,8 +292,17 @@ struct cdns2_usb_regs {
#define SPEEDCTRL_HSDISABLE BIT(7)
/* CPUCTRL- bitmasks. */
+/* UP clock enable */
+#define CPUCTRL_UPCLK BIT(0)
/* Controller reset bit. */
#define CPUCTRL_SW_RST BIT(1)
+/**
+ * If the wuen bit is ‘1’, the upclken is automatically set to ‘1’ after
+ * detecting rising edge of wuintereq interrupt. If the wuen bit is ‘0’,
+ * the wuintereq interrupt is ignored.
+ */
+#define CPUCTRL_WUEN BIT(7)
+
/**
* struct cdns2_adma_regs - ADMA controller registers.
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 451d9569163a..f794cb39cc31 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -422,6 +422,7 @@ static void uas_data_cmplt(struct urb *urb)
uas_log_cmd_state(cmnd, "data cmplt err", status);
/* error: no data transfered */
scsi_set_resid(cmnd, sdb->length);
+ set_host_byte(cmnd, DID_ERROR);
} else {
scsi_set_resid(cmnd, sdb->length - urb->actual_length);
}
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index a94ec6225d31..5f9e7e477078 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -364,7 +364,6 @@ static void tce_iommu_release(void *iommu_data)
if (!tbl)
continue;
- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
tce_iommu_free_table(container, tbl);
}
@@ -720,6 +719,8 @@ static long tce_iommu_remove_window(struct tce_container *container,
BUG_ON(!tbl->it_size);
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+
/* Detach groups from IOMMUs */
list_for_each_entry(tcegrp, &container->group_list, next) {
table_group = iommu_group_get_iommudata(tcegrp->grp);
@@ -738,7 +739,6 @@ static long tce_iommu_remove_window(struct tce_container *container,
}
/* Free table */
- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
tce_iommu_free_table(container, tbl);
container->tables[num] = NULL;
@@ -1197,9 +1197,14 @@ static void tce_iommu_release_ownership(struct tce_container *container,
return;
}
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- if (container->tables[i])
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ if (container->tables[i]) {
+ tce_iommu_clear(container, container->tables[i],
+ container->tables[i]->it_offset,
+ container->tables[i]->it_size);
table_group->ops->unset_window(table_group, i);
+ }
+ }
}
static long tce_iommu_take_ownership(struct tce_container *container,
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6f7e5010a673..80669e05bf0e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -3126,8 +3126,10 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
{
struct vring_virtqueue *vq = to_vvq(_vq);
- if (!vq->use_dma_api)
+ if (!vq->use_dma_api) {
+ kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
return (dma_addr_t)virt_to_phys(ptr);
+ }
return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 923f064c7e3e..61aaded483e1 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -17,6 +17,7 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
@@ -842,6 +843,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_SPINLOCK(irqfds_lock);
+DEFINE_STATIC_SRCU(irqfds_srcu);
static LIST_HEAD(irqfds_list);
struct privcmd_kernel_irqfd {
@@ -869,6 +871,9 @@ static void irqfd_shutdown(struct work_struct *work)
container_of(work, struct privcmd_kernel_irqfd, shutdown);
u64 cnt;
+ /* Make sure irqfd has been initialized in assign path */
+ synchronize_srcu(&irqfds_srcu);
+
eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
eventfd_ctx_put(kirqfd->eventfd);
kfree(kirqfd);
@@ -931,7 +936,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
__poll_t events;
struct fd f;
void *dm_op;
- int ret;
+ int ret, idx;
kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
if (!kirqfd)
@@ -977,6 +982,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
}
}
+ idx = srcu_read_lock(&irqfds_srcu);
list_add_tail(&kirqfd->list, &irqfds_list);
spin_unlock_irqrestore(&irqfds_lock, flags);
@@ -988,6 +994,8 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
if (events & EPOLLIN)
irqfd_inject(kirqfd);
+ srcu_read_unlock(&irqfds_srcu, idx);
+
/*
* Do not drop the file until the kirqfd is fully initialized, otherwise
* we might race against the EPOLLHUP.
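
The SRCU usage above pairs a read-side section around the tail of irqfd assignment with a synchronize_srcu() at the start of shutdown, so the shutdown worker cannot run against a half-initialised irqfd. A generic sketch of that handshake; my_obj and the helpers are placeholders, not privcmd symbols:

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

struct my_obj;
void publish(struct my_obj *obj);	/* add to the shared list */
void finish_init(struct my_obj *obj);	/* e.g. first event injection */
void teardown(struct my_obj *obj);

static void assign_path(struct my_obj *obj)
{
	int idx;

	idx = srcu_read_lock(&my_srcu);
	publish(obj);
	finish_init(obj);
	srcu_read_unlock(&my_srcu, idx);
}

static void shutdown_path(struct my_obj *obj)
{
	/* Waits for any assign_path() still inside its read-side section. */
	synchronize_srcu(&my_srcu);
	teardown(obj);
}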
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7b3d2d491407..fb2c8d14327a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1008,7 +1008,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
if (elf_read_implies_exec(*elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
@@ -1300,7 +1301,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
mm->end_data = end_data;
mm->start_stack = bprm->p;
- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
+ if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
/*
* For architectures with ELF randomization, when executing
* a loader directly (i.e. no interpreter listed in ELF
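
Both hunks above switch from re-reading randomize_va_space to a single READ_ONCE() snapshot, so the PF_RANDOMIZE decision and the later brk-randomization check cannot disagree if the sysctl is flipped mid-exec. A small sketch of the pattern (the function name is illustrative):

#include <linux/compiler.h>
#include <linux/personality.h>
#include <linux/sched.h>

extern int randomize_va_space;		/* writable sysctl */

static void apply_aslr_policy(struct task_struct *tsk)
{
	/* Read the tunable exactly once; use the snapshot everywhere. */
	const int rva = READ_ONCE(randomize_va_space);

	if (!(tsk->personality & ADDR_NO_RANDOMIZE) && rva)
		tsk->flags |= PF_RANDOMIZE;

	if ((tsk->flags & PF_RANDOMIZE) && rva > 1)
		;	/* also randomize the brk base */
}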
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 118ad4d2cbbe..2eb4e03080ac 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -451,8 +451,16 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
}
owner = btrfs_header_owner(buf);
- BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
- !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
+ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
+ btrfs_crit(fs_info,
+"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
+ buf->start, btrfs_header_level(buf),
+ btrfs_root_id(root), refs, flags);
+ ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
if (refs > 1) {
if ((owner == root->root_key.objectid ||
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 86c7f8ce1715..06333a74d6c4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -445,7 +445,6 @@ struct btrfs_file_private {
void *filldir_buf;
u64 last_index;
struct extent_state *llseek_cached_state;
- bool fsync_skip_inode_lock;
};
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c6ecfd05e1db..72851adc1fee 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5085,7 +5085,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
/* We don't care about errors in readahead. */
if (ret < 0)
continue;
- BUG_ON(refs == 0);
+
+ /*
+ * This could be racy: it's conceivable that we raced and ended
+ * up with a bogus refs count; if that's the case just skip it. If
+ * we are actually corrupt we will notice when we look up
+ * everything again with our locks.
+ */
+ if (refs == 0)
+ continue;
if (wc->stage == DROP_REFERENCE) {
if (refs == 1)
@@ -5144,7 +5152,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
if (lookup_info &&
((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
(wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
- BUG_ON(!path->locks[level]);
+ ASSERT(path->locks[level]);
ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
@@ -5152,7 +5160,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
BUG_ON(ret == -ENOMEM);
if (ret)
return ret;
- BUG_ON(wc->refs[level] == 0);
+ if (unlikely(wc->refs[level] == 0)) {
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ eb->start);
+ return -EUCLEAN;
+ }
}
if (wc->stage == DROP_REFERENCE) {
@@ -5168,7 +5180,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
/* wc->stage == UPDATE_BACKREF */
if (!(wc->flags[level] & flag)) {
- BUG_ON(!path->locks[level]);
+ ASSERT(path->locks[level]);
ret = btrfs_inc_ref(trans, root, eb, 1);
BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, eb, 0);
@@ -5286,8 +5298,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
goto out_unlock;
if (unlikely(wc->refs[level - 1] == 0)) {
- btrfs_err(fs_info, "Missing references.");
- ret = -EIO;
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ bytenr);
+ ret = -EUCLEAN;
goto out_unlock;
}
*lookup_info = 0;
@@ -5487,7 +5500,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
path->locks[level] = 0;
return ret;
}
- BUG_ON(wc->refs[level] == 0);
+ if (unlikely(wc->refs[level] == 0)) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+ eb->start);
+ return -EUCLEAN;
+ }
if (wc->refs[level] == 1) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 952cf145c629..15fd8c00f4c0 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1543,13 +1543,6 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
if (IS_ERR_OR_NULL(dio)) {
err = PTR_ERR_OR_ZERO(dio);
} else {
- struct btrfs_file_private stack_private = { 0 };
- struct btrfs_file_private *private;
- const bool have_private = (file->private_data != NULL);
-
- if (!have_private)
- file->private_data = &stack_private;
-
/*
* If we have a synchoronous write, we must make sure the fsync
* triggered by the iomap_dio_complete() call below doesn't
@@ -1558,13 +1551,10 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
* partial writes due to the input buffer (or parts of it) not
* being already faulted in.
*/
- private = file->private_data;
- private->fsync_skip_inode_lock = true;
+ ASSERT(current->journal_info == NULL);
+ current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB;
err = iomap_dio_complete(dio);
- private->fsync_skip_inode_lock = false;
-
- if (!have_private)
- file->private_data = NULL;
+ current->journal_info = NULL;
}
/* No increment (+=) because iomap returns a cumulative value. */
@@ -1796,7 +1786,6 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct btrfs_file_private *private = file->private_data;
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -1806,7 +1795,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
int ret = 0, err;
u64 len;
bool full_sync;
- const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
+ bool skip_ilock = false;
+
+ if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
+ skip_ilock = true;
+ current->journal_info = NULL;
+ lockdep_assert_held(&inode->i_rwsem);
+ }
trace_btrfs_sync_file(file, datasync);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 18ce5353092d..a42238211887 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5668,7 +5668,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
- struct btrfs_key location;
+ struct btrfs_key location = { 0 };
u8 di_type = 0;
int ret = 0;
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 238a0ab85df9..7623db359881 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -12,6 +12,12 @@
#include "ctree.h"
#include "misc.h"
+/*
+ * Signal that a direct IO write is in progress, to avoid deadlock for sync
+ * direct IO writes when fsync is called during the direct IO write path.
+ */
+#define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1)
+
/* Radix-tree tag for roots that are part of the trasaction. */
#define BTRFS_ROOT_TRANS_TAG 0
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 5d473e50598f..f32a91d7c05d 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -353,7 +353,7 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
read_unlock(&sbi->s_journal->j_state_lock);
}
spin_lock(&sbi->s_fc_lock);
- if (sbi->s_fc_ineligible_tid < tid)
+ if (tid_gt(tid, sbi->s_fc_ineligible_tid))
sbi->s_fc_ineligible_tid = tid;
spin_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
@@ -1213,7 +1213,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
if (ret == -EALREADY) {
/* There was an ongoing commit, check if we need to restart */
if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
- commit_tid > journal->j_commit_sequence)
+ tid_gt(commit_tid, journal->j_commit_sequence))
goto restart_fc;
ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0,
commit_tid);
@@ -1288,7 +1288,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
list_del_init(&iter->i_fc_list);
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- if (iter->i_sync_tid <= tid)
+ if (tid_geq(tid, iter->i_sync_tid))
ext4_fc_reset_inode(&iter->vfs_inode);
/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
smp_mb();
@@ -1319,7 +1319,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
&sbi->s_fc_q[FC_Q_MAIN]);
- if (tid >= sbi->s_fc_ineligible_tid) {
+ if (tid_geq(tid, sbi->s_fc_ineligible_tid)) {
sbi->s_fc_ineligible_tid = 0;
ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
}
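
The fast-commit hunks above replace plain integer comparisons of transaction IDs with tid_gt()/tid_geq(), which stay correct when the 32-bit tid counter wraps. The jbd2 helpers boil down to a signed-difference comparison, roughly:

typedef unsigned int tid_t;

/* Wraparound-safe "x is newer than y": the unsigned difference is
 * reinterpreted as signed, so e.g. tid_gt(5, 0xfffffffb) is true
 * (5 comes ten transactions after the wrap). */
static inline int tid_gt(tid_t x, tid_t y)
{
	int difference = (x - y);

	return difference > 0;
}

static inline int tid_geq(tid_t x, tid_t y)
{
	int difference = (x - y);

	return difference >= 0;
}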
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index dad85fd84f6f..7a60cd96e87e 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -114,6 +114,7 @@ static void __exit fscache_exit(void)
kmem_cache_destroy(fscache_cookie_jar);
fscache_proc_cleanup();
+ timer_shutdown_sync(&fscache_cookie_lru_timer);
destroy_workqueue(fscache_wq);
pr_notice("Unloaded\n");
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c1703d2c4bf9..95f9913a3537 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -668,7 +668,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
err = get_create_ext(&args, dir, entry, mode);
if (err)
- goto out_put_forget_req;
+ goto out_free_ff;
err = fuse_simple_request(fm, &args);
free_ext_value(&args);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index cc9651a01351..ceb9f7d23038 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1735,10 +1735,16 @@ __acquires(fi->lock)
fuse_writepage_finish(fm, wpa);
spin_unlock(&fi->lock);
- /* After fuse_writepage_finish() aux request list is private */
+ /* After rb_erase() aux request list is private */
for (aux = wpa->next; aux; aux = next) {
+ struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
+
next = aux->next;
aux->next = NULL;
+
+ dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+ dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
+ wb_writeout_inc(&bdi->wb);
fuse_writepage_free(aux);
}
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 49c01559580f..690b9aadceaa 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -81,7 +81,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
}
ret = fuse_simple_request(fm, &args);
if (!ret && !size)
- ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
+ ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX);
if (ret == -ENOSYS) {
fm->fc->no_getxattr = 1;
ret = -EOPNOTSUPP;
@@ -143,7 +143,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
}
ret = fuse_simple_request(fm, &args);
if (!ret && !size)
- ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
+ ret = min_t(size_t, outarg.size, XATTR_LIST_MAX);
if (ret > 0 && size)
ret = fuse_verify_xattr_list(list, ret);
if (ret == -ENOSYS) {
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 5b771a3d8d9a..421c0d360836 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -448,6 +448,27 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
return provided == cpu_to_be32(calculated);
}
+static bool jbd2_commit_block_csum_verify_partial(journal_t *j, void *buf)
+{
+ struct commit_header *h;
+ __be32 provided;
+ __u32 calculated;
+ void *tmpbuf;
+
+ tmpbuf = kzalloc(j->j_blocksize, GFP_KERNEL);
+ if (!tmpbuf)
+ return false;
+
+ memcpy(tmpbuf, buf, sizeof(struct commit_header));
+ h = tmpbuf;
+ provided = h->h_chksum[0];
+ h->h_chksum[0] = 0;
+ calculated = jbd2_chksum(j, j->j_csum_seed, tmpbuf, j->j_blocksize);
+ kfree(tmpbuf);
+
+ return provided == cpu_to_be32(calculated);
+}
+
static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
journal_block_tag3_t *tag3,
void *buf, __u32 sequence)
@@ -814,6 +835,13 @@ static int do_one_pass(journal_t *journal,
if (pass == PASS_SCAN &&
!jbd2_commit_block_csum_verify(journal,
bh->b_data)) {
+ if (jbd2_commit_block_csum_verify_partial(
+ journal,
+ bh->b_data)) {
+ pr_notice("JBD2: Find incomplete commit block in transaction %u block %lu\n",
+ next_commit_ID, next_log_block);
+ goto chksum_ok;
+ }
chksum_error:
if (commit_time < last_trans_commit_time)
goto ignore_crc_mismatch;
@@ -828,6 +856,7 @@ static int do_one_pass(journal_t *journal,
}
}
if (pass == PASS_SCAN) {
+ chksum_ok:
last_trans_commit_time = commit_time;
head_block = next_log_block;
}
@@ -847,6 +876,7 @@ static int do_one_pass(journal_t *journal,
next_log_block);
need_check_commit_time = true;
}
+
/* If we aren't in the REVOKE pass, then we can
* just skip over this block. */
if (pass != PASS_REVOKE) {
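
jbd2_commit_block_csum_verify_partial() accepts a commit block whose checksum was computed before the tail of the block was written out: it copies only the commit header into a zeroed block-sized buffer and re-runs the checksum over that. A simplified stand-alone sketch, using crc32c() as a stand-in for the journal's configured checksum:

#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/string.h>

static bool csum_matches_header_only(const void *blk, size_t hdr_len,
				     size_t blocksize, u32 seed, u32 expected)
{
	void *tmp = kzalloc(blocksize, GFP_KERNEL);
	u32 calc;

	if (!tmp)
		return false;

	memcpy(tmp, blk, hdr_len);		/* keep the header, zero the tail */
	calc = crc32c(seed, tmp, blocksize);
	kfree(tmp);

	return calc == expected;
}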
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0d6473cb00cb..f63513e477c5 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -47,6 +47,7 @@
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/in6.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/netdevice.h>
@@ -223,6 +224,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
ret = fn(server, data);
if (ret)
goto out;
+ cond_resched();
rcu_read_lock();
}
rcu_read_unlock();
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index a9b8d77c8c1d..ce30b51ac593 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -708,6 +708,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
brelse(bh);
}
+/**
+ * nilfs_abort_roll_forward - clean up after a failed rollforward recovery
+ * @nilfs: nilfs object
+ */
+static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
+{
+ struct nilfs_inode_info *ii, *n;
+ LIST_HEAD(head);
+
+ /* Abandon inodes that have read recovery data */
+ spin_lock(&nilfs->ns_inode_lock);
+ list_splice_init(&nilfs->ns_dirty_files, &head);
+ spin_unlock(&nilfs->ns_inode_lock);
+ if (list_empty(&head))
+ return;
+
+ set_nilfs_purging(nilfs);
+ list_for_each_entry_safe(ii, n, &head, i_dirty) {
+ spin_lock(&nilfs->ns_inode_lock);
+ list_del_init(&ii->i_dirty);
+ spin_unlock(&nilfs->ns_inode_lock);
+
+ iput(&ii->vfs_inode);
+ }
+ clear_nilfs_purging(nilfs);
+}
+
/**
* nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
* @nilfs: nilfs object
@@ -766,15 +793,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
if (unlikely(err)) {
nilfs_err(sb, "error %d writing segment for recovery",
err);
- goto failed;
+ goto put_root;
}
nilfs_finish_roll_forward(nilfs, ri);
}
- failed:
+put_root:
nilfs_put_root(root);
return err;
+
+failed:
+ nilfs_abort_roll_forward(nilfs);
+ goto put_root;
}
/**
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index e10f8a777ab0..0610cb12c11c 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1835,6 +1835,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
nilfs_abort_logs(&logs, ret ? : err);
list_splice_tail_init(&sci->sc_segbufs, &logs);
+ if (list_empty(&logs))
+ return; /* if the first segment buffer preparation failed */
+
nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
nilfs_free_incomplete_logs(&logs, nilfs);
@@ -2079,7 +2082,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
err = nilfs_segctor_begin_construction(sci, nilfs);
if (unlikely(err))
- goto out;
+ goto failed;
/* Update time stamp */
sci->sc_seg_ctime = ktime_get_real_seconds();
@@ -2142,10 +2145,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
return err;
failed_to_write:
- if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
- nilfs_redirty_inodes(&sci->sc_dirty_files);
-
failed:
+ if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
+ nilfs_redirty_inodes(&sci->sc_dirty_files);
if (nilfs_doing_gc())
nilfs_redirty_inodes(&sci->sc_gc_inodes);
nilfs_segctor_abort_construction(sci, nilfs, err);
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 379d22e28ed6..905c7eadf967 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -836,9 +836,15 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
- struct nilfs_super_block **sbp = nilfs->ns_sbp;
- u32 major = le32_to_cpu(sbp[0]->s_rev_level);
- u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level);
+ struct nilfs_super_block *raw_sb;
+ u32 major;
+ u16 minor;
+
+ down_read(&nilfs->ns_sem);
+ raw_sb = nilfs->ns_sbp[0];
+ major = le32_to_cpu(raw_sb->s_rev_level);
+ minor = le16_to_cpu(raw_sb->s_minor_rev_level);
+ up_read(&nilfs->ns_sem);
return sysfs_emit(buf, "%d.%d\n", major, minor);
}
@@ -856,8 +862,13 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
- struct nilfs_super_block **sbp = nilfs->ns_sbp;
- u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size);
+ struct nilfs_super_block *raw_sb;
+ u64 dev_size;
+
+ down_read(&nilfs->ns_sem);
+ raw_sb = nilfs->ns_sbp[0];
+ dev_size = le64_to_cpu(raw_sb->s_dev_size);
+ up_read(&nilfs->ns_sem);
return sysfs_emit(buf, "%llu\n", dev_size);
}
@@ -879,9 +890,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
- struct nilfs_super_block **sbp = nilfs->ns_sbp;
+ struct nilfs_super_block *raw_sb;
+ ssize_t len;
- return sysfs_emit(buf, "%pUb\n", sbp[0]->s_uuid);
+ down_read(&nilfs->ns_sem);
+ raw_sb = nilfs->ns_sbp[0];
+ len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid);
+ up_read(&nilfs->ns_sem);
+
+ return len;
}
static
@@ -889,10 +906,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
- struct nilfs_super_block **sbp = nilfs->ns_sbp;
+ struct nilfs_super_block *raw_sb;
+ ssize_t len;
+
+ down_read(&nilfs->ns_sem);
+ raw_sb = nilfs->ns_sbp[0];
+ len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n",
+ raw_sb->s_volume_name);
+ up_read(&nilfs->ns_sem);
- return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n",
- sbp[0]->s_volume_name);
+ return len;
}
static const char dev_readme_str[] =
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 9d0a09f00b38..e1b856ecce61 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -272,9 +272,12 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
}
-static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
- const struct NTFS_DE *e, u8 *name,
- struct dir_context *ctx)
+/*
+ * returns false if 'ctx' is full
+ */
+static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
+ struct ntfs_inode *ni, const struct NTFS_DE *e,
+ u8 *name, struct dir_context *ctx)
{
const struct ATTR_FILE_NAME *fname;
unsigned long ino;
@@ -284,29 +287,29 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
fname = Add2Ptr(e, sizeof(struct NTFS_DE));
if (fname->type == FILE_NAME_DOS)
- return 0;
+ return true;
if (!mi_is_ref(&ni->mi, &fname->home))
- return 0;
+ return true;
ino = ino_get(&e->ref);
if (ino == MFT_REC_ROOT)
- return 0;
+ return true;
/* Skip meta files. Unless option to show metafiles is set. */
if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
- return 0;
+ return true;
if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
- return 0;
+ return true;
name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
PATH_MAX);
if (name_len <= 0) {
ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
ino);
- return 0;
+ return true;
}
/*
@@ -336,17 +339,20 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
}
}
- return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ return dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
}
/*
* ntfs_read_hdr - Helper function for ntfs_readdir().
+ *
+ * returns 0 if ok.
+ * returns -EINVAL if directory is corrupted.
+ * returns +1 if 'ctx' is full.
*/
static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
u8 *name, struct dir_context *ctx)
{
- int err;
const struct NTFS_DE *e;
u32 e_size;
u32 end = le32_to_cpu(hdr->used);
@@ -354,12 +360,12 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
for (;; off += e_size) {
if (off + sizeof(struct NTFS_DE) > end)
- return -1;
+ return -EINVAL;
e = Add2Ptr(hdr, off);
e_size = le16_to_cpu(e->size);
if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
- return -1;
+ return -EINVAL;
if (de_is_last(e))
return 0;
@@ -369,14 +375,15 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
continue;
if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
- return -1;
+ return -EINVAL;
ctx->pos = vbo + off;
/* Submit the name to the filldir callback. */
- err = ntfs_filldir(sbi, ni, e, name, ctx);
- if (err)
- return err;
+ if (!ntfs_dir_emit(sbi, ni, e, name, ctx)) {
+ /* ctx is full. */
+ return +1;
+ }
}
}
@@ -475,8 +482,6 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
vbo = (u64)bit << index_bits;
if (vbo >= i_size) {
- ntfs_inode_err(dir, "Looks like your dir is corrupt");
- ctx->pos = eod;
err = -EINVAL;
goto out;
}
@@ -499,9 +504,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
__putname(name);
put_indx_node(node);
- if (err == -ENOENT) {
+ if (err == 1) {
+ /* 'ctx' is full. */
+ err = 0;
+ } else if (err == -ENOENT) {
err = 0;
ctx->pos = pos;
+ } else if (err < 0) {
+ if (err == -EINVAL)
+ ntfs_inode_err(dir, "directory corrupted");
+ ctx->pos = eod;
}
return err;
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 45b687aff700..f7c381730b39 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -1601,8 +1601,10 @@ int ni_delete_all(struct ntfs_inode *ni)
asize = le32_to_cpu(attr->size);
roff = le16_to_cpu(attr->nres.run_off);
- if (roff > asize)
+ if (roff > asize) {
+ _ntfs_bad_inode(&ni->vfs_inode);
return -EINVAL;
+ }
/* run==1 means unpack and deallocate. */
run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 15cbfec4c28c..dd8acd207752 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -1106,6 +1106,8 @@ int smb2_rename_path(const unsigned int xid,
co, DELETE, SMB2_OP_RENAME, cfile, source_dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
+ cifs_get_writable_path(tcon, from_name,
+ FIND_WR_WITH_DELETE, &cfile);
rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
co, DELETE, SMB2_OP_RENAME, cfile, NULL);
}
@@ -1149,6 +1151,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
cfile, NULL, NULL, dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
&(int){SMB2_OP_SET_EOF}, 1,
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 012d6ec12a69..acd5d7d79352 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -3186,13 +3186,15 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
}
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
- loff_t offset, loff_t len, bool keep_size)
+ unsigned long long offset, unsigned long long len,
+ bool keep_size)
{
struct cifs_ses *ses = tcon->ses;
struct inode *inode = file_inode(file);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsFileInfo *cfile = file->private_data;
- unsigned long long new_size;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ unsigned long long i_size, new_size, remote_size;
long rc;
unsigned int xid;
@@ -3204,6 +3206,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
+ i_size = i_size_read(inode);
+ remote_size = ictx->remote_i_size;
+ if (offset + len >= remote_size && offset < i_size) {
+ unsigned long long top = umin(offset + len, i_size);
+
+ rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
+ if (rc < 0)
+ goto zero_range_exit;
+ }
+
/*
* We zero the range through ioctl, so we need remove the page caches
* first, otherwise the data may be inconsistent with the server.
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index a8f52c4ebbda..e546ffa57b55 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -1510,7 +1510,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
* parse_lease_state() - parse lease context containted in file open request
* @open_req: buffer containing smb2 file open(create) request
*
- * Return: oplock state, -ENOENT if create lease context not found
+ * Return: allocated lease context object on success, otherwise NULL
*/
struct lease_ctx_info *parse_lease_state(void *open_req)
{
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index da57adc2bd68..458cc736286a 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1687,6 +1687,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
rc = ksmbd_session_register(conn, sess);
if (rc)
goto out_err;
+
+ conn->binding = false;
} else if (conn->dialect >= SMB30_PROT_ID &&
(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
@@ -1765,6 +1767,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
sess = NULL;
goto out_err;
}
+
+ conn->binding = false;
}
work->sess = sess;
@@ -2767,8 +2771,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
}
}
- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) {
+ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
+ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
dh_info->CreateGuid =
durable_v2_blob->CreateGuid;
dh_info->persistent =
@@ -2788,8 +2792,8 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
goto out;
}
- if (((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
- req_op_level == SMB2_OPLOCK_LEVEL_BATCH)) {
+ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
+ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
ksmbd_debug(SMB, "Request for durable open\n");
dh_info->type = dh_idx;
}
@@ -3411,7 +3415,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
} else {
- if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
+ if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) {
if (S_ISDIR(file_inode(filp)->i_mode)) {
lc->req_state &= ~SMB2_LEASE_WRITE_CACHING_LE;
lc->is_dir = true;
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 6633fa78e9b9..2ce7f75059cb 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -624,8 +624,10 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
for_each_netdev(&init_net, netdev) {
if (netif_is_bridge_port(netdev))
continue;
- if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
+ if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
+ rtnl_unlock();
return -ENOMEM;
+ }
}
rtnl_unlock();
bind_additional_ifaces = 1;
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 16bd693d0b3a..d5918eba27e3 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -279,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
if (err < 0)
goto failed_read;
- set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+ if (inode->i_size > PAGE_SIZE) {
+ ERROR("Corrupted symlink\n");
+ return -EINVAL;
+ }
+
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_op = &squashfs_symlink_inode_ops;
inode_nohighmem(inode);
inode->i_data.a_ops = &squashfs_symlink_aops;
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index aa54be1ce124..73a08db3b7a8 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -935,7 +935,7 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
list_for_each_entry(ei_child, &ei->children, list)
eventfs_remove_rec(ei_child, level + 1);
- list_del(&ei->list);
+ list_del_rcu(&ei->list);
free_ei(ei);
}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e0080fda2526..3c78535f406b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1080,12 +1080,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
struct udf_part_map *map;
struct udf_sb_info *sbi = UDF_SB(sb);
struct partitionHeaderDesc *phd;
+ u32 sum;
int err;
map = &sbi->s_partmaps[p_index];
map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
+ if (check_add_overflow(map->s_partition_root, map->s_partition_len,
+ &sum)) {
+ udf_err(sb, "Partition %d has invalid location %u + %u\n",
+ p_index, map->s_partition_root, map->s_partition_len);
+ return -EFSCORRUPTED;
+ }
if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
@@ -1141,6 +1148,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
+ /* Check that arithmetic over the bitmap won't overflow. */
+ if (check_add_overflow(map->s_partition_len,
+ sizeof(struct spaceBitmapDesc) << 3,
+ &sum)) {
+ udf_err(sb, "Partition %d is too long (%u)\n", p_index,
+ map->s_partition_len);
+ return -EFSCORRUPTED;
+ }
udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
p_index, bitmap->s_extPosition);
}
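
Both new checks above use check_add_overflow() so that a crafted partition descriptor cannot make start + length wrap a 32-bit value and defeat later bounds checks. A minimal sketch of the idiom (validate_extent() is an illustrative name):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int validate_extent(u32 start, u32 len)
{
	u32 end;

	/* true means start + len wrapped past U32_MAX */
	if (check_add_overflow(start, len, &end))
		return -EUCLEAN;

	return 0;
}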
diff --git a/fs/xattr.c b/fs/xattr.c
index efd4736bc94b..c20046548f21 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -631,10 +631,9 @@ int do_setxattr(struct mnt_idmap *idmap, struct dentry *dentry,
ctx->kvalue, ctx->size, ctx->flags);
}
-static long
-setxattr(struct mnt_idmap *idmap, struct dentry *d,
- const char __user *name, const void __user *value, size_t size,
- int flags)
+static int path_setxattr(const char __user *pathname,
+ const char __user *name, const void __user *value,
+ size_t size, int flags, unsigned int lookup_flags)
{
struct xattr_name kname;
struct xattr_ctx ctx = {
@@ -644,33 +643,20 @@ setxattr(struct mnt_idmap *idmap, struct dentry *d,
.kname = &kname,
.flags = flags,
};
+ struct path path;
int error;
error = setxattr_copy(name, &ctx);
if (error)
return error;
- error = do_setxattr(idmap, d, &ctx);
-
- kvfree(ctx.kvalue);
- return error;
-}
-
-static int path_setxattr(const char __user *pathname,
- const char __user *name, const void __user *value,
- size_t size, int flags, unsigned int lookup_flags)
-{
- struct path path;
- int error;
-
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
- return error;
+ goto out;
error = mnt_want_write(path.mnt);
if (!error) {
- error = setxattr(mnt_idmap(path.mnt), path.dentry, name,
- value, size, flags);
+ error = do_setxattr(mnt_idmap(path.mnt), path.dentry, &ctx);
mnt_drop_write(path.mnt);
}
path_put(&path);
@@ -678,6 +664,9 @@ static int path_setxattr(const char __user *pathname,
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
+
+out:
+ kvfree(ctx.kvalue);
return error;
}
@@ -698,20 +687,32 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
const void __user *,value, size_t, size, int, flags)
{
- struct fd f = fdget(fd);
- int error = -EBADF;
+ struct xattr_name kname;
+ struct xattr_ctx ctx = {
+ .cvalue = value,
+ .kvalue = NULL,
+ .size = size,
+ .kname = &kname,
+ .flags = flags,
+ };
+ int error;
+ CLASS(fd, f)(fd);
if (!f.file)
- return error;
+ return -EBADF;
+
audit_file(f.file);
+ error = setxattr_copy(name, &ctx);
+ if (error)
+ return error;
+
error = mnt_want_write_file(f.file);
if (!error) {
- error = setxattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, name,
- value, size, flags);
+ error = do_setxattr(file_mnt_idmap(f.file),
+ f.file->f_path.dentry, &ctx);
mnt_drop_write_file(f.file);
}
- fdput(f);
+ kvfree(ctx.kvalue);
return error;
}
@@ -900,9 +901,17 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
* Extended attribute REMOVE operations
*/
static long
-removexattr(struct mnt_idmap *idmap, struct dentry *d,
- const char __user *name)
+removexattr(struct mnt_idmap *idmap, struct dentry *d, const char *name)
{
+ if (is_posix_acl_xattr(name))
+ return vfs_remove_acl(idmap, d, name);
+ return vfs_removexattr(idmap, d, name);
+}
+
+static int path_removexattr(const char __user *pathname,
+ const char __user *name, unsigned int lookup_flags)
+{
+ struct path path;
int error;
char kname[XATTR_NAME_MAX + 1];
@@ -911,25 +920,13 @@ removexattr(struct mnt_idmap *idmap, struct dentry *d,
error = -ERANGE;
if (error < 0)
return error;
-
- if (is_posix_acl_xattr(kname))
- return vfs_remove_acl(idmap, d, kname);
-
- return vfs_removexattr(idmap, d, kname);
-}
-
-static int path_removexattr(const char __user *pathname,
- const char __user *name, unsigned int lookup_flags)
-{
- struct path path;
- int error;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = mnt_want_write(path.mnt);
if (!error) {
- error = removexattr(mnt_idmap(path.mnt), path.dentry, name);
+ error = removexattr(mnt_idmap(path.mnt), path.dentry, kname);
mnt_drop_write(path.mnt);
}
path_put(&path);
@@ -955,15 +952,23 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
{
struct fd f = fdget(fd);
+ char kname[XATTR_NAME_MAX + 1];
int error = -EBADF;
if (!f.file)
return error;
audit_file(f.file);
+
+ error = strncpy_from_user(kname, name, sizeof(kname));
+ if (error == 0 || error == sizeof(kname))
+ error = -ERANGE;
+ if (error < 0)
+ return error;
+
error = mnt_want_write_file(f.file);
if (!error) {
error = removexattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, name);
+ f.file->f_path.dentry, kname);
mnt_drop_write_file(f.file);
}
fdput(f);
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 6126c977ece0..c0b69ffe7bdb 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -139,6 +139,7 @@ struct cppc_cpudata {
#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
+extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
@@ -165,6 +166,10 @@ static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
return -ENOTSUPP;
}
+static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
+{
+ return -ENOTSUPP;
+}
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
return -ENOTSUPP;
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index 6ad02ad9c7b4..68fc1bd8d851 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -52,6 +52,9 @@ struct amd_aperf_mperf {
* @prev: Last Aperf/Mperf/tsc count value read from register
* @freq: current cpu frequency value
* @boost_supported: check whether the Processor or SBIOS supports boost mode
+ * @hw_prefcore: check whether HW supports the preferred core feature.
+ * Only when hw_prefcore and the early prefcore param are true,
+ * the AMD P-State driver supports the preferred core feature.
* @epp_policy: Last saved policy used to set energy-performance preference
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
@@ -85,6 +88,7 @@ struct amd_cpudata {
u64 freq;
bool boost_supported;
+ bool hw_prefcore;
/* EPP feature related attributes*/
s16 epp_policy;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 31561e789715..d4f2c8706042 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -138,11 +138,12 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
- int *optname, char __user *optval,
+ int *optname, sockptr_t optval,
int *optlen, char **kernel_optval);
+
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen, int max_optlen,
+ int optname, sockptr_t optval,
+ sockptr_t optlen, int max_optlen,
int retval);
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
@@ -374,14 +375,6 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
__ret; \
})
-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
-({ \
- int __ret = 0; \
- if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
- get_user(__ret, optlen); \
- __ret; \
-})
-
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
max_optlen, retval) \
({ \
@@ -499,7 +492,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3d617d0d6967..830b925c2d00 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -95,6 +95,10 @@ extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif
+#ifndef PHYSMEM_END
+# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
+#endif
+
#include <asm/page.h>
#include <asm/processor.h>
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index af7639c3b0a3..8b7daccd11be 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -292,6 +292,27 @@ static inline pmd_t pmdp_get(pmd_t *pmdp)
}
#endif
+#ifndef pudp_get
+static inline pud_t pudp_get(pud_t *pudp)
+{
+ return READ_ONCE(*pudp);
+}
+#endif
+
+#ifndef p4dp_get
+static inline p4d_t p4dp_get(p4d_t *p4dp)
+{
+ return READ_ONCE(*p4dp);
+}
+#endif
+
+#ifndef pgdp_get
+static inline pgd_t pgdp_get(pgd_t *pgdp)
+{
+ return READ_ONCE(*pgdp);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
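
The new pudp_get()/p4dp_get()/pgdp_get() fallbacks mirror the existing ptep_get()/pmdp_get() helpers: each level of a page-table walk reads the entry through READ_ONCE() rather than a bare dereference. A short usage sketch (pgd_present_at() is an illustrative name):

#include <linux/mm.h>
#include <linux/pgtable.h>

static int pgd_present_at(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pgd_t pgd = pgdp_get(pgdp);	/* single READ_ONCE snapshot of the entry */

	return !pgd_none(pgd) && !pgd_bad(pgd);
}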
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 2c526c8d10cc..25d0684d37b3 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -489,6 +489,14 @@ static inline int of_regulator_bulk_get_all(struct device *dev, struct device_no
return 0;
}
+static inline int devm_regulator_bulk_get_const(
+ struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers)
+{
+ return 0;
+}
+
static inline int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index d2a280a42f3b..2129d071c372 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -430,6 +430,7 @@ enum {
#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
@@ -644,6 +645,7 @@ enum {
#define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
#define HCI_ERROR_MEMORY_EXCEEDED 0x07
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_COMMAND_DISALLOWED 0x0c
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
#define HCI_ERROR_INVALID_PARAMETERS 0x12
@@ -652,6 +654,7 @@ enum {
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index f89d6d43ba8f..29f1549ee111 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -188,7 +188,6 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 link_type;
u8 type;
u8 val[16];
};
@@ -198,7 +197,6 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@@ -213,7 +211,6 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
- u8 link_type;
u8 val[16];
};
@@ -221,8 +218,6 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
- u8 bdaddr_type;
- u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
@@ -1046,6 +1041,24 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
return c->acl_num + c->sco_num + c->le_num + c->iso_num;
}
+static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c == conn) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1422,7 +1435,6 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
bdaddr_t *dst, u8 role);
void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
-void hci_conn_check_pending(struct hci_dev *hdev);
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
@@ -1436,6 +1448,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role);
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 4cb048bdcb1e..3cb2d10cac93 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -50,6 +50,22 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry);
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func, void *data,
+ hci_cmd_sync_work_destroy_t destroy);
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
@@ -129,8 +145,6 @@ struct hci_conn;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
-
int hci_le_create_cis_sync(struct hci_dev *hdev);
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
@@ -140,3 +154,9 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
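A minimal usage sketch for the new *_once helpers declared above (the callback name is hypothetical; the behaviour matches the hci_sync.c hunk further down): an identical (func, data, destroy) tuple is queued at most once and can later be looked up or dequeued with the same tuple.

	static int example_conn_sync(struct hci_dev *hdev, void *data)
	{
		struct hci_conn *conn = data;

		/* ... issue synchronous HCI commands for @conn ... */
		return 0;
	}

	/* Queues only if no identical entry is already pending. */
	hci_cmd_sync_queue_once(hdev, example_conn_sync, conn, NULL);

	/* Cancels the pending entry, if any; returns true when one was removed. */
	hci_cmd_sync_dequeue_once(hdev, example_conn_sync, conn, NULL);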
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 37ccce56a73e..28e110f733ff 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -97,6 +97,8 @@ struct mana_txq {
atomic_t pending_sends;
+ bool napi_initialized;
+
struct mana_stats_tx stats;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index 5942b5ff4c78..2a1aee503848 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1875,11 +1875,13 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int sock_setsockopt(struct socket *sock, int level, int op,
sockptr_t optval, unsigned int optlen);
+int do_sock_setsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, int optlen);
+int do_sock_getsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, sockptr_t optlen);
int sk_getsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, sockptr_t optlen);
-int sock_getsockopt(struct socket *sock, int level, int op,
- char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
bool timeval, bool time32);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 8db7fd3f743e..5eed091d4c29 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -1474,6 +1474,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_TILE_VER_GFX10 2
#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
#define AMD_FMT_MOD_TILE_VER_GFX11 4
+#define AMD_FMT_MOD_TILE_VER_GFX12 5
/*
* 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
@@ -1484,6 +1485,8 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
/*
* 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
* GFX9 as canonical version.
+ *
+ * 64K_D_2D on GFX12 is identical to 64K_D on GFX11.
*/
#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
@@ -1491,6 +1494,21 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+/* Gfx12 swizzle modes:
+ * 0 - LINEAR
+ * 1 - 256B_2D - 2D block dimensions
+ * 2 - 4KB_2D
+ * 3 - 64KB_2D
+ * 4 - 256KB_2D
+ * 5 - 4KB_3D - 3D block dimensions
+ * 6 - 64KB_3D
+ * 7 - 256KB_3D
+ */
+#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1
+#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2
+#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3
+#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4
+
#define AMD_FMT_MOD_DCC_BLOCK_64B 0
#define AMD_FMT_MOD_DCC_BLOCK_128B 1
#define AMD_FMT_MOD_DCC_BLOCK_256B 2
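For reference, these tile values are meant to be packed through the AMD_FMT_MOD_SET() helpers already present in this header; a hedged sketch of assembling a GFX12 modifier (the particular field choice is illustrative only):

	__u64 modifier = AMD_FMT_MOD |
			 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
			 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);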
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index ac37bd53aee0..913a6a7e62ca 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1799,7 +1799,7 @@ static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
}
int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
- int *optname, char __user *optval,
+ int *optname, sockptr_t optval,
int *optlen, char **kernel_optval)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
@@ -1822,7 +1822,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
ctx.optlen = *optlen;
- if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
+ if (copy_from_sockptr(ctx.optval, optval,
+ min(*optlen, max_optlen))) {
ret = -EFAULT;
goto out;
}
@@ -1889,8 +1890,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
}
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen, int max_optlen,
+ int optname, sockptr_t optval,
+ sockptr_t optlen, int max_optlen,
int retval)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
@@ -1917,8 +1918,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
* one that kernel returned as well to let
* BPF programs inspect the value.
*/
-
- if (get_user(ctx.optlen, optlen)) {
+ if (copy_from_sockptr(&ctx.optlen, optlen,
+ sizeof(ctx.optlen))) {
ret = -EFAULT;
goto out;
}
@@ -1929,8 +1930,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
}
orig_optlen = ctx.optlen;
- if (copy_from_user(ctx.optval, optval,
- min(ctx.optlen, max_optlen)) != 0) {
+ if (copy_from_sockptr(ctx.optval, optval,
+ min(ctx.optlen, max_optlen))) {
ret = -EFAULT;
goto out;
}
@@ -1944,7 +1945,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
if (ret < 0)
goto out;
- if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
+ if (!sockptr_is_null(optval) &&
+ (ctx.optlen > max_optlen || ctx.optlen < 0)) {
if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
ctx.optlen, max_optlen);
@@ -1956,11 +1958,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
}
if (ctx.optlen != 0) {
- if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
+ if (!sockptr_is_null(optval) &&
+ copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {
ret = -EFAULT;
goto out;
}
- if (put_user(ctx.optlen, optlen)) {
+ if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) {
ret = -EFAULT;
goto out;
}
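The conversion above relies on sockptr_t (include/linux/sockptr.h), which carries either a kernel or a user pointer so a single code path can serve both callers; a rough caller-side sketch (buffer names are hypothetical):

	sockptr_t optval = in_kernel_path ? KERNEL_SOCKPTR(kbuf)
					  : USER_SOCKPTR(ubuf);

	if (copy_from_sockptr(tmp, optval, len))	/* copies from either flavour */
		return -EFAULT;

	if (!sockptr_is_null(optval) &&
	    copy_to_sockptr(optval, tmp, len))
		return -EFAULT;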
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 3f1a9cd7fc9e..9d5699942273 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3064,8 +3064,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
if (code == (BPF_JMP | BPF_CALL) &&
insn[i].src_reg == 0 &&
- insn[i].imm == BPF_FUNC_tail_call)
+ insn[i].imm == BPF_FUNC_tail_call) {
subprog[cur_subprog].has_tail_call = true;
+ subprog[cur_subprog].tail_call_reachable = true;
+ }
if (BPF_CLASS(code) == BPF_LD &&
(BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
subprog[cur_subprog].has_ld_abs = true;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 5eca6281d1aa..660817c125e7 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1829,9 +1829,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
rcu_assign_pointer(dcgrp->subsys[ssid], css);
ss->root = dst_root;
- css->cgroup = dcgrp;
spin_lock_irq(&css_set_lock);
+ css->cgroup = dcgrp;
WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
e_cset_node[ss->id]) {
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 4950e0b622b1..cc19a3efea89 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -89,6 +89,22 @@ static int map_benchmark_thread(void *data)
atomic64_add(map_sq, &map->sum_sq_map);
atomic64_add(unmap_sq, &map->sum_sq_unmap);
atomic64_inc(&map->loops);
+
+ /*
+ * We may test for a long time so periodically check whether
+ * we need to schedule to avoid starving the others. Otherwise
+		 * we may hang the kernel on a non-preemptible kernel when
+		 * the number of test kthreads >= the number of CPUs: the test
+		 * kthreads will run endlessly on every CPU since the thread
+		 * responsible for notifying the kthreads to stop (in
+		 * do_map_benchmark()) could not be scheduled.
+ *
+ * Note this may degrade the test concurrency since the test
+ * threads may need to share the CPU time with other load
+ * in the system. So it's recommended to run this benchmark
+ * on an idle system.
+ */
+ cond_resched();
}
out:
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0f2b5610933d..4d0abdace4e7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1255,8 +1255,9 @@ static void put_ctx(struct perf_event_context *ctx)
* perf_event_context::mutex
* perf_event::child_mutex;
* perf_event_context::lock
- * perf_event::mmap_mutex
* mmap_lock
+ * perf_event::mmap_mutex
+ * perf_buffer::aux_mutex
* perf_addr_filters_head::lock
*
* cpu_hotplug_lock
@@ -6352,12 +6353,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
event->pmu->event_unmapped(event, vma->vm_mm);
/*
- * rb->aux_mmap_count will always drop before rb->mmap_count and
- * event->mmap_count, so it is ok to use event->mmap_mutex to
- * serialize with perf_mmap here.
+ * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
+ * to avoid complications.
*/
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
- atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+ atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
/*
* Stop all AUX events that are writing to this buffer,
* so that we can free its AUX pages and corresponding PMU
@@ -6374,7 +6374,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
rb_free_aux(rb);
WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
- mutex_unlock(&event->mmap_mutex);
+ mutex_unlock(&rb->aux_mutex);
}
if (atomic_dec_and_test(&rb->mmap_count))
@@ -6462,6 +6462,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
+ struct mutex *aux_mutex = NULL;
struct perf_buffer *rb = NULL;
unsigned long locked, lock_limit;
unsigned long vma_size;
@@ -6510,6 +6511,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!rb)
goto aux_unlock;
+ aux_mutex = &rb->aux_mutex;
+ mutex_lock(aux_mutex);
+
aux_offset = READ_ONCE(rb->user_page->aux_offset);
aux_size = READ_ONCE(rb->user_page->aux_size);
@@ -6660,6 +6664,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
atomic_dec(&rb->mmap_count);
}
aux_unlock:
+ if (aux_mutex)
+ mutex_unlock(aux_mutex);
mutex_unlock(&event->mmap_mutex);
/*
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 386d21c7edfa..f376b057320c 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -40,6 +40,7 @@ struct perf_buffer {
struct user_struct *mmap_user;
/* AUX area */
+ struct mutex aux_mutex;
long aux_head;
unsigned int aux_nest;
long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index f1f4a627f93d..b0930b418552 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -333,6 +333,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
*/
if (!rb->nr_pages)
rb->paused = 1;
+
+ mutex_init(&rb->aux_mutex);
}
void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 3048589e2e85..4705571f8034 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1480,7 +1480,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
uprobe_opcode_t insn = UPROBE_SWBP_INSN;
struct xol_area *area;
- area = kmalloc(sizeof(*area), GFP_KERNEL);
+ area = kzalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
goto out;
@@ -1490,7 +1490,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
goto free_area;
area->xol_mapping.name = "[uprobes]";
- area->xol_mapping.fault = NULL;
area->xol_mapping.pages = area->pages;
area->pages[0] = alloc_page(GFP_HIGHUSER);
if (!area->pages[0])
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index f9a419cd22d4..830344627e9f 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -728,7 +728,7 @@ static int kexec_calculate_store_digests(struct kimage *image)
#ifdef CONFIG_CRASH_HOTPLUG
/* Exclude elfcorehdr segment to allow future changes via hotplug */
- if (j == image->elfcorehdr_index)
+ if (i == image->elfcorehdr_index)
continue;
#endif
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 21db0df0eb00..bf3a28ee7d8f 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1624,6 +1624,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
}
static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ struct rt_mutex_base *lock,
struct rt_mutex_waiter *w)
{
/*
@@ -1636,10 +1637,10 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
if (build_ww_mutex() && w->ww_ctx)
return;
- /*
- * Yell loudly and stop the task right here.
- */
+ raw_spin_unlock_irq(&lock->wait_lock);
+
WARN(1, "rtmutex deadlock detected\n");
+
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
@@ -1693,7 +1694,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
} else {
__set_current_state(TASK_RUNNING);
remove_waiter(lock, waiter);
- rt_mutex_handle_deadlock(ret, chwalk, waiter);
+ rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
}
/*
diff --git a/kernel/resource.c b/kernel/resource.c
index e3f5680a564c..ce127a829ead 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1778,8 +1778,7 @@ static resource_size_t gfr_start(struct resource *base, resource_size_t size,
if (flags & GFR_DESCENDING) {
resource_size_t end;
- end = min_t(resource_size_t, base->end,
- (1ULL << MAX_PHYSMEM_BITS) - 1);
+ end = min_t(resource_size_t, base->end, PHYSMEM_END);
return end - size + 1;
}
@@ -1796,8 +1795,7 @@ static bool gfr_continue(struct resource *base, resource_size_t addr,
* @size did not wrap 0.
*/
return addr > addr - size &&
- addr <= min_t(resource_size_t, base->end,
- (1ULL << MAX_PHYSMEM_BITS) - 1);
+ addr <= min_t(resource_size_t, base->end, PHYSMEM_END);
}
static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 97571d390f18..9b406d988654 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6679,8 +6679,9 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*
* Here are the schemes providing that barrier on the
* various architectures:
- * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
- * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
+ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
+ * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
+ * on PowerPC and on RISC-V.
* - finish_lock_switch() for weakly-ordered
* architectures where spin_unlock is a full barrier,
* - switch_to() for arm64 (weakly-ordered, spin_unlock
diff --git a/kernel/smp.c b/kernel/smp.c
index 695eb13a276d..3eeffeaf5450 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -1119,6 +1119,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
queue_work_on(cpu, system_wq, &sscs.work);
wait_for_completion(&sscs.done);
+ destroy_work_on_stack(&sscs.work);
return sscs.ret;
}
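For context on destroy_work_on_stack(): a work item that lives on the stack is set up with the _ONSTACK initializer, and the destroy call tells the debug-objects machinery the object is gone before the stack frame is reused. A minimal sketch of the pairing (function names hypothetical):

	static void example_fn(struct work_struct *work)
	{
		/* ... */
	}

	void example(void)
	{
		struct work_struct w;

		INIT_WORK_ONSTACK(&w, example_fn);
		queue_work(system_wq, &w);
		flush_work(&w);
		destroy_work_on_stack(&w);	/* pairs with INIT_WORK_ONSTACK() */
	}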
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd398af792b4..be878005e344 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4156,6 +4156,8 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
break;
entries++;
ring_buffer_iter_advance(buf_iter);
+ /* This could be a big loop */
+ cond_resched();
}
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index a8e28f9b9271..5b06f67879f5 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -252,6 +252,11 @@ static inline struct timerlat_variables *this_cpu_tmr_var(void)
return this_cpu_ptr(&per_cpu_timerlat_var);
}
+/*
+ * Protect the interface.
+ */
+static struct mutex interface_lock;
+
/*
* tlat_var_reset - Reset the values of the given timerlat_variables
*/
@@ -259,14 +264,20 @@ static inline void tlat_var_reset(void)
{
struct timerlat_variables *tlat_var;
int cpu;
+
+ /* Synchronize with the timerlat interfaces */
+ mutex_lock(&interface_lock);
/*
* So far, all the values are initialized as 0, so
* zeroing the structure is perfect.
*/
for_each_cpu(cpu, cpu_online_mask) {
tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
+ if (tlat_var->kthread)
+ hrtimer_cancel(&tlat_var->timer);
memset(tlat_var, 0, sizeof(*tlat_var));
}
+ mutex_unlock(&interface_lock);
}
#else /* CONFIG_TIMERLAT_TRACER */
#define tlat_var_reset() do {} while (0)
@@ -331,11 +342,6 @@ struct timerlat_sample {
};
#endif
-/*
- * Protect the interface.
- */
-static struct mutex interface_lock;
-
/*
* Tracer data.
*/
@@ -1612,6 +1618,7 @@ static int run_osnoise(void)
static struct cpumask osnoise_cpumask;
static struct cpumask save_cpumask;
+static struct cpumask kthread_cpumask;
/*
* osnoise_sleep - sleep until the next period
@@ -1675,6 +1682,7 @@ static inline int osnoise_migration_pending(void)
*/
mutex_lock(&interface_lock);
this_cpu_osn_var()->kthread = NULL;
+ cpumask_clear_cpu(smp_processor_id(), &kthread_cpumask);
mutex_unlock(&interface_lock);
return 1;
@@ -1945,11 +1953,16 @@ static void stop_kthread(unsigned int cpu)
{
struct task_struct *kthread;
+ mutex_lock(&interface_lock);
kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
if (kthread) {
- if (test_bit(OSN_WORKLOAD, &osnoise_options)) {
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+ mutex_unlock(&interface_lock);
+
+ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) &&
+ !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) {
kthread_stop(kthread);
- } else {
+ } else if (!WARN_ON(test_bit(OSN_WORKLOAD, &osnoise_options))) {
/*
* This is a user thread waiting on the timerlat_fd. We need
* to close all users, and the best way to guarantee this is
@@ -1958,8 +1971,8 @@ static void stop_kthread(unsigned int cpu)
kill_pid(kthread->thread_pid, SIGKILL, 1);
put_task_struct(kthread);
}
- per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
} else {
+ mutex_unlock(&interface_lock);
/* if no workload, just return */
if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
/*
@@ -1967,7 +1980,6 @@ static void stop_kthread(unsigned int cpu)
*/
per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
barrier();
- return;
}
}
}
@@ -1982,12 +1994,8 @@ static void stop_per_cpu_kthreads(void)
{
int cpu;
- cpus_read_lock();
-
- for_each_online_cpu(cpu)
+ for_each_possible_cpu(cpu)
stop_kthread(cpu);
-
- cpus_read_unlock();
}
/*
@@ -2021,6 +2029,7 @@ static int start_kthread(unsigned int cpu)
}
per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
+ cpumask_set_cpu(cpu, &kthread_cpumask);
return 0;
}
@@ -2048,8 +2057,16 @@ static int start_per_cpu_kthreads(void)
*/
cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask);
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) {
+ struct task_struct *kthread;
+
+ kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
+ if (!WARN_ON(!kthread))
+ kthread_stop(kthread);
+ }
per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+ }
for_each_cpu(cpu, current_mask) {
retval = start_kthread(cpu);
@@ -2579,7 +2596,8 @@ static int timerlat_fd_release(struct inode *inode, struct file *file)
osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
- hrtimer_cancel(&tlat_var->timer);
+ if (tlat_var->kthread)
+ hrtimer_cancel(&tlat_var->timer);
memset(tlat_var, 0, sizeof(*tlat_var));
osn_var->sampling = 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8c7bafbee1b1..7fa1c7c9151a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -6456,10 +6456,18 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
notrace void wq_watchdog_touch(int cpu)
{
+ unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
+ unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
+ unsigned long now = jiffies;
+
if (cpu >= 0)
- per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
+ per_cpu(wq_watchdog_touched_cpu, cpu) = now;
+ else
+ WARN_ONCE(1, "%s should be called with valid CPU", __func__);
- wq_watchdog_touched = jiffies;
+ /* Don't unnecessarily store to global cacheline */
+ if (time_after(now, touch_ts + thresh / 4))
+ WRITE_ONCE(wq_watchdog_touched, jiffies);
}
static void wq_watchdog_set_thresh(unsigned long thresh)
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index 7dfa88282b00..78f081d695d0 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -131,6 +131,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
v = new_root;
new_node = NULL;
+ } else {
+ new_node->children[0] = NULL;
}
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f36525a595a9..9beed7c71a8e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1610,7 +1610,7 @@ struct range __weak arch_get_mappable_range(void)
struct range mhp_get_pluggable_range(bool need_mapping)
{
- const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
+ const u64 max_phys = PHYSMEM_END;
struct range mhp_range;
if (need_mapping) {
diff --git a/mm/sparse.c b/mm/sparse.c
index 338cf946dee8..0706113c4c84 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -129,7 +129,7 @@ static inline int sparse_early_nid(struct mem_section *section)
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
unsigned long *end_pfn)
{
- unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
+ unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT;
/*
* Sanity checks - do not allow an architecture to pass
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e76faba10279..92fe2a76f4b5 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -699,27 +699,30 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
}
dst_pmdval = pmdp_get_lockless(dst_pmd);
- /*
- * If the dst_pmd is mapped as THP don't
- * override it and just be strict.
- */
- if (unlikely(pmd_trans_huge(dst_pmdval))) {
- err = -EEXIST;
- break;
- }
if (unlikely(pmd_none(dst_pmdval)) &&
unlikely(__pte_alloc(dst_mm, dst_pmd))) {
err = -ENOMEM;
break;
}
- /* If an huge pmd materialized from under us fail */
- if (unlikely(pmd_trans_huge(*dst_pmd))) {
+ dst_pmdval = pmdp_get_lockless(dst_pmd);
+ /*
+ * If the dst_pmd is THP don't override it and just be strict.
+ * (This includes the case where the PMD used to be THP and
+ * changed back to none after __pte_alloc().)
+ */
+ if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
+ pmd_devmap(dst_pmdval))) {
+ err = -EEXIST;
+ break;
+ }
+ if (unlikely(pmd_bad(dst_pmdval))) {
err = -EFAULT;
break;
}
-
- BUG_ON(pmd_none(*dst_pmd));
- BUG_ON(pmd_trans_huge(*dst_pmd));
+ /*
+ * For shmem mappings, khugepaged is allowed to remove page
+ * tables under us; pte_offset_map_lock() will deal with that.
+ */
err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
src_addr, flags, &folio);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 732ff66d1b51..0148be0814af 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2066,6 +2066,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
vb->dirty_max = 0;
bitmap_set(vb->used_map, 0, (1UL << order));
INIT_LIST_HEAD(&vb->free_list);
+ vb->cpu = raw_smp_processor_id();
xa = addr_to_vb_xa(va->va_start);
vb_idx = addr_to_vb_idx(va->va_start);
@@ -2082,7 +2083,6 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
* integrity together with list_for_each_rcu from read
* side.
*/
- vb->cpu = raw_smp_processor_id();
vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 83fa8e924f8a..81533bed0b46 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2261,25 +2261,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
}
-#ifdef CONFIG_CMA
-/*
- * It is waste of effort to scan and reclaim CMA pages if it is not available
- * for current allocation context. Kswapd can not be enrolled as it can not
- * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
- */
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
- return !current_is_kswapd() &&
- gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
- get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;
-}
-#else
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
- return false;
-}
-#endif
-
/*
* Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
*
@@ -2326,8 +2307,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
nr_pages = folio_nr_pages(folio);
total_scan += nr_pages;
- if (folio_zonenum(folio) > sc->reclaim_idx ||
- skip_cma(folio, sc)) {
+ if (folio_zonenum(folio) > sc->reclaim_idx) {
nr_skipped[folio_zonenum(folio)] += nr_pages;
move_to = &folios_skipped;
goto move;
@@ -4971,7 +4951,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* ineligible */
- if (zone > sc->reclaim_idx || skip_cma(folio, sc)) {
+ if (zone > sc->reclaim_idx) {
gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index dc1c07c7d4ff..d8a01eb016ad 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -68,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
};
/* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
struct hci_conn_params *params;
struct hci_dev *hdev = conn->hdev;
@@ -178,64 +178,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_dev_put(hdev);
}
-static void hci_acl_create_connection(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct inquiry_entry *ie;
- struct hci_cp_create_conn cp;
-
- BT_DBG("hcon %p", conn);
-
- /* Many controllers disallow HCI Create Connection while it is doing
- * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
- * Connection. This may cause the MGMT discovering state to become false
- * without user space's request but it is okay since the MGMT Discovery
- * APIs do not promise that discovery should be done forever. Instead,
- * the user space monitors the status of MGMT discovering and it may
- * request for discovery again when this flag becomes false.
- */
- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
- /* Put this connection to "pending" state so that it will be
- * executed after the inquiry cancel command complete event.
- */
- conn->state = BT_CONNECT2;
- hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
- return;
- }
-
- conn->state = BT_CONNECT;
- conn->out = true;
- conn->role = HCI_ROLE_MASTER;
-
- conn->attempt++;
-
- conn->link_policy = hdev->link_policy;
-
- memset(&cp, 0, sizeof(cp));
- bacpy(&cp.bdaddr, &conn->dst);
- cp.pscan_rep_mode = 0x02;
-
- ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
- if (ie) {
- if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
- cp.pscan_rep_mode = ie->data.pscan_rep_mode;
- cp.pscan_mode = ie->data.pscan_mode;
- cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
- }
-
- memcpy(conn->dev_class, ie->data.dev_class, 3);
- }
-
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
- if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
- cp.role_switch = 0x01;
- else
- cp.role_switch = 0x00;
-
- hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
-}
-
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
BT_DBG("hcon %p", conn);
@@ -1201,6 +1143,9 @@ void hci_conn_del(struct hci_conn *conn)
* rest of hci_conn_del.
*/
hci_conn_cleanup(conn);
+
+ /* Dequeue callbacks using connection pointer as data */
+ hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
@@ -1334,53 +1279,6 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
return 0;
}
-static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return;
-
- bt_dev_dbg(hdev, "err %d", err);
-
- hci_dev_lock(hdev);
-
- if (!err) {
- hci_connect_le_scan_cleanup(conn, 0x00);
- goto done;
- }
-
- /* Check if connection is still pending */
- if (conn != hci_lookup_le_connect(hdev))
- goto done;
-
- /* Flush to make sure we send create conn cancel command if needed */
- flush_delayed_work(&conn->le_conn_timeout);
- hci_conn_failed(conn, bt_status(err));
-
-done:
- hci_dev_unlock(hdev);
-}
-
-static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
-
- bt_dev_dbg(hdev, "conn %p", conn);
-
- clear_bit(HCI_CONN_SCANNING, &conn->flags);
- conn->state = BT_CONNECT;
-
- return hci_le_create_conn_sync(hdev, conn);
-}
-
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role)
@@ -1447,9 +1345,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
- err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
- UINT_PTR(conn->handle),
- create_le_conn_complete);
+ err = hci_connect_le_sync(hdev, conn);
if (err) {
hci_conn_del(conn);
return ERR_PTR(err);
@@ -1702,10 +1598,17 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
acl->conn_reason = conn_reason;
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
+ int err;
+
acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
- hci_acl_create_connection(acl);
+
+ err = hci_connect_acl_sync(hdev, acl);
+ if (err) {
+ hci_conn_del(acl);
+ return ERR_PTR(err);
+ }
}
return acl;
@@ -2616,22 +2519,6 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
}
}
-/* Check pending connect attempts */
-void hci_conn_check_pending(struct hci_dev *hdev)
-{
- struct hci_conn *conn;
-
- BT_DBG("hdev %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
- if (conn)
- hci_acl_create_connection(conn);
-
- hci_dev_unlock(hdev);
-}
-
static u32 get_link_mode(struct hci_conn *conn)
{
u32 link_mode = 0;
@@ -2947,12 +2834,10 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
+ struct hci_conn *conn = data;
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}
@@ -2980,14 +2865,21 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
*/
if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
switch (hci_skb_event(hdev->sent_cmd)) {
+ case HCI_EV_CONN_COMPLETE:
case HCI_EV_LE_CONN_COMPLETE:
case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
case HCI_EVT_LE_CIS_ESTABLISHED:
hci_cmd_sync_cancel(hdev, ECANCELED);
break;
}
+ /* Cancel connect attempt if still queued/pending */
+ } else if (!hci_cancel_connect_sync(hdev, conn)) {
+ return 0;
}
- return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
- NULL);
+ /* Run immediately if on cmd_sync_work since this may be called
+	 * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
+	 * queue their callbacks on cmd_sync_work.
+ */
+ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 727f040b6529..d81c7fccdd40 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -93,11 +93,11 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
/* It is possible that we receive Inquiry Complete event right
* before we receive Inquiry Cancel Command Complete event, in
* which case the latter event should have status of Command
- * Disallowed (0x0c). This should not be treated as error, since
+ * Disallowed. This should not be treated as error, since
* we actually achieve what Inquiry Cancel wants to achieve,
* which is to end the last Inquiry session.
*/
- if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+ if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
rp->status = 0x00;
}
@@ -118,8 +118,6 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -150,8 +148,6 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -2257,10 +2253,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
bt_dev_dbg(hdev, "status 0x%2.2x", status);
- if (status) {
- hci_conn_check_pending(hdev);
+ if (status)
return;
- }
if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
set_bit(HCI_INQUIRY, &hdev->flags);
@@ -2285,12 +2279,9 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
- if (status != 0x0c || conn->attempt > 2) {
- conn->state = BT_CLOSED;
- hci_connect_cfm(conn, status);
- hci_conn_del(conn);
- } else
- conn->state = BT_CONNECT2;
+ conn->state = BT_CLOSED;
+ hci_connect_cfm(conn, status);
+ hci_conn_del(conn);
}
} else {
if (!conn) {
@@ -2980,8 +2971,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;
@@ -3228,8 +3217,6 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
unlock:
hci_dev_unlock(hdev);
-
- hci_conn_check_pending(hdev);
}
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -6430,7 +6417,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
* transition into connected state and mark it as
* successful.
*/
- if (!conn->out && ev->status == 0x1a &&
+ if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
(hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
status = 0x00;
else
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 38fee34887d8..af7817a7c585 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -114,7 +114,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
skb_queue_tail(&req->cmd_q, skb);
}
-static int hci_cmd_sync_run(struct hci_request *req)
+static int hci_req_sync_run(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
@@ -164,7 +164,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
hdev->req_status = HCI_REQ_PEND;
- err = hci_cmd_sync_run(&req);
+ err = hci_req_sync_run(&req);
if (err < 0)
return ERR_PTR(err);
@@ -651,6 +651,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
+static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry,
+ int err)
+{
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+}
+
void hci_cmd_sync_clear(struct hci_dev *hdev)
{
struct hci_cmd_sync_work_entry *entry, *tmp;
@@ -659,13 +670,8 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
cancel_work_sync(&hdev->reenable_adv_work);
mutex_lock(&hdev->cmd_sync_work_lock);
- list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
- if (entry->destroy)
- entry->destroy(hdev, entry->data, -ECANCELED);
-
- list_del(&entry->list);
- kfree(entry);
- }
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
mutex_unlock(&hdev->cmd_sync_work_lock);
}
@@ -757,6 +763,153 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
+static struct hci_cmd_sync_work_entry *
+_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ if (func && entry->func != func)
+ continue;
+
+ if (data && entry->data != data)
+ continue;
+
+ if (destroy && entry->destroy != destroy)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Queue HCI command entry once:
+ *
+ * - Look up whether an entry already exists and, only if it doesn't, create a
+ *   new entry and queue it.
+ */
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue_once);
+
+/* Run HCI command:
+ *
+ * - hdev must be running
+ * - if on cmd_sync_work then run immediately otherwise queue
+ */
+int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	/* Only queue the command if hdev is running, which means it has been
+	 * opened and is either in its init phase or already up.
+ */
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -ENETDOWN;
+
+ /* If on cmd_sync_work then run immediately otherwise queue */
+ if (current_work() == &hdev->cmd_sync_work)
+ return func(hdev, data);
+
+ return hci_cmd_sync_submit(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_run);
+
+/* Run HCI command entry once:
+ *
+ * - Look up whether an entry already exists and, only if it doesn't, create a
+ *   new entry and run it.
+ * - if on cmd_sync_work then run immediately otherwise queue
+ */
+int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+ return 0;
+
+ return hci_cmd_sync_run(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_run_once);
+
+/* Lookup HCI command entry:
+ *
+ * - Return first entry that matches by function callback or data or
+ * destroy callback.
+ */
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return entry;
+}
+EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
+
+/* Cancel HCI command entry */
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry)
+{
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+}
+EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
+
+/* Dequeue one HCI command entry:
+ *
+ * - Lookup and cancel first entry that matches.
+ */
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ if (!entry)
+ return false;
+
+ hci_cmd_sync_cancel_entry(hdev, entry);
+
+ return true;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
+
+/* Dequeue HCI command entry:
+ *
+ * - Lookup and cancel any entry that matches by function callback or data or
+ * destroy callback.
+ */
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+ bool ret = false;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
+ destroy))) {
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ ret = true;
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue);
+
int hci_update_eir_sync(struct hci_dev *hdev)
{
struct hci_cp_write_eir cp;
@@ -3048,7 +3201,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return 0;
- return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+ return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
+ NULL);
}
int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
@@ -6254,12 +6408,21 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
conn->conn_timeout, NULL);
}
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
struct hci_cp_le_create_conn cp;
struct hci_conn_params *params;
u8 own_addr_type;
int err;
+ struct hci_conn *conn = data;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ bt_dev_dbg(hdev, "conn %p", conn);
+
+ clear_bit(HCI_CONN_SCANNING, &conn->flags);
+ conn->state = BT_CONNECT;
/* If requested to connect as peripheral use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
@@ -6577,3 +6740,125 @@ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
UINT_PTR(instance), NULL);
}
+
+static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn *conn = data;
+ struct inquiry_entry *ie;
+ struct hci_cp_create_conn cp;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ /* Many controllers disallow HCI Create Connection while it is doing
+ * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
+ * Connection. This may cause the MGMT discovering state to become false
+ * without user space's request but it is okay since the MGMT Discovery
+ * APIs do not promise that discovery should be done forever. Instead,
+ * the user space monitors the status of MGMT discovering and it may
+ * request for discovery again when this flag becomes false.
+ */
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
+ NULL, HCI_CMD_TIMEOUT);
+ if (err)
+ bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
+ }
+
+ conn->state = BT_CONNECT;
+ conn->out = true;
+ conn->role = HCI_ROLE_MASTER;
+
+ conn->attempt++;
+
+ conn->link_policy = hdev->link_policy;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+
+ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+ if (ie) {
+ if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
+ cp.pscan_rep_mode = ie->data.pscan_rep_mode;
+ cp.pscan_mode = ie->data.pscan_mode;
+ cp.clock_offset = ie->data.clock_offset |
+ cpu_to_le16(0x8000);
+ }
+
+ memcpy(conn->dev_class, ie->data.dev_class, 3);
+ }
+
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+ if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
+ cp.role_switch = 0x01;
+ else
+ cp.role_switch = 0x00;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
+ sizeof(cp), &cp,
+ HCI_EV_CONN_COMPLETE,
+ conn->conn_timeout, NULL);
+}
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
+ NULL);
+}
+
+static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct hci_conn *conn = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (err == -ECANCELED)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (!hci_conn_valid(hdev, conn))
+ goto done;
+
+ if (!err) {
+ hci_connect_le_scan_cleanup(conn, 0x00);
+ goto done;
+ }
+
+ /* Check if connection is still pending */
+ if (conn != hci_lookup_le_connect(hdev))
+ goto done;
+
+ /* Flush to make sure we send create conn cancel command if needed */
+ flush_delayed_work(&conn->le_conn_timeout);
+ hci_conn_failed(conn, bt_status(err));
+
+done:
+ hci_dev_unlock(hdev);
+}
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
+ create_le_conn_complete);
+}
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ if (conn->state != BT_OPEN)
+ return -EINVAL;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev,
+ hci_acl_create_conn_sync,
+ conn, NULL);
+ case LE_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
+ conn, create_le_conn_complete);
+ }
+
+ return -ENOENT;
+}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3533ac679e42..4ae9029b5785 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2824,16 +2824,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
key_count);
- for (i = 0; i < key_count; i++) {
- struct mgmt_link_key_info *key = &cp->keys[i];
-
- /* Considering SMP over BREDR/LE, there is no need to check addr_type */
- if (key->type > 0x08)
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_LINK_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
- }
-
hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
@@ -2858,6 +2848,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
}
+ if (key->addr.type != BDADDR_BREDR) {
+ bt_dev_warn(hdev,
+ "Invalid link address type %u for %pMR",
+ key->addr.type, &key->addr.bdaddr);
+ continue;
+ }
+
+ if (key->type > 0x08) {
+ bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
+ key->type, &key->addr.bdaddr);
+ continue;
+ }
+
/* Always ignore debug keys and require a new pairing if
* the user wants to use them.
*/
@@ -2915,7 +2918,12 @@ static int unpair_device_sync(struct hci_dev *hdev, void *data)
if (!conn)
return 0;
- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
+ /* Disregard any possible error since the likes of hci_abort_conn_sync
+ * will clean up the connection no matter the error.
+ */
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+
+ return 0;
}
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3047,13 +3055,44 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ cmd->cmd_complete(cmd, mgmt_status(err));
+ mgmt_pending_free(cmd);
+}
+
+static int disconnect_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_disconnect *cp = cmd->param;
+ struct hci_conn *conn;
+
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+ le_addr_type(cp->addr.type));
+
+ if (!conn)
+ return -ENOTCONN;
+
+ /* Disregard any possible error since the likes of hci_abort_conn_sync
+ * will clean up the connection no matter the error.
+ */
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+
+ return 0;
+}
+
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_disconnect *cp = data;
struct mgmt_rp_disconnect rp;
struct mgmt_pending_cmd *cmd;
- struct hci_conn *conn;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -3076,27 +3115,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
- goto failed;
- }
-
- if (cp->addr.type == BDADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &cp->addr.bdaddr);
- else
- conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
- le_addr_type(cp->addr.type));
-
- if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
- MGMT_STATUS_NOT_CONNECTED, &rp,
- sizeof(rp));
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -3104,9 +3123,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->cmd_complete = generic_cmd_complete;
- err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
+ err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
+ disconnect_complete);
if (err < 0)
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
failed:
hci_dev_unlock(hdev);
@@ -7065,7 +7085,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
for (i = 0; i < irk_count; i++) {
struct mgmt_irk_info *irk = &cp->irks[i];
- u8 addr_type = le_addr_type(irk->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_IRK,
@@ -7075,12 +7094,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
continue;
}
- /* When using SMP over BR/EDR, the addr type should be set to BREDR */
- if (irk->addr.type == BDADDR_BREDR)
- addr_type = BDADDR_BREDR;
-
hci_add_irk(hdev, &irk->addr.bdaddr,
- addr_type, irk->val,
+ le_addr_type(irk->addr.type), irk->val,
BDADDR_ANY);
}
@@ -7145,15 +7160,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
bt_dev_dbg(hdev, "key_count %u", key_count);
- for (i = 0; i < key_count; i++) {
- struct mgmt_ltk_info *key = &cp->keys[i];
-
- if (!ltk_is_valid(key))
- return mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_LOAD_LONG_TERM_KEYS,
- MGMT_STATUS_INVALID_PARAMS);
- }
-
hci_dev_lock(hdev);
hci_smp_ltks_clear(hdev);
@@ -7161,7 +7167,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, authenticated;
- u8 addr_type = le_addr_type(key->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_LTK,
@@ -7171,6 +7176,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
+ if (!ltk_is_valid(key)) {
+ bt_dev_warn(hdev, "Invalid LTK for %pMR",
+ &key->addr.bdaddr);
+ continue;
+ }
+
switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
authenticated = 0x00;
@@ -7196,12 +7207,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
- /* When using SMP over BR/EDR, the addr type should be set to BREDR */
- if (key->addr.type == BDADDR_BREDR)
- addr_type = BDADDR_BREDR;
-
hci_add_ltk(hdev, &key->addr.bdaddr,
- addr_type, type, authenticated,
+ le_addr_type(key->addr.type), type, authenticated,
key->val, key->enc_size, key->ediv, key->rand);
}
@@ -9450,7 +9457,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
@@ -9501,7 +9508,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.type = mgmt_ltk_type(key);
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -9530,7 +9537,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
bacpy(&ev.rpa, &irk->rpa);
bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
- ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
+ ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
memcpy(ev.irk.val, irk->val, sizeof(irk->val));
mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
@@ -9559,7 +9566,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
- ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
ev.key.type = csrk->type;
memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
@@ -9637,18 +9644,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
mgmt_event_skb(skb, NULL);
}
-static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
-{
- struct sock **sk = data;
-
- cmd->cmd_complete(cmd, 0);
-
- *sk = cmd->sk;
- sock_hold(*sk);
-
- mgmt_pending_remove(cmd);
-}
-
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
struct hci_dev *hdev = data;
@@ -9689,8 +9684,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (link_type != ACL_LINK && link_type != LE_LINK)
return;
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
-
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.reason = reason;
@@ -9703,9 +9696,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (sk)
sock_put(sk);
-
- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
- hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index fa3986cfd526..56f7f041c9a6 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1061,7 +1061,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->remote_irk) {
- smp->remote_irk->link_type = hcon->type;
mgmt_new_irk(hdev, smp->remote_irk, persistent);
/* Now that user space can be considered to know the
@@ -1081,28 +1080,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->csrk) {
- smp->csrk->link_type = hcon->type;
smp->csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->csrk, persistent);
}
if (smp->responder_csrk) {
- smp->responder_csrk->link_type = hcon->type;
smp->responder_csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
}
if (smp->ltk) {
- smp->ltk->link_type = hcon->type;
smp->ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->ltk, persistent);
}
if (smp->responder_ltk) {
- smp->responder_ltk->link_type = hcon->type;
smp->responder_ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
@@ -1122,8 +1117,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
smp->link_key, type, 0, &persistent);
if (key) {
- key->link_type = hcon->type;
- key->bdaddr_type = hcon->dst_type;
mgmt_new_link_key(hdev, key, persistent);
/* Don't keep debug keys around if the relevant
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index e69a872bfc1d..a6d8cd9a5807 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1425,12 +1425,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
- if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
/* Refresh entry */
fdb->used = jiffies;
- } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
- /* Take over SW learned entry */
- set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
+ } else {
modified = true;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9168114fc87f..00208ee13e57 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1428,6 +1428,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
+#if IS_ENABLED(CONFIG_PROC_FS)
+ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
+ remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+#endif
bo->bound = 0;
bo->ifindex = 0;
notify_enodev = 1;
diff --git a/net/core/sock.c b/net/core/sock.c
index 55d85d50b3e4..bc2a4e38dcea 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2019,14 +2019,6 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
return 0;
}
-int sock_getsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
-{
- return sk_getsockopt(sock->sk, level, optname,
- USER_SOCKPTR(optval),
- USER_SOCKPTR(optlen));
-}
-
/*
* Initialize an sk_lock.
*
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index b38b82ae903d..e0b8d6b17a34 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -50,7 +50,7 @@ struct fou_net {
static inline struct fou *fou_from_sock(struct sock *sk)
{
- return sk->sk_user_data;
+ return rcu_dereference_sk_user_data(sk);
}
static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
@@ -233,9 +233,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
struct sk_buff *skb)
{
const struct net_offload __rcu **offloads;
- u8 proto = fou_from_sock(sk)->protocol;
+ struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
struct sk_buff *pp = NULL;
+ u8 proto;
+
+ if (!fou)
+ goto out;
+
+ proto = fou->protocol;
/* We can clear the encap_mark for FOU as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
@@ -263,14 +269,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
int nhoff)
{
const struct net_offload __rcu **offloads;
- u8 proto = fou_from_sock(sk)->protocol;
+ struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
- int err = -ENOSYS;
+ u8 proto;
+ int err;
+
+ if (!fou) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ proto = fou->protocol;
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
- if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+ if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
+ err = -ENOSYS;
goto out;
+ }
err = ops->callbacks.gro_complete(skb, nhoff);
@@ -320,6 +336,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
struct gro_remcsum grc;
u8 proto;
+ if (!fou)
+ goto out;
+
skb_gro_remcsum_init(&grc);
off = skb_gro_offset(skb);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 53b0d62fd2c2..fe6178715ba0 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -577,7 +577,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
err = sk_stream_error(sk, msg->msg_flags, err);
release_sock(sk);
sk_psock_put(sk, psock);
- return copied ? copied : err;
+ return copied > 0 ? copied : err;
}
enum {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 24c7c955dc95..b565c8a8e7ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5880,6 +5880,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
* RFC 5961 4.2 : Send a challenge ack
*/
if (th->syn) {
+ if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
+ TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
+ TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
+ TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
+ goto pass;
syn_challenge:
if (syn_inerr)
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
@@ -5889,6 +5894,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
goto discard;
}
+pass:
bpf_skops_parse_hdr(sk, skb);
return true;
diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
index ad5f6f6ba333..85b92917849b 100644
--- a/net/ipv6/ila/ila.h
+++ b/net/ipv6/ila/ila.h
@@ -108,6 +108,7 @@ int ila_lwt_init(void);
void ila_lwt_fini(void);
int ila_xlat_init_net(struct net *net);
+void ila_xlat_pre_exit_net(struct net *net);
void ila_xlat_exit_net(struct net *net);
int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
index 69caed07315f..976c78efbae1 100644
--- a/net/ipv6/ila/ila_main.c
+++ b/net/ipv6/ila/ila_main.c
@@ -71,6 +71,11 @@ static __net_init int ila_init_net(struct net *net)
return err;
}
+static __net_exit void ila_pre_exit_net(struct net *net)
+{
+ ila_xlat_pre_exit_net(net);
+}
+
static __net_exit void ila_exit_net(struct net *net)
{
ila_xlat_exit_net(net);
@@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net)
static struct pernet_operations ila_net_ops = {
.init = ila_init_net,
+ .pre_exit = ila_pre_exit_net,
.exit = ila_exit_net,
.id = &ila_net_id,
.size = sizeof(struct ila_net),
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 67e8c9440977..534a4498e280 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -619,6 +619,15 @@ int ila_xlat_init_net(struct net *net)
return 0;
}
+void ila_xlat_pre_exit_net(struct net *net)
+{
+ struct ila_net *ilan = net_generic(net, ila_net_id);
+
+ if (ilan->xlat.hooks_registered)
+ nf_unregister_net_hooks(net, ila_nf_hook_ops,
+ ARRAY_SIZE(ila_nf_hook_ops));
+}
+
void ila_xlat_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -626,10 +635,6 @@ void ila_xlat_exit_net(struct net *net)
rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
free_bucket_spinlocks(ilan->xlat.locks);
-
- if (ilan->xlat.hooks_registered)
- nf_unregister_net_hooks(net, ila_nf_hook_ops,
- ARRAY_SIZE(ila_nf_hook_ops));
}
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 5d8ed6c90b7e..5885810da412 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -321,7 +321,6 @@ insert_tree(struct net *net,
struct nf_conncount_rb *rbconn;
struct nf_conncount_tuple *conn;
unsigned int count = 0, gc_count = 0;
- u8 keylen = data->keylen;
bool do_gc = true;
spin_lock_bh(&nf_conncount_locks[hash]);
@@ -333,7 +332,7 @@ insert_tree(struct net *net,
rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
parent = *rbnode;
- diff = key_diff(key, rbconn->key, keylen);
+ diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
rbnode = &((*rbnode)->rb_left);
} else if (diff > 0) {
@@ -378,7 +377,7 @@ insert_tree(struct net *net,
conn->tuple = *tuple;
conn->zone = *zone;
- memcpy(rbconn->key, key, sizeof(u32) * keylen);
+ memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
nf_conncount_list_init(&rbconn->list);
list_add(&conn->node, &rbconn->list.head);
@@ -403,7 +402,6 @@ count_tree(struct net *net,
struct rb_node *parent;
struct nf_conncount_rb *rbconn;
unsigned int hash;
- u8 keylen = data->keylen;
hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
root = &data->root[hash];
@@ -414,7 +412,7 @@ count_tree(struct net *net,
rbconn = rb_entry(parent, struct nf_conncount_rb, node);
- diff = key_diff(key, rbconn->key, keylen);
+ diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
parent = rcu_dereference_raw(parent->rb_left);
} else if (diff > 0) {
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9cff99558694..30955dd45779 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -786,12 +786,15 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
* queue, accept the collision, update the host tags.
*/
q->way_collisions++;
- if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
- }
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
+
+ if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+ if (allocate_src)
+ q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+ if (allocate_dst)
+ q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+ }
found:
/* reserve queue for future packets in same flow */
reduced_hash = outer_hash + k;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 0224a9249245..d36eeb7b0502 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -742,11 +742,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
err = qdisc_enqueue(skb, q->qdisc, &to_free);
kfree_skb_list(to_free);
- if (err != NET_XMIT_SUCCESS &&
- net_xmit_drop_count(err)) {
- qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1,
- pkt_len);
+ if (err != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(err))
+ qdisc_qstats_drop(sch);
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
goto tfifo_dequeue;
}
diff --git a/net/socket.c b/net/socket.c
index 8d83c4bb163b..9db33cd4a71b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2281,33 +2281,23 @@ static bool sock_use_custom_sol_socket(const struct socket *sock)
return test_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
}
-/*
- * Set a socket option. Because we don't know the option lengths we have
- * to pass the user mode parameter for the protocols to sort out.
- */
-int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
- int optlen)
+int do_sock_setsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, int optlen)
{
- sockptr_t optval = USER_SOCKPTR(user_optval);
const struct proto_ops *ops;
char *kernel_optval = NULL;
- int err, fput_needed;
- struct socket *sock;
+ int err;
if (optlen < 0)
return -EINVAL;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (!sock)
- return err;
-
err = security_socket_setsockopt(sock, level, optname);
if (err)
goto out_put;
- if (!in_compat_syscall())
+ if (!compat)
err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname,
- user_optval, &optlen,
+ optval, &optlen,
&kernel_optval);
if (err < 0)
goto out_put;
@@ -2328,6 +2318,27 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
optlen);
kfree(kernel_optval);
out_put:
+ return err;
+}
+EXPORT_SYMBOL(do_sock_setsockopt);
+
+/* Set a socket option. Because we don't know the option lengths we have
+ * to pass the user mode parameter for the protocols to sort out.
+ */
+int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
+ int optlen)
+{
+ sockptr_t optval = USER_SOCKPTR(user_optval);
+ bool compat = in_compat_syscall();
+ int err, fput_needed;
+ struct socket *sock;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ return err;
+
+ err = do_sock_setsockopt(sock, compat, level, optname, optval, optlen);
+
fput_light(sock->file, fput_needed);
return err;
}
@@ -2341,6 +2352,43 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
int optname));
+int do_sock_getsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, sockptr_t optlen)
+{
+ int max_optlen __maybe_unused = 0;
+ const struct proto_ops *ops;
+ int err;
+
+ err = security_socket_getsockopt(sock, level, optname);
+ if (err)
+ return err;
+
+ if (!compat)
+ copy_from_sockptr(&max_optlen, optlen, sizeof(int));
+
+ ops = READ_ONCE(sock->ops);
+ if (level == SOL_SOCKET) {
+ err = sk_getsockopt(sock->sk, level, optname, optval, optlen);
+ } else if (unlikely(!ops->getsockopt)) {
+ err = -EOPNOTSUPP;
+ } else {
+ if (WARN_ONCE(optval.is_kernel || optlen.is_kernel,
+ "Invalid argument type"))
+ return -EOPNOTSUPP;
+
+ err = ops->getsockopt(sock, level, optname, optval.user,
+ optlen.user);
+ }
+
+ if (!compat)
+ err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
+ optval, optlen, max_optlen,
+ err);
+
+ return err;
+}
+EXPORT_SYMBOL(do_sock_getsockopt);
+
/*
* Get a socket option. Because we don't know the option lengths we have
* to pass a user mode parameter for the protocols to sort out.
@@ -2348,36 +2396,18 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
int __user *optlen)
{
- int max_optlen __maybe_unused;
- const struct proto_ops *ops;
int err, fput_needed;
struct socket *sock;
+ bool compat;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
return err;
- err = security_socket_getsockopt(sock, level, optname);
- if (err)
- goto out_put;
-
- if (!in_compat_syscall())
- max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
+ compat = in_compat_syscall();
+ err = do_sock_getsockopt(sock, compat, level, optname,
+ USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
- ops = READ_ONCE(sock->ops);
- if (level == SOL_SOCKET)
- err = sock_getsockopt(sock, level, optname, optval, optlen);
- else if (unlikely(!ops->getsockopt))
- err = -EOPNOTSUPP;
- else
- err = ops->getsockopt(sock, level, optname, optval,
- optlen);
-
- if (!in_compat_syscall())
- err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
- optval, optlen, max_optlen,
- err);
-out_put:
fput_light(sock->file, fput_needed);
return err;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b7f62442d826..dca4429014db 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -693,9 +693,6 @@ static void init_peercred(struct sock *sk)
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
- const struct cred *old_cred;
- struct pid *old_pid;
-
if (sk < peersk) {
spin_lock(&sk->sk_peer_lock);
spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
@@ -703,16 +700,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
spin_lock(&peersk->sk_peer_lock);
spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
}
- old_pid = sk->sk_peer_pid;
- old_cred = sk->sk_peer_cred;
+
sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
spin_unlock(&sk->sk_peer_lock);
spin_unlock(&peersk->sk_peer_lock);
-
- put_pid(old_pid);
- put_cred(old_cred);
}
static int unix_listen(struct socket *sock, int backlog)
diff --git a/rust/Makefile b/rust/Makefile
index e5619f25b55c..333b9a482473 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -363,9 +363,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
$(NM) -p --defined-only $< \
- | grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
- | xargs -Isymbol \
- echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
+ | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports)
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index fdb778e65d79..e23e7827d756 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -248,7 +248,7 @@ pub fn ffi_init(init_func: impl FnOnce(*mut T)) -> impl PinInit<Self> {
}
/// Returns a raw pointer to the opaque data.
- pub fn get(&self) -> *mut T {
+ pub const fn get(&self) -> *mut T {
UnsafeCell::get(&self.value).cast::<T>()
}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index acd0393b5095..7dee348ef0cc 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -203,7 +203,11 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
// freed until the module is unloaded.
#[cfg(MODULE)]
static THIS_MODULE: kernel::ThisModule = unsafe {{
- kernel::ThisModule::from_ptr(&kernel::bindings::__this_module as *const _ as *mut _)
+ extern \"C\" {{
+ static __this_module: kernel::types::Opaque<kernel::bindings::module>;
+ }}
+
+ kernel::ThisModule::from_ptr(__this_module.get())
}};
#[cfg(not(MODULE))]
static THIS_MODULE: kernel::ThisModule = unsafe {{
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 98c2bdbfcaed..4625674f0e95 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -3769,12 +3769,18 @@ static int smack_unix_stream_connect(struct sock *sock,
}
}
- /*
- * Cross reference the peer labels for SO_PEERSEC.
- */
if (rc == 0) {
+ /*
+ * Cross reference the peer labels for SO_PEERSEC.
+ */
nsp->smk_packet = ssp->smk_out;
ssp->smk_packet = osp->smk_out;
+
+ /*
+ * new/child/established socket must inherit listening socket labels
+ */
+ nsp->smk_out = osp->smk_out;
+ nsp->smk_in = osp->smk_in;
}
return rc;
diff --git a/sound/core/control.c b/sound/core/control.c
index 59c8658966d4..dd4bdb39782c 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1553,12 +1553,16 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- int change;
+ int err, change;
struct user_element *ue = kcontrol->private_data;
unsigned int size = ue->elem_data_size;
char *dst = ue->elem_data +
snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size;
+ err = sanity_check_input_values(ue->card, ucontrol, &ue->info, false);
+ if (err < 0)
+ return err;
+
change = memcmp(&ucontrol->value, dst, size) != 0;
if (change)
memcpy(dst, &ucontrol->value, size);
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index 5d8e1d944b0a..7b276047f85a 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
return 0;
}
+/* a simple sanity check for input values to chmap kcontrol */
+static int chmap_value_check(struct hdac_chmap *hchmap,
+ const struct snd_ctl_elem_value *ucontrol)
+{
+ int i;
+
+ for (i = 0; i < hchmap->channels_max; i++) {
+ if (ucontrol->value.integer.value[i] < 0 ||
+ ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST)
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
unsigned char chmap[8], per_pin_chmap[8];
int i, err, ca, prepared = 0;
+ err = chmap_value_check(hchmap, ucontrol);
+ if (err < 0)
+ return err;
+
/* No monitor is connected in dyn_pcm_assign.
* It's invalid to setup the chmap
*/
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8396d1d93668..63bd0e384bae 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -311,6 +311,7 @@ enum {
CXT_FIXUP_HEADSET_MIC,
CXT_FIXUP_HP_MIC_NO_PRESENCE,
CXT_PINCFG_SWS_JS201D,
+ CXT_PINCFG_TOP_SPEAKER,
};
/* for hda_fixup_thinkpad_acpi() */
@@ -978,6 +979,13 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_PINS,
.v.pins = cxt_pincfg_sws_js201d,
},
+ [CXT_PINCFG_TOP_SPEAKER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1d, 0x82170111 },
+ { }
+ },
+ },
};
static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -1074,6 +1082,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
+ SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
+ SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
{}
};
@@ -1093,6 +1103,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
{ .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
+ { .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" },
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5736516275a3..6661fed2c2bb 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7366,6 +7366,7 @@ enum {
ALC236_FIXUP_HP_GPIO_LED,
ALC236_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+ ALC236_FIXUP_LENOVO_INV_DMIC,
ALC298_FIXUP_SAMSUNG_AMP,
ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
@@ -8922,6 +8923,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc236_fixup_hp_mute_led_micmute_vref,
},
+ [ALC236_FIXUP_LENOVO_INV_DMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic,
+ .chained = true,
+ .chain_id = ALC283_FIXUP_INT_MIC,
+ },
[ALC298_FIXUP_SAMSUNG_AMP] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc298_fixup_samsung_amp,
@@ -9866,6 +9873,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
@@ -10298,6 +10306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -10546,6 +10555,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
{.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
+ {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
{}
};
#define ALC225_STANDARD_PINS \
diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
index 41ad82a42916..3639dcd0bbb2 100644
--- a/sound/soc/codecs/tas2781-fmwlib.c
+++ b/sound/soc/codecs/tas2781-fmwlib.c
@@ -21,7 +21,7 @@
#include <sound/soc.h>
#include <sound/tlv.h>
#include <sound/tas2781.h>
-
+#include <asm/unaligned.h>
#define ERROR_PRAM_CRCCHK 0x0000000
#define ERROR_YRAM_CRCCHK 0x0000001
@@ -125,8 +125,7 @@ static struct tasdevice_config_info *tasdevice_add_config(
/* convert data[offset], data[offset + 1], data[offset + 2] and
* data[offset + 3] into host
*/
- cfg_info->nblocks =
- be32_to_cpup((__be32 *)&config_data[config_offset]);
+ cfg_info->nblocks = get_unaligned_be32(&config_data[config_offset]);
config_offset += 4;
/* Several kinds of dsp/algorithm firmwares can run on tas2781,
@@ -170,14 +169,14 @@ static struct tasdevice_config_info *tasdevice_add_config(
}
bk_da[i]->yram_checksum =
- be16_to_cpup((__be16 *)&config_data[config_offset]);
+ get_unaligned_be16(&config_data[config_offset]);
config_offset += 2;
bk_da[i]->block_size =
- be32_to_cpup((__be32 *)&config_data[config_offset]);
+ get_unaligned_be32(&config_data[config_offset]);
config_offset += 4;
bk_da[i]->n_subblks =
- be32_to_cpup((__be32 *)&config_data[config_offset]);
+ get_unaligned_be32(&config_data[config_offset]);
config_offset += 4;
@@ -227,7 +226,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
}
buf = (unsigned char *)fmw->data;
- fw_hdr->img_sz = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->img_sz = get_unaligned_be32(&buf[offset]);
offset += 4;
if (fw_hdr->img_sz != fmw->size) {
dev_err(tas_priv->dev,
@@ -238,9 +237,9 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
goto out;
}
- fw_hdr->checksum = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->checksum = get_unaligned_be32(&buf[offset]);
offset += 4;
- fw_hdr->binary_version_num = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->binary_version_num = get_unaligned_be32(&buf[offset]);
if (fw_hdr->binary_version_num < 0x103) {
dev_err(tas_priv->dev, "File version 0x%04x is too low",
fw_hdr->binary_version_num);
@@ -249,7 +248,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
goto out;
}
offset += 4;
- fw_hdr->drv_fw_version = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->drv_fw_version = get_unaligned_be32(&buf[offset]);
offset += 8;
fw_hdr->plat_type = buf[offset];
offset += 1;
@@ -277,11 +276,11 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
for (i = 0; i < TASDEVICE_DEVICE_SUM; i++, offset++)
fw_hdr->devs[i] = buf[offset];
- fw_hdr->nconfig = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->nconfig = get_unaligned_be32(&buf[offset]);
offset += 4;
for (i = 0; i < TASDEVICE_CONFIG_SUM; i++) {
- fw_hdr->config_size[i] = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->config_size[i] = get_unaligned_be32(&buf[offset]);
offset += 4;
total_config_sz += fw_hdr->config_size[i];
}
@@ -330,7 +329,7 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw,
/* convert data[offset], data[offset + 1], data[offset + 2] and
* data[offset + 3] into host
*/
- block->type = be32_to_cpup((__be32 *)&data[offset]);
+ block->type = get_unaligned_be32(&data[offset]);
offset += 4;
block->is_pchksum_present = data[offset];
@@ -345,10 +344,10 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw,
block->ychksum = data[offset];
offset++;
- block->blk_size = be32_to_cpup((__be32 *)&data[offset]);
+ block->blk_size = get_unaligned_be32(&data[offset]);
offset += 4;
- block->nr_subblocks = be32_to_cpup((__be32 *)&data[offset]);
+ block->nr_subblocks = get_unaligned_be32(&data[offset]);
offset += 4;
if (offset + block->blk_size > fmw->size) {
@@ -381,7 +380,7 @@ static int fw_parse_data_kernel(struct tasdevice_fw *tas_fmw,
offset = -EINVAL;
goto out;
}
- img_data->nr_blk = be32_to_cpup((__be32 *)&data[offset]);
+ img_data->nr_blk = get_unaligned_be32(&data[offset]);
offset += 4;
img_data->dev_blks = kcalloc(img_data->nr_blk,
@@ -477,14 +476,14 @@ static int fw_parse_variable_header_kernel(
offset = -EINVAL;
goto out;
}
- fw_hdr->device_family = be16_to_cpup((__be16 *)&buf[offset]);
+ fw_hdr->device_family = get_unaligned_be16(&buf[offset]);
if (fw_hdr->device_family != 0) {
dev_err(tas_priv->dev, "%s:not TAS device\n", __func__);
offset = -EINVAL;
goto out;
}
offset += 2;
- fw_hdr->device = be16_to_cpup((__be16 *)&buf[offset]);
+ fw_hdr->device = get_unaligned_be16(&buf[offset]);
if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
fw_hdr->device == 6) {
dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
@@ -502,7 +501,7 @@ static int fw_parse_variable_header_kernel(
goto out;
}
- tas_fmw->nr_programs = be32_to_cpup((__be32 *)&buf[offset]);
+ tas_fmw->nr_programs = get_unaligned_be32(&buf[offset]);
offset += 4;
if (tas_fmw->nr_programs == 0 || tas_fmw->nr_programs >
@@ -521,14 +520,14 @@ static int fw_parse_variable_header_kernel(
for (i = 0; i < tas_fmw->nr_programs; i++) {
program = &(tas_fmw->programs[i]);
- program->prog_size = be32_to_cpup((__be32 *)&buf[offset]);
+ program->prog_size = get_unaligned_be32(&buf[offset]);
offset += 4;
}
/* Skip the unused prog_size */
offset += 4 * (TASDEVICE_MAXPROGRAM_NUM_KERNEL - tas_fmw->nr_programs);
- tas_fmw->nr_configurations = be32_to_cpup((__be32 *)&buf[offset]);
+ tas_fmw->nr_configurations = get_unaligned_be32(&buf[offset]);
offset += 4;
/* The max number of config in firmware greater than 4 pieces of
@@ -560,7 +559,7 @@ static int fw_parse_variable_header_kernel(
for (i = 0; i < tas_fmw->nr_programs; i++) {
config = &(tas_fmw->configs[i]);
- config->cfg_size = be32_to_cpup((__be32 *)&buf[offset]);
+ config->cfg_size = get_unaligned_be32(&buf[offset]);
offset += 4;
}
@@ -598,7 +597,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
switch (subblk_typ) {
case TASDEVICE_CMD_SING_W: {
int i;
- unsigned short len = be16_to_cpup((__be16 *)&data[2]);
+ unsigned short len = get_unaligned_be16(&data[2]);
subblk_offset += 2;
if (subblk_offset + 4 * len > sublocksize) {
@@ -624,7 +623,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
}
break;
case TASDEVICE_CMD_BURST: {
- unsigned short len = be16_to_cpup((__be16 *)&data[2]);
+ unsigned short len = get_unaligned_be16(&data[2]);
subblk_offset += 2;
if (subblk_offset + 4 + len > sublocksize) {
@@ -665,7 +664,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
is_err = true;
break;
}
- sleep_time = be16_to_cpup((__be16 *)&data[2]) * 1000;
+ sleep_time = get_unaligned_be16(&data[2]) * 1000;
usleep_range(sleep_time, sleep_time + 50);
subblk_offset += 2;
}
@@ -940,7 +939,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv
offset += len;
- fw_hdr->device_family = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->device_family = get_unaligned_be32(&buf[offset]);
if (fw_hdr->device_family != 0) {
dev_err(tas_priv->dev, "%s: not TAS device\n", __func__);
offset = -EINVAL;
@@ -948,7 +947,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv
}
offset += 4;
- fw_hdr->device = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_hdr->device = get_unaligned_be32(&buf[offset]);
if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
fw_hdr->device == 6) {
dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
@@ -993,7 +992,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw,
offset = -EINVAL;
goto out;
}
- block->type = be32_to_cpup((__be32 *)&data[offset]);
+ block->type = get_unaligned_be32(&data[offset]);
offset += 4;
if (tas_fmw->fw_hdr.fixed_hdr.drv_ver >= PPC_DRIVER_CRCCHK) {
@@ -1018,7 +1017,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw,
block->is_ychksum_present = 0;
}
- block->nr_cmds = be32_to_cpup((__be32 *)&data[offset]);
+ block->nr_cmds = get_unaligned_be32(&data[offset]);
offset += 4;
n = block->nr_cmds * 4;
@@ -1069,7 +1068,7 @@ static int fw_parse_data(struct tasdevice_fw *tas_fmw,
goto out;
}
offset += n;
- img_data->nr_blk = be16_to_cpup((__be16 *)&data[offset]);
+ img_data->nr_blk = get_unaligned_be16(&data[offset]);
offset += 2;
img_data->dev_blks = kcalloc(img_data->nr_blk,
@@ -1106,7 +1105,7 @@ static int fw_parse_program_data(struct tasdevice_priv *tas_priv,
offset = -EINVAL;
goto out;
}
- tas_fmw->nr_programs = be16_to_cpup((__be16 *)&buf[offset]);
+ tas_fmw->nr_programs = get_unaligned_be16(&buf[offset]);
offset += 2;
if (tas_fmw->nr_programs == 0) {
@@ -1173,7 +1172,7 @@ static int fw_parse_configuration_data(
offset = -EINVAL;
goto out;
}
- tas_fmw->nr_configurations = be16_to_cpup((__be16 *)&data[offset]);
+ tas_fmw->nr_configurations = get_unaligned_be16(&data[offset]);
offset += 2;
if (tas_fmw->nr_configurations == 0) {
@@ -1805,7 +1804,7 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv,
/* Convert data[offset], data[offset + 1], data[offset + 2] and
* data[offset + 3] into host
*/
- fw_fixed_hdr->fwsize = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_fixed_hdr->fwsize = get_unaligned_be32(&buf[offset]);
offset += 4;
if (fw_fixed_hdr->fwsize != fmw->size) {
dev_err(tas_priv->dev, "File size not match, %lu %u",
@@ -1814,9 +1813,9 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv,
goto out;
}
offset += 4;
- fw_fixed_hdr->ppcver = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_fixed_hdr->ppcver = get_unaligned_be32(&buf[offset]);
offset += 8;
- fw_fixed_hdr->drv_ver = be32_to_cpup((__be32 *)&buf[offset]);
+ fw_fixed_hdr->drv_ver = get_unaligned_be32(&buf[offset]);
offset += 72;
out:
@@ -1858,7 +1857,7 @@ static int fw_parse_calibration_data(struct tasdevice_priv *tas_priv,
offset = -EINVAL;
goto out;
}
- tas_fmw->nr_calibrations = be16_to_cpup((__be16 *)&data[offset]);
+ tas_fmw->nr_calibrations = get_unaligned_be16(&data[offset]);
offset += 2;
if (tas_fmw->nr_calibrations != 1) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 85e3bbf7e5f0..7729f8f4d5e6 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -4018,6 +4018,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMD:
kfree(substream->runtime);
+ substream->runtime = NULL;
break;
default:
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 7e8fca0b0662..a643ef654b9d 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -851,6 +851,8 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum *
se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
}
+ se->items = le32_to_cpu(ec->items);
+ se->values = (const unsigned int *)se->dobj.control.dvalues;
return 0;
}
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 7133ec13322b..cf1e63daad86 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -2040,6 +2040,8 @@ static int sof_link_unload(struct snd_soc_component *scomp, struct snd_soc_dobj
if (!slink)
return 0;
+ slink->link->platforms->name = NULL;
+
kfree(slink->tuples);
list_del(&slink->list);
kfree(slink->hw_configs);
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 5124b6c9ceb4..d1cb49d54f00 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -100,8 +100,8 @@
#define SUN8I_I2S_CTRL_MODE_PCM (0 << 4)
#define SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK BIT(19)
-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED (1 << 19)
-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_NORMAL (0 << 19)
+#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH (1 << 19)
+#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW (0 << 19)
#define SUN8I_I2S_FMT0_LRCK_PERIOD_MASK GENMASK(17, 8)
#define SUN8I_I2S_FMT0_LRCK_PERIOD(period) ((period - 1) << 8)
#define SUN8I_I2S_FMT0_BCLK_POLARITY_MASK BIT(7)
@@ -727,65 +727,37 @@ static int sun4i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
unsigned int fmt)
{
- u32 mode, val;
+ u32 mode, lrclk_pol, bclk_pol, val;
u8 offset;
- /*
- * DAI clock polarity
- *
- * The setup for LRCK contradicts the datasheet, but under a
- * scope it's clear that the LRCK polarity is reversed
- * compared to the expected polarity on the bus.
- */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_IB_IF:
- /* Invert both clocks */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- /* Invert bit clock */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- /* Invert frame clock */
- val = 0;
- break;
- case SND_SOC_DAIFMT_NB_NF:
- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- default:
- return -EINVAL;
- }
-
- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
- val);
-
/* DAI Mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 1;
break;
case SND_SOC_DAIFMT_DSP_B:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 0;
break;
case SND_SOC_DAIFMT_I2S:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 1;
break;
case SND_SOC_DAIFMT_LEFT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 0;
break;
case SND_SOC_DAIFMT_RIGHT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_RIGHT;
offset = 0;
break;
@@ -803,6 +775,35 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
SUN8I_I2S_TX_CHAN_OFFSET_MASK,
SUN8I_I2S_TX_CHAN_OFFSET(offset));
+ /* DAI clock polarity */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ /* Invert both clocks */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Invert bit clock */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ /* Invert frame clock */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ /* No inversion */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+ lrclk_pol | bclk_pol);
+
/* DAI clock master masks */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
@@ -834,65 +835,37 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
unsigned int fmt)
{
- u32 mode, val;
+ u32 mode, lrclk_pol, bclk_pol, val;
u8 offset;
- /*
- * DAI clock polarity
- *
- * The setup for LRCK contradicts the datasheet, but under a
- * scope it's clear that the LRCK polarity is reversed
- * compared to the expected polarity on the bus.
- */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_IB_IF:
- /* Invert both clocks */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- /* Invert bit clock */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- /* Invert frame clock */
- val = 0;
- break;
- case SND_SOC_DAIFMT_NB_NF:
- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- default:
- return -EINVAL;
- }
-
- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
- val);
-
/* DAI Mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 1;
break;
case SND_SOC_DAIFMT_DSP_B:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 0;
break;
case SND_SOC_DAIFMT_I2S:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 1;
break;
case SND_SOC_DAIFMT_LEFT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 0;
break;
case SND_SOC_DAIFMT_RIGHT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_RIGHT;
offset = 0;
break;
@@ -910,6 +883,36 @@ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET_MASK,
SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET(offset));
+ /* DAI clock polarity */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ /* Invert both clocks */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Invert bit clock */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ /* Invert frame clock */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ /* No inversion */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+ lrclk_pol | bclk_pol);
+
+
/* DAI clock master masks */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
index 3f114a2adfce..ab3c6b2544d2 100644
--- a/sound/soc/tegra/tegra210_ahub.c
+++ b/sound/soc/tegra/tegra210_ahub.c
@@ -2,7 +2,7 @@
//
// tegra210_ahub.c - Tegra210 AHUB driver
//
-// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+// Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#include <linux/clk.h>
#include <linux/device.h>
@@ -1391,11 +1391,13 @@ static int tegra_ahub_probe(struct platform_device *pdev)
return err;
}
+ pm_runtime_enable(&pdev->dev);
+
err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
- if (err)
+ if (err) {
+ pm_runtime_disable(&pdev->dev);
return err;
-
- pm_runtime_enable(&pdev->dev);
+ }
return 0;
}
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index de35b9a21dad..ceed16a10285 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9753,7 +9753,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
- if (prev == NULL)
+ if (prev == NULL && obj != NULL)
return obj->maps;
return __bpf_map__iter(prev, obj, 1);
@@ -9762,7 +9762,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
- if (next == NULL) {
+ if (next == NULL && obj != NULL) {
if (!obj->nr_maps)
return NULL;
return obj->maps + obj->nr_maps - 1;
diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
index 890a8236a8ba..2809f9a25c43 100644
--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
@@ -28,9 +28,11 @@ static int check_vgem(int fd)
version.name = name;
ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
- if (ret)
+ if (ret || version.name_len != 4)
return 0;
+ name[4] = '\0';
+
return !strcmp(name, "vgem");
}
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index d417de105123..91a48efb140b 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -85,7 +85,8 @@ TEST_GEN_FILES += csum
TEST_GEN_FILES += nat6to4.o
TEST_GEN_FILES += xdp_dummy.o
TEST_GEN_FILES += ip_local_port_range
-TEST_GEN_FILES += bind_wildcard
+TEST_GEN_PROGS += bind_wildcard
+TEST_GEN_PROGS += bind_timewait
TEST_PROGS += test_vxlan_mdb.sh
TEST_PROGS += test_bridge_neigh_suppress.sh
TEST_PROGS += test_vxlan_nolocalbypass.sh