Message-Id: <2023051722-lash-municipal-df95@gregkh>
Date: Wed, 17 May 2023 14:00:23 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org, akpm@...ux-foundation.org,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, jslaby@...e.cz,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: Linux 5.4.243
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 15e11f27b4c8..b89a2f6c9155 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -7794,3 +7794,30 @@ formats.
- 0x5001
- Interleaved raw UYVY and JPEG image format with embedded meta-data
used by Samsung S3C73MX camera sensors.
+
+.. _v4l2-mbus-metadata-fmts:
+
+Metadata Formats
+^^^^^^^^^^^^^^^^
+
+This section lists all metadata formats.
+
+The following table lists the existing metadata formats.
+
+.. tabularcolumns:: |p{8.0cm}|p{1.4cm}|p{7.7cm}|
+
+.. flat-table:: Metadata formats
+ :header-rows: 1
+ :stub-columns: 0
+
+ * - Identifier
+ - Code
+ - Comments
+ * .. _MEDIA-BUS-FMT-METADATA-FIXED:
+
+ - MEDIA_BUS_FMT_METADATA_FIXED
+ - 0x7001
+ - This format should be used when the same driver handles
+ both sides of the link and the bus format is a fixed
+ metadata format that is not configurable from userspace.
+ Width and height will be set to 0 for this format.
diff --git a/Makefile b/Makefile
index ab3ba4358842..f660c3e224ec 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 242
+SUBLEVEL = 243
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/arm/boot/dts/exynos4412-itop-elite.dts b/arch/arm/boot/dts/exynos4412-itop-elite.dts
index f6d0a5f5d339..9a2a49420d4d 100644
--- a/arch/arm/boot/dts/exynos4412-itop-elite.dts
+++ b/arch/arm/boot/dts/exynos4412-itop-elite.dts
@@ -179,7 +179,7 @@
compatible = "wlf,wm8960";
reg = <0x1a>;
clocks = <&pmu_system_controller 0>;
- clock-names = "MCLK1";
+ clock-names = "mclk";
wlf,shared-lrclk;
#sound-dai-cells = <0>;
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 338256c59ca5..fc31ee980639 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -393,8 +393,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
- <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
+ ranges = <0x81000000 0x0 0x00000000 0x40200000 0x0 0x00100000>,
+ <0x82000000 0x0 0x40300000 0x40300000 0x0 0x00d00000>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 16c0da97932c..976a8a1fa953 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -451,8 +451,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x0fe00000 0x0fe00000 0 0x00100000 /* downstream I/O */
- 0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x0fe00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x08000000 0x08000000 0x0 0x07e00000>; /* MEM */
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -502,8 +502,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x31e00000 0x31e00000 0 0x00100000 /* downstream I/O */
- 0x82000000 0 0x2e000000 0x2e000000 0 0x03e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x31e00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x2e000000 0x2e000000 0x0 0x03e00000>; /* MEM */
interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -553,8 +553,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x35e00000 0x35e00000 0 0x00100000 /* downstream I/O */
- 0x82000000 0 0x32000000 0x32000000 0 0x03e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x35e00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x32000000 0x32000000 0x0 0x03e00000>; /* MEM */
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index 61822afa30ab..cd8f6e7a7f7f 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -571,7 +571,7 @@
interrupts = <29>;
clocks = <&clocks CLK_CSIS>,
<&clocks SCLK_CSIS>;
- clock-names = "clk_csis",
+ clock-names = "csis",
"sclk_csis";
bus-width = <4>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 73ded80a79ba..1de7891c658c 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -49,17 +49,14 @@
opp-shared;
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
opp-suspend;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index dabee157119f..82efc8a3627d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -60,17 +60,14 @@
opp-shared;
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
opp-suspend;
};
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 0253691c4b3a..bd451732a3af 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -116,6 +116,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
int kernel_active_single_step(void);
+void kernel_rewind_single_step(struct pt_regs *regs);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index d64a3c1e1b6b..62ab9d4f995e 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -441,6 +441,11 @@ int kernel_active_single_step(void)
}
NOKPROBE_SYMBOL(kernel_active_single_step);
+void kernel_rewind_single_step(struct pt_regs *regs)
+{
+ set_regs_spsr_ss(regs);
+}
+
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 1a157ca33262..e4e95821b1f6 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -223,6 +223,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
*/
if (!kernel_active_single_step())
kernel_enable_single_step(linux_regs);
+ else
+ kernel_rewind_single_step(linux_regs);
err = 0;
break;
default:
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index b392c0a50346..0ec8b55b28ac 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu)
* 'data' contains an integer that corresponds to the feature we're
* testing
*/
-static int proc_salinfo_show(struct seq_file *m, void *v)
+static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
{
unsigned long data = (unsigned long)v;
seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 5b00dc3898e1..f2b40bcd2495 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -81,7 +81,7 @@ void *per_cpu_init(void)
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
-static inline void
+static inline __init void
alloc_per_cpu_data(void)
{
size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
index 6ecda64ad184..ed88abc40513 100644
--- a/arch/mips/fw/lib/cmdline.c
+++ b/arch/mips/fw/lib/cmdline.c
@@ -51,7 +51,7 @@ char *fw_getenv(char *envname)
{
char *result = NULL;
- if (_fw_envp != NULL) {
+ if (_fw_envp != NULL && fw_envp(0) != NULL) {
/*
* Return a pointer to the given environment variable.
* YAMON uses "name", "value" pairs, while U-Boot uses
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 6b27cf4a0d78..8db433947d39 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -173,7 +173,6 @@ handler: ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
/* r30 already save */ ;\
-/* l.sw PT_GPR30(r1),r30*/ ;\
l.sw PT_GPR31(r1),r31 ;\
TRACE_IRQS_OFF_ENTRY ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
@@ -211,9 +210,8 @@ handler: ;\
l.sw PT_GPR27(r1),r27 ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
- /* r31 already saved */ ;\
- l.sw PT_GPR30(r1),r30 ;\
-/* l.sw PT_GPR31(r1),r31 */ ;\
+ /* r30 already saved */ ;\
+ l.sw PT_GPR31(r1),r31 ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30 ;\
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 2b16d8d6598f..c37010a13586 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -248,9 +248,6 @@ ENTRY_CFI(real64_call_asm)
/* save fn */
copy %arg2, %r31
- /* set up the new ap */
- ldo 64(%arg1), %r29
-
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
@@ -262,7 +259,9 @@ ENTRY_CFI(real64_call_asm)
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
+ /* set up real-mode stack and real-mode ap */
tophys_r1 %sp
+ ldo -16(%sp), %r29 /* Reference param save area */
b,l rfi_virt2real,%r2
nop
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee810df7d522..0793579f2bb9 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -398,7 +398,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
}
if (buf)
- memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
+ memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
}
return buf;
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 30342b60aa63..42c3d40355d9 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -984,7 +984,7 @@ static void mpc5121_clk_provide_migration_support(void)
#define NODE_PREP do { \
of_address_to_resource(np, 0, &res); \
- snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \
+ snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \
} while (0)
#define NODE_CHK(clkname, clkitem, regnode, regflag) do { \
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index d39a9213a3e6..7dd2e2f97aae 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -144,7 +144,7 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np)
}
io_base = ioremap(res.start, resource_size(&res));
- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
__flipper_quiesce(io_base);
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index de10c13de15c..c6b492ebb766 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -173,7 +173,7 @@ static struct irq_domain *hlwd_pic_init(struct device_node *np)
return NULL;
}
- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
__hlwd_quiesce(io_base);
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 67e48b0a164e..c691453dfd3f 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -89,8 +89,8 @@ static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible)
hw_regs = ioremap(res.start, resource_size(&res));
if (hw_regs) {
- pr_info("%s at 0x%08x mapped to 0x%p\n", name,
- res.start, hw_regs);
+ pr_info("%s at 0x%pa mapped to 0x%p\n", name,
+ &res.start, hw_regs);
}
out_put:
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 49f9541954f8..3664ffcbb313 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -216,9 +216,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
(hose)->ops = &tsi108_direct_pci_ops;
- printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
- "Firmware bus number: %d->%d\n",
- rsrc.start, hose->first_busno, hose->last_busno);
+ pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n",
+ &rsrc.start, hose->first_busno, hose->last_busno);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 934ff84844fa..c633fe4d7039 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -380,7 +380,7 @@ static int __init sq_api_init(void)
if (unlikely(!sq_cache))
return ret;
- sq_bitmap = kzalloc(size, GFP_KERNEL);
+ sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
if (unlikely(!sq_bitmap))
goto out;
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 4adbd4ade319..b603b7968b38 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -64,7 +64,7 @@ ENTRY(_stext)
ldc r0, r6_bank
#endif
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
mov r4, r12 ! Store device tree blob pointer in r12
#endif
@@ -315,7 +315,7 @@ ENTRY(_stext)
10:
#endif
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
mov.l 8f, r0 ! Make flat device tree available early.
jsr @r0
mov r12, r4
@@ -346,7 +346,7 @@ ENTRY(stack_start)
5: .long start_kernel
6: .long cpu_init
7: .long init_thread_union
-#if defined(CONFIG_OF_FLATTREE)
+#if defined(CONFIG_OF_EARLY_FLATTREE)
8: .long sh_fdt_init
#endif
diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
index 11777867c6f5..a212b645b4cf 100644
--- a/arch/sh/kernel/nmi_debug.c
+++ b/arch/sh/kernel/nmi_debug.c
@@ -49,7 +49,7 @@ static int __init nmi_debug_setup(char *str)
register_die_notifier(&nmi_debug_nb);
if (*str != '=')
- return 0;
+ return 1;
for (p = str + 1; *p; p = sep + 1) {
sep = strchr(p, ',');
@@ -70,6 +70,6 @@ static int __init nmi_debug_setup(char *str)
break;
}
- return 0;
+ return 1;
}
__setup("nmi_debug", nmi_debug_setup);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 2c0e0f37a318..c25ee383cb83 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -243,7 +243,7 @@ void __init __weak plat_early_device_setup(void)
{
}
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
void __ref sh_fdt_init(phys_addr_t dt_phys)
{
static int done = 0;
@@ -330,7 +330,7 @@ void __init setup_arch(char **cmdline_p)
/* Let earlyprintk output early console messages */
early_platform_driver_probe("earlyprintk", 1, 1);
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
#ifdef CONFIG_USE_BUILTIN_DTB
unflatten_and_copy_device_tree();
#else
diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h
index 784f541344f3..bda50762b3d3 100644
--- a/arch/sh/math-emu/sfp-util.h
+++ b/arch/sh/math-emu/sfp-util.h
@@ -67,7 +67,3 @@
} while (0)
#define abort() return 0
-
-#define __BYTE_ORDER __LITTLE_ENDIAN
-
-
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 68c734032523..a3b7b2fb04cb 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -410,10 +410,9 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
if (vector && !eilvt_entry_is_changeable(vector, new))
/* may not change if vectors are different */
return rsvd;
- rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
- } while (rsvd != new);
+ } while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));
- rsvd &= ~APIC_EILVT_MASKED;
+ rsvd = new & ~APIC_EILVT_MASKED;
if (rsvd && rsvd != vector)
pr_info("LVT offset %d assigned for vector 0x%02x\n",
offset, rsvd);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1622cff009c9..e4d63392c619 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2455,17 +2455,21 @@ static int io_apic_get_redir_entries(int ioapic)
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
+ unsigned int ret;
+
/*
* dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
* gsi_top if ioapic_dynirq_base hasn't been initialized yet.
*/
- if (!ioapic_initialized)
- return gsi_top;
+ ret = ioapic_dynirq_base ? : gsi_top;
+
/*
- * For DT enabled machines ioapic_dynirq_base is irrelevant and not
- * updated. So simply return @from if ioapic_dynirq_base == 0.
+ * For DT enabled machines ioapic_dynirq_base is irrelevant and
+ * always 0. gsi_top can be 0 if there is no IO/APIC registered.
+ * 0 is an invalid interrupt number for dynamic allocations. Return
+ * @from instead.
*/
- return ioapic_dynirq_base ? : from;
+ return ret ? : from;
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e6dd6a7e8689..9bd08d264603 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7266,6 +7266,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
break;
+ case x86_intercept_pause:
+ /*
+ * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
+ * with vanilla NOPs in the emulator. Apply the interception
+ * check only to actual PAUSE instructions. Don't check
+ * PAUSE-loop-exiting, software can't expect a given PAUSE to
+ * exit, i.e. KVM is within its rights to allow L2 to execute
+ * the PAUSE.
+ */
+ if ((info->rep_prefix != REPE_PREFIX) ||
+ !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
+ return X86EMUL_CONTINUE;
+
+ break;
+
/* TODO: check more intercepts... */
default:
break;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 9329d9dcc210..df80752fb649 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1515,6 +1515,14 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
return 0;
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
+ if (IS_ERR(drbg->jent)) {
+ const int err = PTR_ERR(drbg->jent);
+
+ drbg->jent = NULL;
+ if (fips_enabled)
+ return err;
+ pr_info("DRBG: Continuing without Jitter RNG\n");
+ }
return 0;
}
@@ -1570,14 +1578,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
if (ret)
goto free_everything;
- if (IS_ERR(drbg->jent)) {
- ret = PTR_ERR(drbg->jent);
- drbg->jent = NULL;
- if (fips_enabled || ret != -ENOENT)
- goto free_everything;
- pr_info("DRBG: Continuing without Jitter RNG\n");
- }
-
reseed = false;
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 7000c836951c..980e9a76e172 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -491,7 +491,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
bool cpu_is_hotpluggable(unsigned cpu)
{
struct device *dev = get_cpu_device(cpu);
- return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable
+ && tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 4f63a8bc7260..edb791354421 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
/*
* Traverse all sub-domains within the domain. This can be
* done without any additional locking as the link->performance_state
- * field is protected by the master genpd->lock, which is already taken.
+ * field is protected by the parent genpd->lock, which is already taken.
*
* Also note that link->performance_state (subdomain's performance state
- * requirement to master domain) is different from
- * link->slave->performance_state (current performance state requirement
+ * requirement to parent domain) is different from
+ * link->child->performance_state (current performance state requirement
* of the devices/sub-domains of the subdomain) and so can have a
* different value.
*
* Note that we also take vote from powered-off sub-domains into account
* as the same is done for devices right now.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
if (link->performance_state > state)
state = link->performance_state;
}
@@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
- struct generic_pm_domain *master;
+ struct generic_pm_domain *parent;
struct gpd_link *link;
- int master_state, ret;
+ int parent_state, ret;
if (state == genpd->performance_state)
return 0;
- /* Propagate to masters of genpd */
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- master = link->master;
+ /* Propagate to parents of genpd */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- /* Find master's performance state */
+ /* Find parent's performance state */
ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- master->opp_table,
+ parent->opp_table,
state);
if (unlikely(ret < 0))
goto err;
- master_state = ret;
+ parent_state = ret;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
link->prev_performance_state = link->performance_state;
- link->performance_state = master_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- ret = _genpd_set_performance_state(master, master_state, depth + 1);
+ link->performance_state = parent_state;
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
if (ret)
link->performance_state = link->prev_performance_state;
- genpd_unlock(master);
+ genpd_unlock(parent);
if (ret)
goto err;
@@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
err:
/* Encountered an error, lets rollback */
- list_for_each_entry_continue_reverse(link, &genpd->slave_links,
- slave_node) {
- master = link->master;
+ list_for_each_entry_continue_reverse(link, &genpd->child_links,
+ child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
- master_state = link->prev_performance_state;
- link->performance_state = master_state;
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
pr_err("%s: Failed to roll back to %d performance state\n",
- master->name, master_state);
+ parent->name, parent_state);
}
- genpd_unlock(master);
+ genpd_unlock(parent);
}
return ret;
@@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_power_on() for the master yet after
+ * managed to call genpd_power_on() for the parent yet after
* incrementing it. In that case genpd_power_on() will wait
* for us to drop the lock, so we can call .power_off() and let
* the genpd_power_on() restore power for us (this shouldn't
@@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
genpd->status = GPD_STATE_POWER_OFF;
genpd_update_accounting(genpd);
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return 0;
}
/**
- * genpd_power_on - Restore power to a given PM domain and its masters.
+ * genpd_power_on - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
*
- * Restore power to @genpd and all of its masters so that it is possible to
+ * Restore power to @genpd and all of its parents so that it is possible to
* resume a device belonging to it.
*/
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
@@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
/*
* The list is guaranteed not to change while the loop below is being
- * executed, unless one of the masters' .power_on() callbacks fiddles
+ * executed, unless one of the parents' .power_on() callbacks fiddles
* with it.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_sd_counter_inc(master);
+ genpd_sd_counter_inc(parent);
- genpd_lock_nested(master, depth + 1);
- ret = genpd_power_on(master, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ ret = genpd_power_on(parent, depth + 1);
+ genpd_unlock(parent);
if (ret) {
- genpd_sd_counter_dec(master);
+ genpd_sd_counter_dec(parent);
goto err;
}
}
@@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
err:
list_for_each_entry_continue_reverse(link,
- &genpd->slave_links,
- slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ &genpd->child_links,
+ child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return ret;
@@ -943,13 +943,13 @@ static bool genpd_present(const struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_SLEEP
/**
- * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
* @genpd: PM domain to power off, if possible.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its masters.
+ * hibernation) and do that if so. Also, in that case propagate to its parents.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
@@ -974,21 +974,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
genpd->status = GPD_STATE_POWER_OFF;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_off(link->master, use_lock, depth + 1);
+ genpd_sync_power_off(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
}
/**
- * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
* @genpd: PM domain to power on.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
@@ -1005,16 +1005,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
if (genpd_status_on(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_inc(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_on(link->master, use_lock, depth + 1);
+ genpd_sync_power_on(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
_genpd_power_on(genpd, false);
@@ -1454,12 +1454,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd,
if (!genpd_is_cpu_domain(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_lock_nested(master, depth + 1);
- genpd_update_cpumask(master, cpu, set, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ genpd_update_cpumask(parent, cpu, set, depth + 1);
+ genpd_unlock(parent);
}
if (set)
@@ -1647,17 +1647,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(itr, &genpd->master_links, master_node) {
- if (itr->slave == subdomain && itr->master == genpd) {
+ list_for_each_entry(itr, &genpd->parent_links, parent_node) {
+ if (itr->child == subdomain && itr->parent == genpd) {
ret = -EINVAL;
goto out;
}
}
- link->master = genpd;
- list_add_tail(&link->master_node, &genpd->master_links);
- link->slave = subdomain;
- list_add_tail(&link->slave_node, &subdomain->slave_links);
+ link->parent = genpd;
+ list_add_tail(&link->parent_node, &genpd->parent_links);
+ link->child = subdomain;
+ list_add_tail(&link->child_node, &subdomain->child_links);
if (genpd_status_on(subdomain))
genpd_sd_counter_inc(genpd);
@@ -1671,7 +1671,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @genpd: Master PM domain to add the subdomain to.
+ * @genpd: Leader PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
*/
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
@@ -1689,7 +1689,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
- * @genpd: Master PM domain to remove the subdomain from.
+ * @genpd: Leader PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
*/
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
@@ -1704,19 +1704,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
genpd_lock(subdomain);
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
- if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n",
genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
- list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
- if (link->slave != subdomain)
+ list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
+ if (link->child != subdomain)
continue;
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
if (genpd_status_on(subdomain))
genpd_sd_counter_dec(genpd);
@@ -1781,8 +1781,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- INIT_LIST_HEAD(&genpd->master_links);
- INIT_LIST_HEAD(&genpd->slave_links);
+ INIT_LIST_HEAD(&genpd->parent_links);
+ INIT_LIST_HEAD(&genpd->child_links);
INIT_LIST_HEAD(&genpd->dev_list);
genpd_lock_init(genpd);
genpd->gov = gov;
@@ -1858,15 +1858,15 @@ static int genpd_remove(struct generic_pm_domain *genpd)
return -EBUSY;
}
- if (!list_empty(&genpd->master_links) || genpd->device_count) {
+ if (!list_empty(&genpd->parent_links) || genpd->device_count) {
genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
- list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
}
@@ -2793,12 +2793,12 @@ static int genpd_summary_one(struct seq_file *s,
/*
* Modifications on the list require holding locks on both
- * master and slave, so we are safe.
+ * parent and child, so we are safe.
* Also genpd->name is immutable.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- seq_printf(s, "%s", link->slave->name);
- if (!list_is_last(&link->master_node, &genpd->master_links))
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ seq_printf(s, "%s", link->child->name);
+ if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
}
@@ -2826,7 +2826,7 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status slaves\n");
+ seq_puts(s, "domain status children\n");
seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n");
@@ -2881,8 +2881,8 @@ static int sub_domains_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- list_for_each_entry(link, &genpd->master_links, master_node)
- seq_printf(s, "%s\n", link->slave->name);
+ list_for_each_entry(link, &genpd->parent_links, parent_node)
+ seq_printf(s, "%s\n", link->child->name);
genpd_unlock(genpd);
return ret;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index daa8c7689f7e..490ed7deb99a 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*
* All subdomains have been powered off already at this point.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- struct generic_pm_domain *sd = link->slave;
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *sd = link->child;
s64 sd_max_off_ns = sd->max_off_time_ns;
if (sd_max_off_ns < 0)
@@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
}
/*
- * We have to invalidate the cached results for the masters, so
+ * We have to invalidate the cached results for the parents, so
* use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
+ * going to be called for any parent until this instance
* returns.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
+ list_for_each_entry(link, &genpd->child_links, child_node)
+ link->parent->max_off_time_changed = true;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 2b3103c30857..d94f41a0abbe 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1298,7 +1298,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 81125fb18035..fd9571d5fdac 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -343,7 +343,6 @@ static void btsdio_remove(struct sdio_func *func)
BT_DBG("func %p", func);
- cancel_work_sync(&data->work);
if (!data)
return;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index d5f068a10a5a..2907b0bca1af 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -562,8 +562,10 @@ static void retry_timeout(struct timer_list *t)
if (waiting)
start_get(ssif_info);
- if (resend)
+ if (resend) {
start_resend(ssif_info);
+ ssif_inc_stat(ssif_info, send_retries);
+ }
}
static void watch_timeout(struct timer_list *t)
@@ -789,9 +791,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
/*
- * Don't abort here, maybe it was a queued
- * response to a previous command.
+ * Recv error response, give up.
*/
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Invalid response getting flags: %x %x\n",
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 70f785994228..cb0660304e48 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -613,7 +613,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
{
const char *desc = "attempting to generate an interrupt";
u32 cap2;
@@ -622,7 +622,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
ret = request_locality(chip, 0);
if (ret < 0)
- return ret;
+ return;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
@@ -630,8 +630,6 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
release_locality(chip, 0);
-
- return ret;
}
/* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -661,42 +659,37 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
if (rc < 0)
- return rc;
+ goto restore_irqs;
rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
if (rc < 0)
- return rc;
+ goto restore_irqs;
/* Clear all existing */
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
if (rc < 0)
- return rc;
-
+ goto restore_irqs;
/* Turn on */
rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
intmask | TPM_GLOBAL_INT_ENABLE);
if (rc < 0)
- return rc;
+ goto restore_irqs;
priv->irq_tested = false;
/* Generate an interrupt by having the core call through to
* tpm_tis_send
*/
- rc = tpm_tis_gen_interrupt(chip);
- if (rc < 0)
- return rc;
+ tpm_tis_gen_interrupt(chip);
+restore_irqs:
/* tpm_tis_send will either confirm the interrupt is working or it
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- rc = tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
- if (rc < 0)
- return rc;
-
- return 1;
+ tpm_tis_write8(priv, original_int_vec,
+ TPM_INT_VECTOR(priv->locality));
+ return -1;
}
return 0;
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 2ef819606c41..1a4e6340f95c 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }
pclk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(pclk)) {
if (PTR_ERR(pclk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get parent clock %d for %pOF\n",
@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
if (rc < 0)
goto err;
if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
rc = 0;
goto err;
}
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get clock %d for %pOF\n",
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index ce1d2446f142..1a75cc9f68ef 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1259,7 +1259,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKSEL_CON(56), 6, 2, MFLAGS,
RK3399_CLKGATE_CON(10), 7, GFLAGS),
- COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0,
+ COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS),
/* gic */
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index e421946a91c5..3dc0c6ceed02 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -18,7 +18,7 @@
#include <clocksource/timer-davinci.h>
#undef pr_fmt
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+#define pr_fmt(fmt) "%s: " fmt, __func__
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
@@ -250,30 +250,32 @@ int __init davinci_timer_register(struct clk *clk,
rv = clk_prepare_enable(clk);
if (rv) {
- pr_err("Unable to prepare and enable the timer clock");
+ pr_err("Unable to prepare and enable the timer clock\n");
return rv;
}
if (!request_mem_region(timer_cfg->reg.start,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
- pr_err("Unable to request memory region");
- return -EBUSY;
+ pr_err("Unable to request memory region\n");
+ rv = -EBUSY;
+ goto exit_clk_disable;
}
base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
- pr_err("Unable to map the register range");
- return -ENOMEM;
+ pr_err("Unable to map the register range\n");
+ rv = -ENOMEM;
+ goto exit_mem_region;
}
davinci_timer_init(base);
tick_rate = clk_get_rate(clk);
- clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL | __GFP_NOFAIL);
+ clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
if (!clockevent) {
- pr_err("Error allocating memory for clockevent data");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto exit_iounmap_base;
}
clockevent->dev.name = "tim12";
@@ -298,8 +300,8 @@ int __init davinci_timer_register(struct clk *clk,
davinci_timer_irq_timer, IRQF_TIMER,
"clockevent/tim12", clockevent);
if (rv) {
- pr_err("Unable to request the clockevent interrupt");
- return rv;
+ pr_err("Unable to request the clockevent interrupt\n");
+ goto exit_free_clockevent;
}
davinci_clocksource.dev.rating = 300;
@@ -325,14 +327,28 @@ int __init davinci_timer_register(struct clk *clk,
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
- pr_err("Unable to register clocksource");
- return rv;
+ pr_err("Unable to register clocksource\n");
+ goto exit_free_irq;
}
sched_clock_register(davinci_timer_read_sched_clock,
DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
return 0;
+
+exit_free_irq:
+ free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+ clockevent);
+exit_free_clockevent:
+ kfree(clockevent);
+exit_iounmap_base:
+ iounmap(base);
+exit_mem_region:
+ release_mem_region(timer_cfg->reg.start,
+ resource_size(&timer_cfg->reg));
+exit_clk_disable:
+ clk_disable_unprepare(clk);
+ return rv;
}
static int __init of_davinci_timer_register(struct device_node *np)
@@ -343,20 +359,20 @@ static int __init of_davinci_timer_register(struct device_node *np)
rv = of_address_to_resource(np, 0, &timer_cfg.reg);
if (rv) {
- pr_err("Unable to get the register range for timer");
+ pr_err("Unable to get the register range for timer\n");
return rv;
}
rv = of_irq_to_resource_table(np, timer_cfg.irq,
DAVINCI_TIMER_NUM_IRQS);
if (rv != DAVINCI_TIMER_NUM_IRQS) {
- pr_err("Unable to get the interrupts for timer");
+ pr_err("Unable to get the interrupts for timer\n");
return rv;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("Unable to get the timer clock");
+ pr_err("Unable to get the timer clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index f261a57af1c0..919d6f1ced8d 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -57,10 +57,6 @@ struct quad8_iio {
#define QUAD8_REG_CHAN_OP 0x11
#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */
@@ -97,9 +93,6 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
{
struct quad8_iio *const priv = iio_priv(indio_dev);
const int base_offset = priv->base + 2 * chan->channel;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
int i;
switch (mask) {
@@ -110,12 +103,7 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
- flags = inb(base_offset + 1);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- *val = (borrow ^ carry) << 24;
+ *val = 0;
mutex_lock(&priv->lock);
@@ -639,19 +627,9 @@ static int quad8_count_read(struct counter_device *counter,
{
struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
- unsigned long position;
+ unsigned long position = 0;
int i;
- flags = inb(base_offset + 1);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- position = (unsigned long)(borrow ^ carry) << 24;
-
mutex_lock(&priv->lock);
/* Reset Byte Pointer; transfer Counter to Output Latch */
@@ -1204,8 +1182,8 @@ static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
mutex_unlock(&priv->lock);
- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
- return sprintf(buf, "33554431\n");
+ /* By default 0xFFFFFF (24 bits unsigned) is maximum count */
+ return sprintf(buf, "16777215\n");
}
static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9534f52210af..04638e183351 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1090,11 +1090,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
static int safexcel_request_ring_irq(void *pdev, int irqid,
int is_pci_dev,
+ int ring_id,
irq_handler_t handler,
irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
- int ret, irq;
+ int ret, irq, cpu;
struct device *dev;
if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1132,6 +1133,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
return ret;
}
+ /* Set affinity */
+ cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
return irq;
}
@@ -1462,19 +1467,23 @@ static int safexcel_probe_generic(void *pdev,
&priv->ring[i].rdr);
if (ret) {
dev_err(dev, "Failed to initialize rings\n");
- return ret;
+ goto err_cleanup_rings;
}
priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
- if (!priv->ring[i].rdr_req)
- return -ENOMEM;
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
- if (!ring_irq)
- return -ENOMEM;
+ if (!ring_irq) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq->priv = priv;
ring_irq->ring = i;
@@ -1482,14 +1491,17 @@ static int safexcel_probe_generic(void *pdev,
irq = safexcel_request_ring_irq(pdev,
EIP197_IRQ_NUMBER(i, is_pci_dev),
is_pci_dev,
+ i,
safexcel_irq_ring,
safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
- return irq;
+ ret = irq;
+ goto err_cleanup_rings;
}
+ priv->ring[i].irq = irq;
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
INIT_WORK(&priv->ring[i].work_data.work,
@@ -1498,8 +1510,10 @@ static int safexcel_probe_generic(void *pdev,
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue =
create_singlethread_workqueue(wq_name);
- if (!priv->ring[i].workqueue)
- return -ENOMEM;
+ if (!priv->ring[i].workqueue) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
priv->ring[i].requests = 0;
priv->ring[i].busy = false;
@@ -1516,16 +1530,26 @@ static int safexcel_probe_generic(void *pdev,
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "HW init failed (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
return 0;
+
+err_cleanup_rings:
+ for (i = 0; i < priv->config.rings; i++) {
+ if (priv->ring[i].irq)
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
+ if (priv->ring[i].workqueue)
+ destroy_workqueue(priv->ring[i].workqueue);
+ }
+
+ return ret;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
@@ -1627,8 +1651,10 @@ static int safexcel_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
- for (i = 0; i < priv->config.rings; i++)
+ for (i = 0; i < priv->config.rings; i++) {
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
destroy_workqueue(priv->ring[i].workqueue);
+ }
return 0;
}
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 930cc48a6f85..6a4d7f09bca9 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -640,6 +640,9 @@ struct safexcel_ring {
*/
struct crypto_async_request *req;
struct crypto_async_request *backlog;
+
+ /* irq of this ring */
+ int irq;
};
/* EIP integration context flags */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a406b3c0d170..fdf3b5be2d50 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -212,6 +212,7 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
+ u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
struct at_xdmac_chan chan[0];
};
@@ -1922,6 +1923,7 @@ static int atmel_xdmac_suspend(struct device *dev)
}
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+ atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
at_xdmac_off(atxdmac);
clk_disable_unprepare(atxdmac->clk);
@@ -1958,7 +1960,8 @@ static int atmel_xdmac_resume(struct device *dev)
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
wmb();
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ if (atxdmac->save_gs & atchan->mask)
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
}
}
return 0;
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index afbd1a459019..25d65f64cd50 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -165,7 +165,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}
-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
struct dw_edma_chunk *child;
struct dw_edma_desc *desc;
@@ -173,16 +173,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
vd = vchan_next_desc(&chan->vc);
if (!vd)
- return;
+ return 0;
desc = vd2dw_edma_desc(vd);
if (!desc)
- return;
+ return 0;
child = list_first_entry_or_null(&desc->chunk->list,
struct dw_edma_chunk, list);
if (!child)
- return;
+ return 0;
dw_edma_v0_core_start(child, !desc->xfer_sz);
desc->xfer_sz += child->ll_region.sz;
@@ -190,6 +190,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
list_del(&child->list);
kfree(child);
desc->chunks_alloc--;
+
+ return 1;
}
static int dw_edma_device_config(struct dma_chan *dchan,
@@ -273,9 +275,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
unsigned long flags;
+ if (!chan->configured)
+ return;
+
spin_lock_irqsave(&chan->vc.lock, flags);
- if (chan->configured && chan->request == EDMA_REQ_NONE &&
- chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+ if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+ chan->status == EDMA_ST_IDLE) {
chan->status = EDMA_ST_BUSY;
dw_edma_start_transfer(chan);
}
@@ -483,14 +488,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
switch (chan->request) {
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
- if (desc->chunks_alloc) {
- chan->status = EDMA_ST_BUSY;
- dw_edma_start_transfer(chan);
- } else {
+ if (!desc->chunks_alloc) {
list_del(&vd->node);
vchan_cookie_complete(vd);
- chan->status = EDMA_ST_IDLE;
}
+
+ /* Continue transferring if there are remaining chunks or issued requests.
+ */
+ chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
break;
case EDMA_REQ_STOP:
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 3fa884145eb1..d5109d96ebf1 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -751,7 +751,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
- ret = EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
goto disable_reg_clk;
}
if (!IS_ERR(xor_dev->clk)) {
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index b1d717cb8df9..f382cc70f9aa 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -459,7 +459,7 @@ static bool skx_rir_decode(struct decoded_addr *res)
}
static u8 skx_close_row[] = {
- 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
};
static u8 skx_close_column[] = {
@@ -467,7 +467,7 @@ static u8 skx_close_column[] = {
};
static u8 skx_open_row[] = {
- 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
};
static u8 skx_open_column[] = {
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index b9fdc20b4eb9..eda25d506059 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -585,8 +585,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
static void qcom_scm_shutdown(struct platform_device *pdev)
{
/* Clean shutdown, disable download mode to allow normal restart */
- if (download_mode)
- qcom_scm_set_download_mode(false);
+ qcom_scm_set_download_mode(false);
}
static const struct of_device_id qcom_scm_dt_match[] = {
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index da26a584dca0..1a690b2c1e2f 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -7,6 +7,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/kref.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -27,6 +28,8 @@ struct rpi_firmware {
struct mbox_chan *chan; /* The property channel. */
struct completion c;
u32 enabled;
+
+ struct kref consumers;
};
static DEFINE_MUTEX(transaction_lock);
@@ -214,12 +217,38 @@ static void rpi_register_clk_driver(struct device *dev)
-1, NULL, 0);
}
+static void rpi_firmware_delete(struct kref *kref)
+{
+ struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,
+ consumers);
+
+ mbox_free_channel(fw->chan);
+ kfree(fw);
+}
+
+void rpi_firmware_put(struct rpi_firmware *fw)
+{
+ kref_put(&fw->consumers, rpi_firmware_delete);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_put);
+
+static void devm_rpi_firmware_put(void *data)
+{
+ struct rpi_firmware *fw = data;
+
+ rpi_firmware_put(fw);
+}
+
static int rpi_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rpi_firmware *fw;
- fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ /*
+ * Memory will be freed by rpi_firmware_delete() once all users have
+ * released their firmware handles. Don't use devm_kzalloc() here.
+ */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
@@ -232,10 +261,12 @@ static int rpi_firmware_probe(struct platform_device *pdev)
int ret = PTR_ERR(fw->chan);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get mbox channel: %d\n", ret);
+ kfree(fw);
return ret;
}
init_completion(&fw->c);
+ kref_init(&fw->consumers);
platform_set_drvdata(pdev, fw);
@@ -264,7 +295,8 @@ static int rpi_firmware_remove(struct platform_device *pdev)
rpi_hwmon = NULL;
platform_device_unregister(rpi_clk);
rpi_clk = NULL;
- mbox_free_channel(fw->chan);
+
+ rpi_firmware_put(fw);
return 0;
}
@@ -273,19 +305,51 @@ static int rpi_firmware_remove(struct platform_device *pdev)
* rpi_firmware_get - Get pointer to rpi_firmware structure.
* @firmware_node: Pointer to the firmware Device Tree node.
*
+ * The reference to rpi_firmware has to be released with rpi_firmware_put().
+ *
* Returns NULL if the firmware device is not ready.
*/
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
struct platform_device *pdev = of_find_device_by_node(firmware_node);
+ struct rpi_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+ if (!fw)
+ return NULL;
+
+ if (!kref_get_unless_zero(&fw->consumers))
+ return NULL;
+
+ return fw;
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ struct rpi_firmware *fw;
+
+ fw = rpi_firmware_get(firmware_node);
+ if (!fw)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+ return NULL;
+
+ return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
+
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 08c422380a00..7122bc6ea796 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -981,8 +981,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return ret;
genpool = svc_create_memory_pool(pdev, sh_memory);
- if (!genpool)
- return -ENOMEM;
+ if (IS_ERR(genpool))
+ return PTR_ERR(genpool);
/* allocate service controller and supporting channel */
controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 4bab9028940a..aea4ceeed536 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -115,7 +115,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
/**
* fpga_bridge_get - get an exclusive reference to a fpga bridge
* @dev: parent device that fpga bridge was registered with
- * @info: fpga manager info
+ * @info: fpga image specific information
*
* Given a device, get an exclusive reference to a fpga bridge.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index c799691dfa84..94a503dc08b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -58,6 +58,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
@@ -71,8 +72,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return r;
}
- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
+ mutex_unlock(&mgr->lock);
fdput(f);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 685a2df01d09..762a407a4997 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3908,7 +3908,8 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 63205de4a565..7a50713268b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1534,7 +1534,6 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 23de332f3c6e..7ca7a68d588e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1911,9 +1911,11 @@ static int sdma_v4_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
return 0;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
}
sdma_v4_0_ctx_switch_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1e7083bc8a52..3f3242783e1c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5776,6 +5776,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
continue;
dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
@@ -7029,8 +7031,9 @@ static int dm_update_plane_state(struct dc *dc,
return -EINVAL;
}
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
- dc_plane_state_release(dm_old_plane_state->dc_state);
dm_new_plane_state->dc_state = NULL;
*lock_and_validation_needed = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index de246e183d6b..cdcd5051dd66 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1315,6 +1315,9 @@ bool dc_remove_plane_from_context(
struct dc_stream_status *stream_status = NULL;
struct resource_pool *pool = dc->res_pool;
+ if (!plane_state)
+ return true;
+
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
stream_status = &context->stream_status[i];
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4ae68bf04892..9eb490a6e016 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1340,6 +1340,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
+ var->xres_virtual = fb->width;
+ var->yres_virtual = fb->height;
+
/*
* Workaround for SDL 1.2, which is known to be setting all pixel format
* fields values to zero in some cases. We treat this situation as a
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d45f43feaf86..4c8bb93a89fb 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -460,8 +460,9 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
- schedule_delayed_work(&dev->mode_config.output_poll_work,
- 0);
+ mod_delayed_work(system_wq,
+ &dev->mode_config.output_poll_work,
+ 0);
}
/* Re-enable polling in case the global poll config changed. */
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 9c262daf5816..1061430aced2 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -775,8 +775,8 @@ static int decon_conf_irq(struct decon_context *ctx, const char *name,
return irq;
}
}
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
+ ret = devm_request_irq(ctx->dev, irq, handler,
+ flags | IRQF_NO_AUTOEN, "drm_decon", ctx);
if (ret < 0) {
dev_err(ctx->dev, "IRQ %s request failed\n", name);
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index babf3db82ce3..bfcbb89ff40a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1350,10 +1350,9 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
}
te_gpio_irq = gpio_to_irq(dsi->te_gpio);
- irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
- IRQF_TRIGGER_RISING, "TE", dsi);
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
if (ret) {
dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
gpio_free(dsi->te_gpio);
@@ -1792,9 +1791,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return dsi->irq;
}
- irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
- exynos_dsi_irq, IRQF_ONESHOT,
+ exynos_dsi_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
dev_name(dev), dsi);
if (ret) {
dev_err(dev, "failed to request dsi irq\n");
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 75ec703d22e0..89513c7c50d5 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -300,8 +300,10 @@ static int lima_pdev_probe(struct platform_device *pdev)
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
+ if (IS_ERR(ddev)) {
+ err = PTR_ERR(ddev);
+ goto err_out0;
+ }
ddev->dev_private = ldev;
ldev->ddev = ddev;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 0888e0df660d..2a727ab0faf7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -233,8 +233,11 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
if (ret)
return NULL;
- /* Make sure pm runtime is active and reset any previous errors */
- pm_runtime_set_active(&pdev->dev);
+ /*
+ * Now that we have firmware loaded, and are ready to begin
+ * booting the gpu, go ahead and enable runpm:
+ */
+ pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 3802ad38c519..6f83253a8c58 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -899,7 +899,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
pm_runtime_set_autosuspend_delay(&pdev->dev,
adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
@@ -908,11 +907,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
+ if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
+ pm_runtime_disable(&priv->gpu_pdev->dev);
+
icc_put(gpu->icc_path);
msm_gpu_cleanup(&adreno_gpu->base);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c7b48df8869a..3ee265f1755f 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -460,7 +460,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->panel.funcs = &otm8009a_drm_funcs;
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
- dsi->host->dev, ctx,
+ dev, ctx,
&otm8009a_backlight_ops,
NULL);
if (IS_ERR(ctx->bl_dev)) {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 291e89b4045f..96cc794147b5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -249,9 +249,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
else
ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
- if (ret)
- drm_gem_vm_close(vma);
-
return ret;
}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 9268f6fc3f66..92895937aba9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
+ mutex_destroy(&vfile->fence_mutex);
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index aa477a43a312..339bc7f1fced 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1265,6 +1265,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
struct input_dev *pen_input = wacom->pen_input;
unsigned char *data = wacom->data;
+ int number_of_valid_frames = 0;
+ int time_interval = 15000000;
+ ktime_t time_packet_received = ktime_get();
int i;
if (wacom->features.type == INTUOSP2_BT ||
@@ -1285,12 +1288,30 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
}
+ /* number of valid frames */
for (i = 0; i < pen_frames; i++) {
unsigned char *frame = &data[i*pen_frame_len + 1];
bool valid = frame[0] & 0x80;
+
+ if (valid)
+ number_of_valid_frames++;
+ }
+
+ if (number_of_valid_frames) {
+ if (wacom->hid_data.time_delayed)
+ time_interval = ktime_get() - wacom->hid_data.time_delayed;
+ time_interval /= number_of_valid_frames;
+ wacom->hid_data.time_delayed = time_packet_received;
+ }
+
+ for (i = 0; i < number_of_valid_frames; i++) {
+ unsigned char *frame = &data[i*pen_frame_len + 1];
+ bool valid = frame[0] & 0x80;
bool prox = frame[0] & 0x40;
bool range = frame[0] & 0x20;
bool invert = frame[0] & 0x10;
+ int frames_number_reversed = number_of_valid_frames - i - 1;
+ int event_timestamp = time_packet_received - frames_number_reversed * time_interval;
if (!valid)
continue;
@@ -1303,6 +1324,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->tool[0] = 0;
wacom->id[0] = 0;
wacom->serial[0] = 0;
+ wacom->hid_data.time_delayed = 0;
return;
}
@@ -1339,6 +1361,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
get_unaligned_le16(&frame[11]));
}
}
+
if (wacom->tool[0]) {
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
if (wacom->features.type == INTUOSP2_BT ||
@@ -1362,6 +1385,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->shared->stylus_in_proximity = prox;
+ /* add timestamp to unpack the frames */
+ input_set_timestamp(pen_input, event_timestamp);
+
input_sync(pen_input);
}
}
@@ -1853,6 +1879,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
int fmax = field->logical_maximum;
unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
int resolution_code = code;
+ int resolution = hidinput_calc_abs_res(field, resolution_code);
if (equivalent_usage == HID_DG_TWIST) {
resolution_code = ABS_RZ;
@@ -1875,8 +1902,15 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
switch (type) {
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
- input_abs_set_res(input, code,
- hidinput_calc_abs_res(field, resolution_code));
+
+ /* older tablet may miss physical usage */
+ if ((code == ABS_X || code == ABS_Y) && !resolution) {
+ resolution = WACOM_INTUOS_RES;
+ hid_warn(input,
+ "Wacom usage (%d) missing resolution \n",
+ code);
+ }
+ input_abs_set_res(input, code, resolution);
break;
case EV_KEY:
input_set_capability(input, EV_KEY, code);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index ca172efcf072..88badfbae999 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -320,6 +320,7 @@ struct hid_data {
int bat_connected;
int ps_connected;
bool pad_input_event_flag;
+ int time_delayed;
};
struct wacom_remote_data {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index c36522849325..a788f2e70677 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1058,7 +1058,7 @@ omap_i2c_isr(int irq, void *dev_id)
u16 stat;
stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
if (stat & mask)
ret = IRQ_WAKE_THREAD;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 2bd785e9e42a..4861093b0764 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -630,7 +630,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
static int palmas_gpadc_remove(struct platform_device *pdev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
if (adc->wakeup1_enable || adc->wakeup2_enable)
diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
index 00ba15499638..5103b1061a77 100644
--- a/drivers/iio/light/max44009.c
+++ b/drivers/iio/light/max44009.c
@@ -529,6 +529,12 @@ static int max44009_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
+static const struct of_device_id max44009_of_match[] = {
+ { .compatible = "maxim,max44009" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max44009_of_match);
+
static const struct i2c_device_id max44009_id[] = {
{ "max44009", 0 },
{ }
@@ -538,18 +544,13 @@ MODULE_DEVICE_TABLE(i2c, max44009_id);
static struct i2c_driver max44009_driver = {
.driver = {
.name = MAX44009_DRV_NAME,
+ .of_match_table = max44009_of_match,
},
.probe = max44009_probe,
.id_table = max44009_id,
};
module_i2c_driver(max44009_driver);
-static const struct of_device_id max44009_of_match[] = {
- { .compatible = "maxim,max44009" },
- { }
-};
-MODULE_DEVICE_TABLE(of, max44009_of_match);
-
MODULE_AUTHOR("Robert Eshleman <bobbyeshleman@...il.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX44009 ambient light sensor driver");
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 14d2a90964c3..a5631286c8e0 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -173,7 +173,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
goto unlock;
}
__mmu_int_rb_insert(mnode, &handler->root);
- list_add(&mnode->list, &handler->lru_list);
+ list_add_tail(&mnode->list, &handler->lru_list);
ret = handler->ops->insert(handler->ops_arg, mnode);
if (ret) {
@@ -220,8 +220,10 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len);
if (node) {
- if (node->addr == addr && node->len == len)
+ if (node->addr == addr && node->len == len) {
+ list_move_tail(&node->list, &handler->lru_list);
goto unlock;
+ }
__mmu_int_rb_remove(node, &handler->root);
list_del(&node->list); /* remove from LRU list */
ret = true;
@@ -242,8 +244,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
- list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
- list) {
+ list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
&stop)) {
__mmu_int_rb_remove(rbnode, &handler->root);
@@ -255,9 +256,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
}
spin_unlock_irqrestore(&handler->lock, flags);
- while (!list_empty(&del_list)) {
- rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
- list_del(&rbnode->list);
+ list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
handler->ops->remove(handler->ops_arg, rbnode);
}
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bca5358f3ef2..4523eb85ec69 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -438,9 +438,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
struct mlx4_ib_qp *qp,
struct mlx4_ib_create_qp *ucmd)
{
+ u32 cnt;
+
/* Sanity check SQ size before proceeding */
- if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
- ucmd->log_sq_stride >
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > dev->dev->caps.max_wqes)
+ return -EINVAL;
+ if (ucmd->log_sq_stride >
ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6edd30c92156..51623431b879 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3821,7 +3821,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -EINVAL;
if (attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
+ attr->port_num > dev->num_ports) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
return -EINVAL;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e97c13967174..905e2eaed095 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -505,8 +505,6 @@ void rvt_qp_exit(struct rvt_dev_info *rdi)
if (qps_inuse)
rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
qps_inuse);
- if (!rdi->qp_dev)
- return;
kfree(rdi->qp_dev->qp_table);
free_qpn_table(&rdi->qp_dev->qpn_table);
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index dbbf8c6c16d3..a462c2fc6f31 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -472,9 +472,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
dev_dbg(&netdev->dev, "siw: event %lu\n", event);
- if (dev_net(netdev) != &init_net)
- return NOTIFY_OK;
-
base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
if (!base_dev)
return NOTIFY_OK;
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 2b5120a13e37..42d03bd1622d 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -548,7 +548,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
data_len -= plen;
fp_off = 0;
- if (++seg > (int)MAX_ARRAY) {
+ if (++seg >= (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index 69881265d121..8135a80226fd 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -137,7 +137,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
return -ENOENT;
}
- fw = rpi_firmware_get(fw_node);
+ fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
of_node_put(fw_node);
if (!fw)
return -EPROBE_DEFER;
@@ -164,7 +164,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
touchbuf = (u32)ts->fw_regs_phys;
error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
&touchbuf, sizeof(touchbuf));
-
if (error || touchbuf != 0) {
dev_warn(dev, "Failed to set touchbuf, %d\n", error);
return error;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 2cbf66d1c300..34334adcad01 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -802,7 +802,7 @@ config LEDS_SPI_BYTE
config LEDS_TI_LMU_COMMON
tristate "LED driver for TI LMU"
depends on LEDS_CLASS
- depends on REGMAP
+ select REGMAP
help
Say Y to enable the LED driver for TI LMU devices.
This supports common features between the TI LM3532, LM3631, LM3632,
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index b5a534206edd..1605dcaa89e3 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -86,6 +86,7 @@ config ADB_PMU_LED
config ADB_PMU_LED_DISK
bool "Use front LED as DISK LED by default"
+ depends on ATA
depends on ADB_PMU_LED
depends on LEDS_CLASS
select LEDS_TRIGGERS
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index cb75dc035616..30d53a535d55 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -171,6 +171,7 @@ static void wf_sat_release(struct kref *ref)
if (sat->nr >= 0)
sats[sat->nr] = NULL;
+ of_node_put(sat->node);
kfree(sat);
}
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 1d0b8abbafc3..bb7bb1738647 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -110,7 +110,7 @@ struct zynqmp_ipi_pdata {
unsigned int method;
u32 local_id;
int num_mboxes;
- struct zynqmp_ipi_mbox *ipi_mboxes;
+ struct zynqmp_ipi_mbox ipi_mboxes[];
};
static struct device_driver zynqmp_ipi_mbox_driver = {
@@ -152,7 +152,7 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
struct zynqmp_ipi_message *msg;
u64 arg0, arg3;
struct arm_smccc_res res;
- int ret, i;
+ int ret, i, status = IRQ_NONE;
(void)irq;
arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
@@ -170,11 +170,11 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
memcpy_fromio(msg->data, mchan->req_buf,
msg->len);
mbox_chan_received_data(chan, (void *)msg);
- return IRQ_HANDLED;
+ status = IRQ_HANDLED;
}
}
}
- return IRQ_NONE;
+ return status;
}
/**
@@ -634,8 +634,13 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
- num_mboxes = of_get_child_count(np);
- pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)),
+ num_mboxes = of_get_available_child_count(np);
+ if (num_mboxes == 0) {
+ dev_err(dev, "mailbox nodes not available\n");
+ return -EINVAL;
+ }
+
+ pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -649,8 +654,6 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
}
pdata->num_mboxes = num_mboxes;
- pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *)
- ((char *)pdata + sizeof(*pdata));
mbox = pdata->ipi_mboxes;
for_each_available_child_of_node(np, nc) {
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index ce9d03f4e981..355afca2969d 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2232,6 +2232,7 @@ static int __init dm_clone_init(void)
r = dm_register_target(&clone_target);
if (r < 0) {
DMERR("Failed to register clone target");
+ kmem_cache_destroy(_hydration_cache);
return r;
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index bed263267323..967d4ad6d32c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -124,9 +124,9 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* Direction r or w?
*/
arg_name = dm_shift_arg(as);
- if (!strcasecmp(arg_name, "w"))
+ if (arg_name && !strcasecmp(arg_name, "w"))
fc->corrupt_bio_rw = WRITE;
- else if (!strcasecmp(arg_name, "r"))
+ else if (arg_name && !strcasecmp(arg_name, "r"))
fc->corrupt_bio_rw = READ;
else {
ti->error = "Invalid corrupt bio direction (r or w)";
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 3c9a2be97e55..d7911c623edd 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4288,11 +4288,13 @@ static int __init dm_integrity_init(void)
}
r = dm_register_target(&integrity_target);
-
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ kmem_cache_destroy(journal_io_cache);
+ return r;
+ }
- return r;
+ return 0;
}
static void __exit dm_integrity_exit(void)
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index bcb282f11053..477e616a21c7 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1435,11 +1435,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s
hc->new_map = NULL;
}
- param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
-
- __dev_status(hc->md, param);
md = hc->md;
up_write(&_hash_lock);
+
+ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+ __dev_status(md, param);
+
if (old_map) {
dm_sync_table(md);
dm_table_destroy(old_map);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 9dcdf34b7e32..965b5139c897 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -471,13 +471,14 @@ static int verity_verify_io(struct dm_verity_io *io)
struct bvec_iter start;
unsigned b;
struct crypto_wait wait;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
for (b = 0; b < io->n_blocks; b++) {
int r;
sector_t cur_block = io->block + b;
struct ahash_request *req = verity_io_hash_req(v, io);
- if (v->validated_blocks &&
+ if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
likely(test_bit(cur_block, v->validated_blocks))) {
verity_bv_skip_block(v, io, &io->iter);
continue;
@@ -525,9 +526,17 @@ static int verity_verify_io(struct dm_verity_io *io)
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0)
continue;
- else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
- cur_block))
- return -EIO;
+ else {
+ if (bio->bi_status) {
+ /*
+ * Error correction failed; Just return error
+ */
+ return -EIO;
+ }
+ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+ cur_block))
+ return -EIO;
+ }
}
return 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9fcc141e1ad6..aee429ab114a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2229,11 +2229,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
int d;
- struct bio *wbio, *wbio2;
+ struct bio *wbio = r10_bio->devs[1].bio;
+ struct bio *wbio2 = r10_bio->devs[1].repl_bio;
+
+ /* Need to test wbio2->bi_end_io before we call
+ * generic_make_request as if the former is NULL,
+ * the latter is free to free wbio2.
+ */
+ if (wbio2 && !wbio2->bi_end_io)
+ wbio2 = NULL;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
fix_recovery_read_error(r10_bio);
- end_sync_request(r10_bio);
+ if (wbio->bi_end_io)
+ end_sync_request(r10_bio);
+ if (wbio2)
+ end_sync_request(r10_bio);
return;
}
@@ -2242,14 +2253,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* and submit the write request
*/
d = r10_bio->devs[1].devnum;
- wbio = r10_bio->devs[1].bio;
- wbio2 = r10_bio->devs[1].repl_bio;
- /* Need to test wbio2->bi_end_io before we call
- * generic_make_request as if the former is NULL,
- * the latter is free to free wbio2.
- */
- if (wbio2 && !wbio2->bi_end_io)
- wbio2 = NULL;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
@@ -2917,10 +2920,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t chunk_mask = conf->geo.chunk_mask;
int page_idx = 0;
- if (!mempool_initialized(&conf->r10buf_pool))
- if (init_resync(conf))
- return 0;
-
/*
* Allow skipping a full rebuild for incremental assembly
* of a clean array, like RAID1 does.
@@ -2936,6 +2935,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
return mddev->dev_sectors - sector_nr;
}
+ if (!mempool_initialized(&conf->r10buf_pool))
+ if (init_resync(conf))
+ return 0;
+
skipped:
max_sector = mddev->dev_sectors;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -3632,6 +3635,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
return nc*fc;
}
+static void raid10_free_conf(struct r10conf *conf)
+{
+ if (!conf)
+ return;
+
+ mempool_exit(&conf->r10bio_pool);
+ kfree(conf->mirrors);
+ kfree(conf->mirrors_old);
+ kfree(conf->mirrors_new);
+ safe_put_page(conf->tmppage);
+ bioset_exit(&conf->bio_split);
+ kfree(conf);
+}
+
static struct r10conf *setup_conf(struct mddev *mddev)
{
struct r10conf *conf = NULL;
@@ -3714,20 +3731,24 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- if (conf) {
- mempool_exit(&conf->r10bio_pool);
- kfree(conf->mirrors);
- safe_put_page(conf->tmppage);
- bioset_exit(&conf->bio_split);
- kfree(conf);
- }
+ raid10_free_conf(conf);
return ERR_PTR(err);
}
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+ int raid_disks = conf->geo.raid_disks;
+
+ if (!(conf->geo.raid_disks % conf->geo.near_copies))
+ raid_disks /= conf->geo.near_copies;
+ blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+ raid_disks);
+}
+
static int raid10_run(struct mddev *mddev)
{
struct r10conf *conf;
- int i, disk_idx, chunk_size;
+ int i, disk_idx;
struct raid10_info *disk;
struct md_rdev *rdev;
sector_t size;
@@ -3748,6 +3769,9 @@ static int raid10_run(struct mddev *mddev)
if (!conf)
goto out;
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
if (mddev_is_clustered(conf->mddev)) {
int fc, fo;
@@ -3760,21 +3784,13 @@ static int raid10_run(struct mddev *mddev)
}
}
- mddev->thread = conf->thread;
- conf->thread = NULL;
-
- chunk_size = mddev->chunk_sectors << 9;
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
- blk_queue_io_min(mddev->queue, chunk_size);
- if (conf->geo.raid_disks % conf->geo.near_copies)
- blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
- else
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->geo.raid_disks / conf->geo.near_copies));
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ raid10_set_io_opt(conf);
}
rdev_for_each(rdev, mddev) {
@@ -3934,10 +3950,7 @@ static int raid10_run(struct mddev *mddev)
out_free_conf:
md_unregister_thread(&mddev->thread);
- mempool_exit(&conf->r10bio_pool);
- safe_put_page(conf->tmppage);
- kfree(conf->mirrors);
- kfree(conf);
+ raid10_free_conf(conf);
mddev->private = NULL;
out:
return -EIO;
@@ -3945,15 +3958,7 @@ static int raid10_run(struct mddev *mddev)
static void raid10_free(struct mddev *mddev, void *priv)
{
- struct r10conf *conf = priv;
-
- mempool_exit(&conf->r10bio_pool);
- safe_put_page(conf->tmppage);
- kfree(conf->mirrors);
- kfree(conf->mirrors_old);
- kfree(conf->mirrors_new);
- bioset_exit(&conf->bio_split);
- kfree(conf);
+ raid10_free_conf(priv);
}
static void raid10_quiesce(struct mddev *mddev, int quiesce)
@@ -4748,6 +4753,7 @@ static void end_reshape(struct r10conf *conf)
stripe /= conf->geo.near_copies;
if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ raid10_set_io_opt(conf);
}
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d0c3f49c8c16..113ba084fab4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7159,6 +7159,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 0;
}
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+ blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+ (conf->raid_disks - conf->max_degraded));
+}
+
static int raid5_run(struct mddev *mddev)
{
struct r5conf *conf;
@@ -7448,8 +7454,7 @@ static int raid5_run(struct mddev *mddev)
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks - conf->max_degraded));
+ raid5_set_io_opt(conf);
mddev->queue->limits.raid_partial_stripes_expensive = 1;
/*
* We can only discard a whole stripe. It doesn't make sense to
@@ -8043,6 +8048,7 @@ static void end_reshape(struct r5conf *conf)
/ PAGE_SIZE);
if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ raid5_set_io_opt(conf);
}
}
}
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index bb3a8cc9de0c..6dbd98a3e5b8 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -1179,6 +1179,7 @@ static void dm1105_remove(struct pci_dev *pdev)
struct dvb_demux *dvbdemux = &dev->demux;
struct dmx_demux *dmx = &dvbdemux->dmx;
+ cancel_work_sync(&dev->ir.work);
dm1105_ir_exit(dev);
dmx->close(dmx);
dvb_net_release(&dev->dvbnet);
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 6a5053126237..437dbe5e75e2 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -300,6 +300,7 @@ int saa7134_ts_start(struct saa7134_dev *dev)
int saa7134_ts_fini(struct saa7134_dev *dev)
{
+ del_timer_sync(&dev->ts_q.timeout);
saa7134_pgtable_free(dev->pci, &dev->ts_q.pt);
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 3f0b0933eed6..3e773690468b 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -185,6 +185,7 @@ int saa7134_vbi_init1(struct saa7134_dev *dev)
int saa7134_vbi_fini(struct saa7134_dev *dev)
{
/* nothing */
+ del_timer_sync(&dev->vbi_q.timeout);
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index e454a288229b..ea26c8c57494 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2154,6 +2154,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
void saa7134_video_fini(struct saa7134_dev *dev)
{
+ del_timer_sync(&dev->video_q.timeout);
/* free stuff */
vb2_queue_release(&dev->video_vbq);
saa7134_pgtable_free(dev->pci, &dev->video_q.pt);
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index ea9f7d0058a2..e201d5a56bc6 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -822,10 +822,10 @@ static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, s
av7110_ipack_flush(ipack);
if (buf[3] & ADAPT_FIELD) {
+ if (buf[4] > len - 1 - 4)
+ return 0;
len -= buf[4] + 1;
buf += buf[4] + 1;
- if (!len)
- return 0;
}
av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 97bed45360f0..af2408b4856e 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2121,9 +2121,7 @@ static int fdp1_open(struct file *file)
if (ctx->hdl.error) {
ret = ctx->hdl.error;
- v4l2_ctrl_handler_free(&ctx->hdl);
- kfree(ctx);
- goto done;
+ goto error_ctx;
}
ctx->fh.ctrl_handler = &ctx->hdl;
@@ -2137,20 +2135,27 @@ static int fdp1_open(struct file *file)
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
-
- v4l2_ctrl_handler_free(&ctx->hdl);
- kfree(ctx);
- goto done;
+ goto error_ctx;
}
/* Perform any power management required */
- pm_runtime_get_sync(fdp1->dev);
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto error_pm;
v4l2_fh_add(&ctx->fh);
dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
+ mutex_unlock(&fdp1->dev_mutex);
+ return 0;
+
+error_pm:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+error_ctx:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
done:
mutex_unlock(&fdp1->dev_mutex);
return ret;
@@ -2255,7 +2260,6 @@ static int fdp1_probe(struct platform_device *pdev)
struct fdp1_dev *fdp1;
struct video_device *vfd;
struct device_node *fcp_node;
- struct resource *res;
struct clk *clk;
unsigned int i;
@@ -2282,17 +2286,15 @@ static int fdp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdp1);
/* Memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fdp1->regs))
return PTR_ERR(fdp1->regs);
/* Interrupt service routine registration */
- fdp1->irq = ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
return ret;
- }
+ fdp1->irq = ret;
ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
dev_name(&pdev->dev), fdp1);
@@ -2315,8 +2317,10 @@ static int fdp1_probe(struct platform_device *pdev)
/* Determine our clock rate */
clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto put_dev;
+ }
fdp1->clk_rate = clk_get_rate(clk);
clk_put(clk);
@@ -2325,7 +2329,7 @@ static int fdp1_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
if (ret) {
v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
- return ret;
+ goto put_dev;
}
/* M2M registration */
@@ -2355,7 +2359,9 @@ static int fdp1_probe(struct platform_device *pdev)
/* Power up the cells to read HW */
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(fdp1->dev);
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto disable_pm;
hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
switch (hw_version) {
@@ -2384,12 +2390,17 @@ static int fdp1_probe(struct platform_device *pdev)
return 0;
+disable_pm:
+ pm_runtime_disable(fdp1->dev);
+
release_m2m:
v4l2_m2m_release(fdp1->m2m_dev);
unreg_dev:
v4l2_device_unregister(&fdp1->v4l2_dev);
+put_dev:
+ rcar_fcp_put(fdp1->fcp);
return ret;
}
@@ -2401,6 +2412,7 @@ static int fdp1_remove(struct platform_device *pdev)
video_unregister_device(&fdp1->vfd);
v4l2_device_unregister(&fdp1->v4l2_dev);
pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(fdp1->fcp);
return 0;
}
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 16a097f93b42..2485a3657c89 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1308,6 +1308,8 @@ static int bdisp_probe(struct platform_device *pdev)
init_waitqueue_head(&bdisp->irq_queue);
INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
bdisp->work_queue = create_workqueue(BDISP_NAME);
+ if (!bdisp->work_queue)
+ return -ENOMEM;
spin_lock_init(&bdisp->slock);
mutex_init(&bdisp->lock);
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index a20413008c3c..f50398b3ed5f 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -83,6 +83,8 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->map_name = RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
+ if (of_property_read_bool(np, "wakeup-source"))
+ device_init_wakeup(dev, true);
rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 833e2bd248a5..8ff8d649d9b3 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -160,10 +160,16 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
- struct vmci_ctx *context = vmci_host_dev->context;
+ struct vmci_ctx *context;
__poll_t mask = 0;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ /*
+ * Read context only if ct_type == VMCIOBJ_CONTEXT to make
+ * sure that context is initialized
+ */
+ context = vmci_host_dev->context;
+
/* Check for VMCI calls to this VM context. */
if (wait)
poll_wait(filp, &context->host_context.wait_queue,
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 69c133e7ced0..48765208e295 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -124,6 +124,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
}
+
/*
* The DAT[3:0] line signal levels and the CMD line signal level are
* not compatible with standard SDHC register. The line signal levels
@@ -135,6 +136,16 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
ret = value & 0x000fffff;
ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
ret |= (value << 1) & SDHCI_CMD_LVL;
+
+ /*
+ * Some controllers have unreliable Data Line Active
+ * bit for commands with busy signal. This affects
+ * Command Inhibit (data) bit. Just ignore it since
+ * MMC core driver has already polled card status
+	 * with CMD13 after any command with busy signal.
+ */
+ if (esdhc->quirk_ignore_data_inhibit)
+ ret &= ~SDHCI_DATA_INHIBIT;
return ret;
}
@@ -149,19 +160,6 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
- /*
- * Some controllers have unreliable Data Line Active
- * bit for commands with busy signal. This affects
- * Command Inhibit (data) bit. Just ignore it since
- * MMC core driver has already polled card status
- * with CMD13 after any command with busy siganl.
- */
- if ((spec_reg == SDHCI_PRESENT_STATE) &&
- (esdhc->quirk_ignore_data_inhibit == true)) {
- ret = value & ~SDHCI_DATA_INHIBIT;
- return ret;
- }
-
ret = value;
return ret;
}
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 97a5e1eaeefd..7bdc558d8560 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -34,6 +34,7 @@
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
+#define CQSPI_DISABLE_DAC_MODE BIT(1)
/* Capabilities mask */
#define CQSPI_BASE_HWCAPS_MASK \
@@ -77,9 +78,6 @@ struct cqspi_st {
dma_addr_t mmap_phys_base;
int current_cs;
- int current_page_size;
- int current_erase_size;
- int current_addr_width;
unsigned long master_ref_clk_hz;
bool is_decoded_cs;
u32 fifo_depth;
@@ -736,32 +734,6 @@ static void cqspi_chipselect(struct spi_nor *nor)
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
-static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
-{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
- struct cqspi_st *cqspi = f_pdata->cqspi;
- void __iomem *iobase = cqspi->iobase;
- unsigned int reg;
-
- /* configure page size and block size. */
- reg = readl(iobase + CQSPI_REG_SIZE);
- reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
- reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
- reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
- reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
- reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
- reg |= (nor->addr_width - 1);
- writel(reg, iobase + CQSPI_REG_SIZE);
-
- /* configure the chip select */
- cqspi_chipselect(nor);
-
- /* Store the new configuration of the controller */
- cqspi->current_page_size = nor->page_size;
- cqspi->current_erase_size = nor->mtd.erasesize;
- cqspi->current_addr_width = nor->addr_width;
-}
-
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
const unsigned int ns_val)
{
@@ -867,18 +839,13 @@ static void cqspi_configure(struct spi_nor *nor)
int switch_cs = (cqspi->current_cs != f_pdata->cs);
int switch_ck = (cqspi->sclk != sclk);
- if ((cqspi->current_page_size != nor->page_size) ||
- (cqspi->current_erase_size != nor->mtd.erasesize) ||
- (cqspi->current_addr_width != nor->addr_width))
- switch_cs = 1;
-
if (switch_cs || switch_ck)
cqspi_controller_enable(cqspi, 0);
/* Switch chip select. */
if (switch_cs) {
cqspi->current_cs = f_pdata->cs;
- cqspi_configure_cs_and_sizes(nor);
+ cqspi_chipselect(nor);
}
/* Setup baudrate divisor and delays */
@@ -1201,7 +1168,7 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
cqspi_controller_enable(cqspi, 1);
}
-static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
dma_cap_mask_t mask;
@@ -1210,10 +1177,16 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
cqspi->rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(cqspi->rx_chan)) {
- dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
+ int ret = PTR_ERR(cqspi->rx_chan);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
cqspi->rx_chan = NULL;
+ return ret;
}
init_completion(&cqspi->rx_dma_complete);
+
+ return 0;
}
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
@@ -1291,13 +1264,17 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
f_pdata->registered = true;
- if (mtd->size <= cqspi->ahb_size) {
+ if (mtd->size <= cqspi->ahb_size &&
+ !(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
f_pdata->use_direct_mode = true;
dev_dbg(nor->dev, "using direct mode for %s\n",
mtd->name);
- if (!cqspi->rx_chan)
- cqspi_request_mmap_dma(cqspi);
+ if (!cqspi->rx_chan) {
+ ret = cqspi_request_mmap_dma(cqspi);
+ if (ret == -EPROBE_DEFER)
+ goto err;
+ }
}
}
@@ -1464,17 +1441,30 @@ static int cqspi_remove(struct platform_device *pdev)
static int cqspi_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+ ret = spi_master_suspend(master);
cqspi_controller_enable(cqspi, 0);
- return 0;
+
+ clk_disable_unprepare(cqspi->clk);
+
+ return ret;
}
static int cqspi_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
- cqspi_controller_enable(cqspi, 1);
- return 0;
+ clk_prepare_enable(cqspi->clk);
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ return spi_master_resume(master);
}
static const struct dev_pm_ops cqspi__dev_pm_ops = {
@@ -1489,6 +1479,7 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
static const struct cqspi_driver_platdata cdns_qspi = {
.hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
+ .quirks = CQSPI_DISABLE_DAC_MODE,
};
static const struct cqspi_driver_platdata k2g_qspi = {
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5133e1be5331..3243bca19b8b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -947,7 +947,7 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
int offset, int len)
{
struct ubi_device *ubi = vol->ubi;
- int pnum, opnum, err, vol_id = vol->vol_id;
+ int pnum, opnum, err, err2, vol_id = vol->vol_id;
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
@@ -982,10 +982,19 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
out_put:
up_read(&ubi->fm_eba_sem);
- if (err && pnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- else if (!err && opnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err && pnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ pnum, err2);
+ }
+ } else if (!err && opnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ opnum, err2);
+ }
+ }
return err;
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2d8382eb9add..baa994b7f78b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -397,9 +397,9 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
if (priv->id == ID_MT7621) {
- /* PLL frequency: 150MHz: 1.2GBit */
+ /* PLL frequency: 125MHz: 1.0GBit */
if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0780;
+ ncpo1 = 0x0640;
if (xtal == HWTRAP_XTAL_25MHZ)
ncpo1 = 0x0a00;
} else { /* PLL frequency: 250MHz: 2.0Gbit */
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index ea32be579e7b..3a8a49b7ec3d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3876,6 +3876,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 9c152d85840d..c9d2a6f15062 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -652,7 +652,7 @@ static int nmclan_config(struct pcmcia_device *link)
} else {
pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
sig[0], sig[1]);
- return -ENODEV;
+ goto failed;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 7c52ae8ac005..a43cb7bfcccd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2542,6 +2542,14 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type < ixgbe_mac_X550)
+ return 16;
+ else
+ return 64;
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2550,7 +2558,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
+ cmd->data = min_t(int, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
@@ -2952,14 +2961,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
-{
- if (adapter->hw.mac.type < ixgbe_mac_X550)
- return 16;
- else
- return 64;
-}
-
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
return IXGBE_RSS_KEY_SIZE;
@@ -3008,8 +3009,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
- if (hfunc)
- return -EINVAL;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
/* Fill out the redirection table */
if (indir) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index b8de3784d976..c53d2271d8ab 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -573,7 +573,7 @@ static int ionic_get_rxnfc(struct net_device *netdev,
info->data = lif->nxqs;
break;
default:
- netdev_err(netdev, "Command parameter %d is not supported\n",
+ netdev_dbg(netdev, "Command parameter %d is not supported\n",
info->cmd);
err = -EOPNOTSUPP;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 59d4449450ee..7922e833620e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2792,6 +2792,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -2799,26 +2820,16 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
+ cond_resched();
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
+ cond_resched();
}
}
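
The virtio_net change factors the per-buffer cleanup into virtnet_sq_free_unused_buf()/virtnet_rq_free_unused_buf() and drops a cond_resched() in after each virtqueue is drained, so tearing down a device with many queue pairs no longer monopolizes the CPU. A rough userspace analogy of the drain-then-yield pattern (sched_yield() standing in for cond_resched(); everything below is illustrative, not driver code):

#include <stdio.h>
#include <sched.h>

#define NQUEUES 4
#define NBUFS   8

/* Stand-in for the per-buffer cleanup (dev_kfree_skb()/put_page()
 * in the driver). */
static void free_unused_buf(int q, int b)
{
    printf("queue %d: freed buffer %d\n", q, b);
}

int main(void)
{
    for (int q = 0; q < NQUEUES; q++) {
        for (int b = 0; b < NBUFS; b++)
            free_unused_buf(q, b);
        /* Yield between queues, the way the patch calls
         * cond_resched() after draining each virtqueue. */
        sched_yield();
    }
    return 0;
}
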
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 01163b333945..92f5c8e83090 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -529,7 +529,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
ee->ee_n_piers[mode]++;
freq2 = (val >> 8) & 0xff;
- if (!freq2)
+ if (!freq2 || i >= max)
break;
pc[i++].freq = ath5k_eeprom_bin2freq(ee,
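
The ath5k one-liner adds an i >= max bound to the loop's break condition, so the frequency-list parser stops before writing past the caller-supplied pier array even when the EEPROM never delivers the terminating zero it is expected to. Sketch of the guarded loop with hypothetical data (not the driver's table):

#include <stdio.h>

#define MAX_PIERS 4

int main(void)
{
    /* Pretend EEPROM words: low byte = freq1, high byte = freq2. */
    unsigned short eeprom[] = { 0x0201, 0x0403, 0x0605, 0x0807 };
    unsigned char piers[MAX_PIERS];
    int i = 0;

    for (unsigned n = 0; n < sizeof(eeprom) / sizeof(eeprom[0]); n++) {
        unsigned freq1 = eeprom[n] & 0xff;
        unsigned freq2 = (eeprom[n] >> 8) & 0xff;

        if (!freq1 || i >= MAX_PIERS)
            break;
        piers[i++] = (unsigned char)freq1;

        /* The equivalent of the added "|| i >= max" guard. */
        if (!freq2 || i >= MAX_PIERS)
            break;
        piers[i++] = (unsigned char)freq2;
    }

    printf("stored %d of a possible %d piers\n", i, MAX_PIERS);
    return 0;
}
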
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index bde5a10d470c..af98e871199d 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -246,7 +246,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
return -EACCES;
}
- size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ size = sizeof(cid) + sizeof(addr) + sizeof(*param);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
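
In ath6kl_bmi_execute() the parameter is a u32 *, so the old sizeof(param) measured the pointer (8 bytes on 64-bit builds) rather than the 4-byte value actually placed in the command; sizeof(*param) gives the intended size. A two-printf standalone illustration of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t value = 0;
    uint32_t *param = &value;

    printf("sizeof(param)  = %zu (the pointer)\n", sizeof(param));
    printf("sizeof(*param) = %zu (the value actually sent)\n", sizeof(*param));
    return 0;
}
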
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index c68848819a52..9b88d96bfe96 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -960,8 +960,8 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
* Thus the possibility of ar->htc_target being NULL
* via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
*/
- if (WARN_ON_ONCE(!target)) {
- ath6kl_err("Target not yet initialized\n");
+ if (!target) {
+ ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n");
status = -EINVAL;
goto free_skb;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index e23d58f83dd6..3aa915d21554 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -534,6 +534,24 @@ static struct ath9k_htc_hif hif_usb = {
.send = hif_usb_send,
};
+/* Need to free remain_skb allocated in ath9k_hif_usb_rx_stream
+ * in case ath9k_hif_usb_rx_stream wasn't called next time to
+ * process the buffer and subsequently free it.
+ */
+static void ath9k_hif_usb_free_rx_remain_skb(struct hif_device_usb *hif_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->rx_lock, flags);
+ if (hif_dev->remain_skb) {
+ dev_kfree_skb_any(hif_dev->remain_skb);
+ hif_dev->remain_skb = NULL;
+ hif_dev->rx_remain_len = 0;
+ RX_STAT_INC(hif_dev, skb_dropped);
+ }
+ spin_unlock_irqrestore(&hif_dev->rx_lock, flags);
+}
+
static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
@@ -868,6 +886,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+ ath9k_hif_usb_free_rx_remain_skb(hif_dev);
}
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index cd146bbca670..5bfff309f547 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5466,6 +5466,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
(struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
req_len = le32_to_cpu(assoc_info->req_len);
resp_len = le32_to_cpu(assoc_info->resp_len);
+ if (req_len > WL_EXTRA_BUF_MAX || resp_len > WL_EXTRA_BUF_MAX) {
+ bphy_err(drvr, "invalid lengths in assoc info: req %u resp %u\n",
+ req_len, resp_len);
+ return -EINVAL;
+ }
if (req_len) {
err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
cfg->extra_buf,
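
The brcmfmac hunk validates the firmware-reported req_len/resp_len against WL_EXTRA_BUF_MAX before the association IEs are fetched into cfg->extra_buf, turning an oversized report into -EINVAL rather than an out-of-bounds copy. The validate-before-copy idiom in a standalone sketch (the buffer size and struct layout below are stand-ins, not the driver's):

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define EXTRA_BUF_MAX 64    /* stand-in for WL_EXTRA_BUF_MAX */

struct assoc_info {         /* hypothetical layout */
    unsigned int req_len;
    unsigned int resp_len;
    const unsigned char *data;
};

static int copy_assoc_ies(const struct assoc_info *info,
                          unsigned char *dst, size_t dst_len)
{
    /* Never trust lengths reported by the device or firmware. */
    if (info->req_len > dst_len || info->resp_len > dst_len)
        return -EINVAL;
    if (info->req_len)
        memcpy(dst, info->data, info->req_len);
    return 0;
}

int main(void)
{
    unsigned char buf[EXTRA_BUF_MAX];
    struct assoc_info bogus = { .req_len = 4096, .resp_len = 0, .data = NULL };

    printf("bogus length rejected: %d\n",
           copy_assoc_ies(&bogus, buf, sizeof(buf)));
    return 0;
}
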
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 524f9dd2323d..f8785c70842d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1877,6 +1877,11 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
if (ret < 0)
return ret;
+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
rsp = (void *)hcmd.resp_pkt->data;
if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
ret = -ENXIO;
@@ -1954,6 +1959,11 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
if (ret < 0)
return ret;
+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
rsp = (void *)hcmd.resp_pkt->data;
if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
ret = -ENXIO;
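
Both iwlwifi debugfs hunks check iwl_rx_packet_payload_len() against sizeof(*rsp) before the payload is interpreted as the response structure, so a short or malformed packet produces -EIO instead of a read past the buffer. The same check-then-parse pattern, with a hypothetical response layout:

#include <stdio.h>
#include <errno.h>
#include <string.h>

struct mem_rsp {            /* hypothetical response layout */
    unsigned int status;
    unsigned int len;
};

static int parse_rsp(const void *payload, size_t payload_len,
                     struct mem_rsp *out)
{
    if (payload_len < sizeof(*out))  /* validate before parsing */
        return -EIO;
    memcpy(out, payload, sizeof(*out));
    return 0;
}

int main(void)
{
    unsigned char shortbuf[2] = { 0 };
    struct mem_rsp good = { .status = 0, .len = 16 };
    struct mem_rsp rsp;

    printf("short payload -> %d\n",
           parse_rsp(shortbuf, sizeof(shortbuf), &rsp));
    printf("good payload  -> %d\n",
           parse_rsp(&good, sizeof(good), &rsp));
    return 0;
}
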
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 8915030030c4..3a93a7b8ba0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -630,7 +630,6 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
- int t = 0;
int iter;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -645,6 +644,8 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
usleep_range(1000, 2000);
for (iter = 0; iter < 10; iter++) {
+ int t = 0;
+
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE);
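
Moving int t = 0 inside the for (iter ...) loop in iwl_pcie_prepare_card_hw() gives each of the up to ten preparation attempts a fresh polling budget; previously later attempts inherited whatever the counter had already reached. A small retry-loop sketch of the idea (timings and hw_ready() are made up):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical: the hardware only reports ready on the 4th attempt,
 * after a little polling. */
static bool hw_ready(int attempt, int t)
{
    return attempt == 3 && t >= 400;
}

int main(void)
{
    for (int iter = 0; iter < 10; iter++) {
        int t = 0;          /* fresh polling budget per attempt */

        do {
            if (hw_ready(iter, t)) {
                printf("ready on attempt %d, t=%d\n", iter, t);
                return 0;
            }
            t += 200;
        } while (t < 1000);
    }
    printf("never became ready\n");
    return 1;
}
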
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index edd2960c109a..e26a722852ee 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1702,6 +1702,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
.has_s0s1 = 0,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index c9ad6761032a..8fb0b54738ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -195,8 +195,8 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
} else {
if (get_rf_type(rtlphy) == RF_1T2R ||
get_rf_type(rtlphy) == RF_2T2R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "1T2R or 2T2R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "1T2R or 2T2R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
ht_cap->mcs.rx_mask[4] = 0x01;
@@ -204,7 +204,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.rx_highest =
cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
} else if (get_rf_type(rtlphy) == RF_1T1R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0x00;
@@ -1324,7 +1324,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
mac->link_state = MAC80211_LINKING;
/* Dul mac */
@@ -1385,7 +1385,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
if (mac->act_scanning)
return false;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
"%s ACT_ADDBAREQ From :%pM\n",
is_tx ? "Tx" : "Rx", hdr->addr2);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "req\n",
@@ -1400,8 +1400,8 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
rcu_read_lock();
sta = rtl_find_sta(hw, hdr->addr3);
if (sta == NULL) {
- RT_TRACE(rtlpriv, COMP_SEND | COMP_RECV,
- DBG_DMESG, "sta is NULL\n");
+ rtl_dbg(rtlpriv, COMP_SEND | COMP_RECV,
+ DBG_DMESG, "sta is NULL\n");
rcu_read_unlock();
return true;
}
@@ -1428,13 +1428,13 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
break;
case ACT_ADDBARSP:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "%s ACT_ADDBARSP From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2);
break;
case ACT_DELBA:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "ACT_ADDBADEL From :%pM\n", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "ACT_ADDBADEL From :%pM\n", hdr->addr2);
break;
}
break;
@@ -1519,9 +1519,9 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
/* 68 : UDP BOOTP client
* 67 : UDP BOOTP server
*/
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, "dhcp %s !!\n",
- (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV),
+ DBG_DMESG, "dhcp %s !!\n",
+ (is_tx) ? "Tx" : "Rx");
if (is_tx)
setup_special_tx(rtlpriv, ppsc,
@@ -1540,8 +1540,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
rtlpriv->btcoexist.btc_info.in_4way = true;
rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
if (is_tx) {
rtlpriv->ra.is_special_data = true;
@@ -1583,12 +1583,12 @@ static void rtl_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
if (ack) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: ack\n");
info->flags |= IEEE80211_TX_STAT_ACK;
} else {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: not ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: not ack\n");
info->flags &= ~IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(hw, skb);
@@ -1626,8 +1626,8 @@ static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw,
tx_report->last_sent_time = jiffies;
tx_info->sn = sn;
tx_info->send_time = tx_report->last_sent_time;
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Send TX-Report sn=0x%X\n", sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Send TX-Report sn=0x%X\n", sn);
return sn;
}
@@ -1674,9 +1674,9 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len)
break;
}
}
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
- st, sn, retry);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
+ st, sn, retry);
}
EXPORT_SYMBOL_GPL(rtl_tx_report_handler);
@@ -1689,9 +1689,9 @@ bool rtl_check_tx_report_acked(struct ieee80211_hw *hw)
return true;
if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
- "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
- tx_report->last_sent_sn, tx_report->last_recv_sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
+ "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
+ tx_report->last_sent_sn, tx_report->last_recv_sn);
return true; /* 3 sec. (timeout) seen as acked */
}
@@ -1707,8 +1707,8 @@ void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
if (rtl_check_tx_report_acked(hw))
break;
usleep_range(1000, 2000);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
}
}
@@ -1770,9 +1770,9 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- *ssn);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ *ssn);
tid_data->agg.agg_state = RTL_AGG_START;
@@ -1789,8 +1789,8 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1829,8 +1829,8 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_RECV, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
return 0;
@@ -1845,8 +1845,8 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1866,8 +1866,8 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1887,9 +1887,9 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
&ctrl_agg_size, &agg_size);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
- reject_agg, ctrl_agg_size, agg_size);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
+ reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
(ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
@@ -1977,9 +1977,9 @@ void rtl_scan_list_expire(struct ieee80211_hw *hw)
list_del(&entry->list);
rtlpriv->scan_list.num--;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "BSSID=%pM is expire in scan list (total=%d)\n",
- entry->bssid, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "BSSID=%pM is expire in scan list (total=%d)\n",
+ entry->bssid, rtlpriv->scan_list.num);
kfree(entry);
}
@@ -2013,9 +2013,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
list_del_init(&entry->list);
entry_found = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Update BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Update BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
break;
}
}
@@ -2029,9 +2029,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
memcpy(entry->bssid, hdr->addr3, ETH_ALEN);
rtlpriv->scan_list.num++;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Add BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Add BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
}
entry->age = jiffies;
@@ -2191,8 +2191,8 @@ void rtl_watchdog_wq_callback(void *data)
if ((rtlpriv->link_info.bcn_rx_inperiod +
rtlpriv->link_info.num_rx_inperiod) == 0) {
rtlpriv->link_info.roam_times++;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "AP off for %d s\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "AP off for %d s\n",
(rtlpriv->link_info.roam_times * 2));
/* if we can't recv beacon for 10s,
@@ -2305,11 +2305,11 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
switch (cmd_id) {
case C2H_DBG:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
break;
case C2H_TXBF:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_TXBF!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_TXBF!!\n");
break;
case C2H_TX_REPORT:
rtl_tx_report_handler(hw, cmd_buf, cmd_len);
@@ -2319,20 +2319,20 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
hal_ops->c2h_ra_report_handler(hw, cmd_buf, cmd_len);
break;
case C2H_BT_INFO:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_INFO!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_INFO!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
case C2H_BT_MP:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_MP!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_MP!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btmpinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
default:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
break;
}
}
@@ -2356,8 +2356,8 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
if (!skb)
break;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
- *((u8 *)skb->cb));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
+ *((u8 *)skb->cb));
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_DMESG,
"C2H data: ", skb->data, skb->len);
@@ -2702,29 +2702,29 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
(memcmp(mac->bssid, ap5_6, 3) == 0) ||
vendor == PEER_ATH) {
vendor = PEER_ATH;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
} else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
(memcmp(mac->bssid, ap4_5, 3) == 0) ||
(memcmp(mac->bssid, ap4_1, 3) == 0) ||
(memcmp(mac->bssid, ap4_2, 3) == 0) ||
(memcmp(mac->bssid, ap4_3, 3) == 0) ||
vendor == PEER_RAL) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
vendor = PEER_RAL;
} else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
vendor == PEER_CISCO) {
vendor = PEER_CISCO;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
} else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
(memcmp(mac->bssid, ap3_2, 3) == 0) ||
(memcmp(mac->bssid, ap3_3, 3) == 0) ||
vendor == PEER_BROAD) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
vendor = PEER_BROAD;
} else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
vendor == PEER_MARV) {
vendor = PEER_MARV;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
}
mac->vendor = vendor;
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index bf0e0bb1f99b..7aa28da39409 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -43,14 +43,14 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[WCAMI], target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The Key ID is %d\n", entry_no);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[RWCAM], target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[WCAMI], target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The Key ID is %d\n", entry_no);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[RWCAM], target_command);
} else if (entry_i == 1) {
@@ -64,10 +64,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
} else {
@@ -83,15 +83,15 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
}
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "after set key, usconfig:%x\n", us_config);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "after set key, usconfig:%x\n", us_config);
}
u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
@@ -101,14 +101,14 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 us_config;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
- ul_entry_idx, ul_key_id, ul_enc_alg,
- ul_default_key, mac_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, mac_addr);
if (ul_key_id == TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ulKeyId exceed!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ulKeyId exceed!\n");
return 0;
}
@@ -120,7 +120,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
(u8 *)key_content, us_config);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
return 1;
@@ -133,7 +133,7 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
u32 ul_command;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
ul_command = ul_key_id * CAM_CONTENT_COUNT;
ul_command = ul_command | BIT(31) | BIT(16);
@@ -141,10 +141,10 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, 0);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
return 0;
@@ -195,10 +195,10 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
EXPORT_SYMBOL(rtl_cam_mark_invalid);
@@ -245,12 +245,10 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A4: %x\n",
- ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A0: %x\n",
- ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
}
@@ -313,8 +311,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Remove from HW Security CAM */
eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "&&&&&&&&&del entry %d\n", i);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "&&&&&&&&&del entry %d\n", i);
}
}
return;
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f73e690bbe8e..c4c89aade8fb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -76,8 +76,8 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "Firmware callback routine entered!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
@@ -214,8 +214,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
u8 retry_limit = 0x30;
if (mac->vif) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "vif has been set!! mac->vif = 0x%p\n", mac->vif);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "vif has been set!! mac->vif = 0x%p\n", mac->vif);
return -EOPNOTSUPP;
}
@@ -230,16 +230,16 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
/*fall through*/
case NL80211_IFTYPE_STATION:
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_STATION\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_STATION\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
}
break;
case NL80211_IFTYPE_ADHOC:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_ADHOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_ADHOC\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -256,8 +256,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
mac->p2p = P2P_ROLE_GO;
/*fall through*/
case NL80211_IFTYPE_AP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_AP\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_AP\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -271,8 +271,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
retry_limit = 0x07;
break;
case NL80211_IFTYPE_MESH_POINT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_MESH_POINT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_MESH_POINT\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -293,8 +293,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
}
if (mac->p2p) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p role %x\n", vif->type);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p role %x\n", vif->type);
mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
@@ -360,8 +360,8 @@ static int rtl_op_change_interface(struct ieee80211_hw *hw,
vif->type = new_type;
vif->p2p = p2p;
ret = rtl_op_add_interface(hw, vif);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p %x\n", p2p);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p %x\n", p2p);
return ret;
}
@@ -435,8 +435,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
if (patterns[i].pattern_len < 0 ||
patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
- "Pattern[%d] is too long\n", i);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_WARNING,
+ "Pattern[%d] is too long\n", i);
continue;
}
pattern_os = patterns[i].pattern;
@@ -515,8 +515,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
"pattern to hw\n", content, len);
/* 3. calculate crc */
rtl_pattern.crc = _calculate_wol_pattern_crc(content, len);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
/* 4. write crc & mask_for_hw to hw */
rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i);
@@ -531,7 +531,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
if (WARN_ON(!wow))
return -EINVAL;
@@ -557,7 +557,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
time64_t now;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
rtlhal->driver_is_goingto_unload = false;
rtlhal->enter_pnp_sleep = false;
rtlhal->wake_from_pnp_sleep = true;
@@ -588,8 +588,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtlpriv->locks.conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2)*/
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
}
/*For IPS */
@@ -632,9 +632,9 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count);
/* brought up everything changes (changed == ~0) indicates first
* open, so use our default value instead of that of wiphy.
*/
@@ -809,13 +809,13 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_ALLMULTI) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive multicast frame\n");
} else {
mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB]);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive multicast frame\n");
}
update_rcr = true;
}
@@ -823,12 +823,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive FCS error frame\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive FCS error frame\n");
}
if (!update_rcr)
update_rcr = true;
@@ -855,12 +855,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_CONTROL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive control frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive control frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -869,12 +869,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_OTHER_BSS) {
if (*new_flags & FIF_OTHER_BSS) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive other BSS's frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive other BSS's frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -923,7 +923,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
sta->supp_rates[0] &= 0xfffffff0;
memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
"Add sta addr is %pM\n", sta->addr);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true);
}
@@ -939,8 +939,8 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry;
if (sta) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "Remove sta addr is %pM\n", sta->addr);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "Remove sta addr is %pM\n", sta->addr);
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
sta_entry->wireless_mode = 0;
sta_entry->ratr_index = 0;
@@ -988,8 +988,8 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw,
int aci;
if (queue >= AC_MAX) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "queue number %d is incorrect!\n", queue);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "queue number %d is incorrect!\n", queue);
return -EINVAL;
}
@@ -1034,8 +1034,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_BEACON_ENABLED\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_BEACON_ENABLED\n");
/*start hw beacon interrupt. */
/*rtlpriv->cfg->ops->set_bcn_reg(hw); */
@@ -1052,8 +1052,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED &&
!bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "ADHOC DISABLE BEACON\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "ADHOC DISABLE BEACON\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -1062,8 +1062,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_BEACON_INT) {
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
- "BSS_CHANGED_BEACON_INT\n");
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_TRACE,
+ "BSS_CHANGED_BEACON_INT\n");
mac->beacon_interval = bss_conf->beacon_int;
rtlpriv->cfg->ops->set_bcn_intv(hw);
}
@@ -1102,8 +1102,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
goto out;
}
- RT_TRACE(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
- "send PS STATIC frame\n");
+ rtl_dbg(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
+ "send PS STATIC frame\n");
if (rtlpriv->dm.supp_phymode_switch) {
if (sta->ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
@@ -1143,8 +1143,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
HW_VAR_KEEP_ALIVE,
(u8 *)(&keep_alive));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_ASSOC\n");
} else {
struct cfg80211_bss *bss = NULL;
@@ -1161,14 +1161,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
IEEE80211_BSS_TYPE_ESS,
IEEE80211_PRIVACY_OFF);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid = %pMF\n", mac->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid = %pMF\n", mac->bssid);
if (bss) {
cfg80211_unlink_bss(hw->wiphy, bss);
cfg80211_put_bss(hw->wiphy, bss);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "cfg80211_unlink !!\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "cfg80211_unlink !!\n");
}
eth_zero_addr(mac->bssid);
@@ -1179,8 +1179,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->chk_switch_dmdp)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_UN_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_UN_ASSOC\n");
}
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
/* For FW LPS:
@@ -1198,14 +1198,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_CTS_PROT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_CTS_PROT\n");
mac->use_cts_protect = bss_conf->use_cts_prot;
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
bss_conf->use_short_preamble);
mac->short_preamble = bss_conf->use_short_preamble;
@@ -1214,8 +1214,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_SLOT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_SLOT\n");
if (bss_conf->use_short_slot)
mac->slot_time = RTL_SLOT_TIME_9;
@@ -1229,8 +1229,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_HT) {
struct ieee80211_sta *sta = NULL;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_HT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_HT\n");
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
@@ -1261,8 +1261,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
(u8 *)bss_conf->bssid);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid: %pM\n", bss_conf->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid: %pM\n", bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
@@ -1393,27 +1393,27 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
return rtl_tx_agg_stop(hw, vif, sta, tid);
case IEEE80211_AMPDU_TX_OPERATIONAL:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
rtl_tx_agg_oper(hw, sta, tid);
break;
case IEEE80211_AMPDU_RX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
return rtl_rx_agg_start(hw, sta, tid);
case IEEE80211_AMPDU_RX_STOP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
return rtl_rx_agg_stop(hw, sta, tid);
default:
pr_err("IEEE80211_AMPDU_ERR!!!!:\n");
@@ -1429,7 +1429,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = true;
if (rtlpriv->link_info.higher_busytraffic) {
mac->skip_scan = true;
@@ -1467,7 +1467,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = false;
mac->skip_scan = false;
@@ -1517,8 +1517,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->btcoexist.btc_info.in_4way = false;
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not open hw encryption\n");
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
@@ -1526,10 +1526,10 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif->type == NL80211_IFTYPE_MESH_POINT) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s hardware based encryption for keyidx: %d, mac: %pM\n",
- cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
- sta ? sta->addr : bcast_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr);
rtlpriv->sec.being_setkey = true;
rtl_ips_nic_on(hw);
mutex_lock(&rtlpriv->locks.conf_mutex);
@@ -1538,28 +1538,28 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key_type = WEP40_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
key_type = WEP104_ENCRYPTION;
break;
case WLAN_CIPHER_SUITE_TKIP:
key_type = TKIP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = AESCCMP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
/* HW don't support CMAC encryption,
* use software CMAC encryption
*/
key_type = AESCMAC_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "HW don't support CMAC encryption, use software CMAC encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "HW don't support CMAC encryption, use software CMAC encryption\n");
err = -EOPNOTSUPP;
goto out_unlock;
default:
@@ -1605,9 +1605,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type == WEP104_ENCRYPTION))
wep_only = true;
rtlpriv->sec.pairwise_enc_algorithm = key_type;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
- key_type);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
+ key_type);
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
}
@@ -1615,8 +1615,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
if (wep_only) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set WEP(group/pairwise) key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set WEP(group/pairwise) key\n");
/* Pairwise key with an assigned MAC address. */
rtlpriv->sec.pairwise_enc_algorithm = key_type;
rtlpriv->sec.group_enc_algorithm = key_type;
@@ -1626,8 +1626,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
eth_zero_addr(mac_addr);
} else if (group_key) { /* group key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
/* group key */
rtlpriv->sec.group_enc_algorithm = key_type;
/*set local buf about group key. */
@@ -1636,8 +1636,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
memcpy(mac_addr, bcast_addr, ETH_ALEN);
} else { /* pairwise key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set pairwise key\n");
if (!sta) {
WARN_ONCE(true,
"rtlwifi: pairwise key without mac_addr\n");
@@ -1669,8 +1669,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case DISABLE_KEY:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "disable key delete one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "disable key delete one entry\n");
/*set local buf about wep key. */
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
@@ -1718,9 +1718,9 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
rtlpriv->rfkill.rfkill_state = radio_state;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "wireless radio switch turned %s\n",
- radio_state ? "on" : "off");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off");
blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
@@ -1765,26 +1765,27 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
do {
cfg_cmd = pwrcfgcmd[ary_idx];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
- GET_PWR_CFG_OFFSET(cfg_cmd),
- GET_PWR_CFG_CUT_MASK(cfg_cmd),
- GET_PWR_CFG_FAB_MASK(cfg_cmd),
- GET_PWR_CFG_INTF_MASK(cfg_cmd),
- GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
- GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ __func__,
+ GET_PWR_CFG_OFFSET(cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(cfg_cmd),
+ GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+ GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
(GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
(GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
switch (GET_PWR_CFG_CMD(cfg_cmd)) {
case PWR_CMD_READ:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
break;
case PWR_CMD_WRITE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_WRITE\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
@@ -1797,7 +1798,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
rtl_write_byte(rtlpriv, offset, value);
break;
case PWR_CMD_POLLING:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
polling_bit = false;
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
@@ -1818,8 +1819,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
} while (!polling_bit);
break;
case PWR_CMD_DELAY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_DELAY\n", __func__);
if (GET_PWR_CFG_VALUE(cfg_cmd) ==
PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
@@ -1827,8 +1828,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
break;
case PWR_CMD_END:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_END\n", __func__);
return true;
default:
WARN_ONCE(true,
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index ec0da33da4f8..3866ea300a70 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -298,8 +298,8 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -307,7 +307,7 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
if (num != 3)
- return count;
+ return -EINVAL;
switch (len) {
case 1:
@@ -395,8 +395,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -404,9 +404,9 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
&path, &addr, &bitmask, &data);
if (num != 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "Format is <path> <addr> <mask> <data>\n");
- return count;
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Format is <path> <addr> <mask> <data>\n");
+ return -EINVAL;
}
rtl_set_rfreg(hw, path, addr, bitmask, data);
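
The rtlwifi debugfs write handlers above now report real errors: -EFAULT when copy_from_user() fails and -EINVAL when the input does not parse, where they previously returned count and made a failed write look successful. The return-value convention, reduced to a standalone parser (plain buffer instead of user memory, hypothetical command format):

#include <stdio.h>
#include <errno.h>

/* Hypothetical parser for an "<addr> <val> <len>" debugfs-style command. */
static long parse_write_reg(const char *buf, long count)
{
    unsigned int addr, val, len;

    if (sscanf(buf, "%x %x %x", &addr, &val, &len) != 3)
        return -EINVAL;     /* malformed input is an error, not "success" */

    printf("write reg 0x%x = 0x%x (%u bytes)\n", addr, val, len);
    return count;           /* everything consumed */
}

int main(void)
{
    printf("ok:  %ld\n", parse_write_reg("10 ff 1", 8));
    printf("bad: %ld\n", parse_write_reg("garbage", 7));
    return 0;
}
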
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 69f169d4d4ae..dbfb4d67ca31 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -160,6 +160,10 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *titlestring,
const void *hexdata, int hexdatalen);
+#define rtl_dbg(rtlpriv, comp, level, fmt, ...) \
+ _rtl_dbg_trace(rtlpriv, comp, level, \
+ fmt, ##__VA_ARGS__)
+
#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
_rtl_dbg_trace(rtlpriv, comp, level, \
fmt, ##__VA_ARGS__)
@@ -176,6 +180,13 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
struct rtl_priv;
+__printf(4, 5)
+static inline void rtl_dbg(struct rtl_priv *rtlpriv,
+ u64 comp, int level,
+ const char *fmt, ...)
+{
+}
+
__printf(4, 5)
static inline void RT_TRACE(struct rtl_priv *rtlpriv,
u64 comp, int level,
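
debug.h introduces rtl_dbg() as a thin alias for the same _rtl_dbg_trace() plumbing behind RT_TRACE(), plus an empty inline stub when debugging is compiled out; that is what lets the remaining rtlwifi files switch call sites mechanically with no behavioural change. The wrapper shape, shrunk to a standalone variadic macro (printf() standing in for _rtl_dbg_trace(); build with -DDEBUG to see output):

#include <stdio.h>

#ifdef DEBUG
#define rtl_dbg(comp, level, fmt, ...) \
    printf("[%s/%d] " fmt, comp, level, ##__VA_ARGS__)
#else
/* Compiles the call away entirely when debugging is off. */
#define rtl_dbg(comp, level, fmt, ...) do { } while (0)
#endif

int main(void)
{
    rtl_dbg("INIT", 2, "loaded, channel=%d\n", 6);
    return 0;
}
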
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 264667203f6f..305d43818018 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -120,8 +120,8 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
const u32 efuse_len =
rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
- address, value);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
+ address, value);
if (address < efuse_len) {
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
@@ -211,9 +211,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
u8 efuse_usage;
if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "read_efuse(): Invalid offset(%#x) with read bytes(%#x)!!\n",
- _offset, _size_byte);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
+ __func__, _offset, _size_byte);
return;
}
@@ -376,9 +376,9 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
(EFUSE_MAX_SIZE - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
result = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_shadow_update_chk(): totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
- totalbytes, hdr_num, words_need, efuse_used);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ __func__, totalbytes, hdr_num, words_need, efuse_used);
return result;
}
@@ -416,7 +416,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
u8 word_en = 0x0F;
u8 first_pg = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
if (!efuse_shadow_update_chk(hw)) {
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
@@ -424,8 +424,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse out of capacity!!\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse out of capacity!!\n");
return false;
}
efuse_power_switch(hw, true, true);
@@ -464,8 +464,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
tmpdata)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "PG section(%#x) fail!!\n", offset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "PG section(%#x) fail!!\n", offset);
break;
}
}
@@ -478,7 +478,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
return true;
}
@@ -616,8 +616,8 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmpidx = 0;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "Addr = %x Data=%x\n", addr, data);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "Addr = %x Data=%x\n", addr, data);
rtl_write_byte(rtlpriv,
rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
@@ -997,8 +997,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
if (efuse_addr >= (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_addr(%#x) Out of size!!\n", efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse_addr(%#x) Out of size!!\n", efuse_addr);
}
return true;
@@ -1038,8 +1038,8 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
u8 tmpdata[8];
memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
if (!(word_en & BIT(0))) {
tmpaddr = start_addr;
@@ -1242,11 +1242,11 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != params[0]) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1257,30 +1257,30 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
rtlefuse->eeprom_did = *(u16 *)&hwinfo[params[2]];
rtlefuse->eeprom_svid = *(u16 *)&hwinfo[params[3]];
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[params[4]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[params[5] + i];
*((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
rtlefuse->eeprom_channelplan = *&hwinfo[params[6]];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[params[7]];
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *&hwinfo[params[8]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan to world wide 13 */
rtlefuse->channel_plan = params[9];
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 25335bd2873b..9006aa4446b3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -204,8 +204,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -254,8 +254,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -271,10 +271,10 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
u_pcibridge_aspmsetting);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PlatformEnableASPM(): Write reg[%x] = %x\n",
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
- u_pcibridge_aspmsetting);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PlatformEnableASPM(): Write reg[%x] = %x\n",
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+ u_pcibridge_aspmsetting);
udelay(50);
@@ -331,11 +331,11 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
list) {
tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "pcipriv->ndis_adapter.funcnumber %x\n",
pcipriv->ndis_adapter.funcnumber);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "tpcipriv->ndis_adapter.funcnumber %x\n",
tpcipriv->ndis_adapter.funcnumber);
if (pcipriv->ndis_adapter.busnumber ==
@@ -350,8 +350,8 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", find_buddy_priv);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "find_buddy_priv %d\n", find_buddy_priv);
if (find_buddy_priv)
*buddy_priv = tpriv;
@@ -388,8 +388,8 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
- pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT(4);
@@ -557,11 +557,11 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (rtlpriv->rtlhal.earlymode_enable)
skb_pull(skb, EM_HDR_LEN);
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
- "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
- ring->idx,
- skb_queue_len(&ring->queue),
- *(u16 *)(skb->data + 22));
+ rtl_dbg(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
+ "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *)(skb->data + 22));
if (prio == TXCMD_QUEUE) {
dev_kfree_skb(skb);
@@ -608,10 +608,10 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
}
if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue));
ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
}
@@ -801,9 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
skb_reserve(skb, stats.rx_drvinfo_size +
stats.rx_bufshift);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "skb->end - skb->tail = %d, len is %d\n",
- skb->end - skb->tail, len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "skb->end - skb->tail = %d, len is %d\n",
+ skb->end - skb->tail, len);
dev_kfree_skb_any(skb);
goto new_trx_end;
}
@@ -925,67 +925,67 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
/*<1> beacon related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon ok interrupt!\n");
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon err interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon err interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "prepare beacon for interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<2> Tx related */
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Manage ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "HIGH_QUEUE ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BK Tx OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BK Tx OK interrupt!\n");
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BE TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BE TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "VI TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "VI TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Vo TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Vo TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VO_QUEUE);
}
@@ -993,8 +993,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "H2C TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "H2C TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, H2C_QUEUE);
}
}
@@ -1003,34 +1003,34 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "CMD TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "CMD TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
}
}
/*<3> Rx related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "rx descriptor unavailable!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
/*<4> fw related*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "firmware interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "firmware interrupt!\n");
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.fwevt_wq, 0);
}
@@ -1046,8 +1046,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
if (unlikely(intvec.inta &
rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "hsisr interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "hsisr interrupt!\n");
_rtl_pci_hs_interrupt(hw);
}
}
@@ -1251,8 +1251,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
rtlpci->tx_ring[prio].entries = entries;
skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
- prio, desc);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
+ prio, desc);
/* init every desc in this ring */
if (!rtlpriv->use_new_trx_flow) {
@@ -1649,10 +1649,10 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
true, HW_DESC_OWN);
if (own == 1 && hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
flags);
@@ -1662,8 +1662,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->get_available_desc &&
rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "get_available_desc fail\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "get_available_desc fail\n");
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return skb->len;
}
@@ -1686,8 +1686,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
hw_queue, ring->idx, idx,
skb_queue_len(&ring->queue));
@@ -1794,8 +1794,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Failed to config hardware!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Failed to config hardware!\n");
kfree(rtlpriv->btcoexist.btc_context);
kfree(rtlpriv->btcoexist.wifi_only_context);
return err;
@@ -1804,7 +1804,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
&rtlmac->retry_long);
rtlpriv->cfg->ops->enable_interrupt(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
rtl_init_rx_config(hw);
@@ -1815,7 +1815,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtlpci->up_first_time = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
return 0;
}
@@ -1909,71 +1909,71 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
deviceid == RTL_PCI_8171_DID) {
switch (revisionid) {
case RTL_PCI_REVISION_ID_8192PCIE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192 PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192 PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
return false;
case RTL_PCI_REVISION_ID_8192SE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192SE is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192SE is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
}
} else if (deviceid == RTL_PCI_8723AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8723AE PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8723AE PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192CET_DID ||
deviceid == RTL_PCI_8192CE_DID ||
deviceid == RTL_PCI_8191CE_DID ||
deviceid == RTL_PCI_8188CE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192C PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192C PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192DE_DID ||
deviceid == RTL_PCI_8192DE_DID2) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192D PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192D PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8188EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8188EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8188EE\n");
} else if (deviceid == RTL_PCI_8723BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8723BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8723BE\n");
} else if (deviceid == RTL_PCI_8192EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8192EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8192EE\n");
} else if (deviceid == RTL_PCI_8821AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8821AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8821AE\n");
} else if (deviceid == RTL_PCI_8812AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8812AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8812AE\n");
} else if (deviceid == RTL_PCI_8822BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8822BE;
rtlhal->bandset = BAND_ON_BOTH;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8822BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8822BE\n");
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
@@ -1982,17 +1982,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
if (revisionid == 0 || revisionid == 1) {
if (revisionid == 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC0\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC0\n");
rtlhal->interfaceindex = 0;
} else if (revisionid == 1) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC1\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC1\n");
rtlhal->interfaceindex = 1;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
venderid, deviceid, revisionid);
rtlhal->interfaceindex = 0;
}
@@ -2026,9 +2026,9 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Pci Bridge Vendor is found index: %d\n",
- tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Pci Bridge Vendor is found index: %d\n",
+ tmp);
break;
}
}
@@ -2056,22 +2056,22 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
- pcipriv->ndis_adapter.busnumber,
- pcipriv->ndis_adapter.devnumber,
- pcipriv->ndis_adapter.funcnumber,
- pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
- pcipriv->ndis_adapter.pcibridge_busnum,
- pcipriv->ndis_adapter.pcibridge_devnum,
- pcipriv->ndis_adapter.pcibridge_funcnum,
- pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
- pcipriv->ndis_adapter.amd_l1_patch);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
@@ -2099,8 +2099,8 @@ static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
rtlpci->using_msi = true;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "MSI Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "MSI Interrupt Mode!\n");
return 0;
}
@@ -2117,8 +2117,8 @@ static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
return ret;
rtlpci->using_msi = false;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "Pin-based Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "Pin-based Interrupt Mode!\n");
return 0;
}
@@ -2245,10 +2245,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
goto fail2;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
- pmem_start, pmem_len, pmem_flags,
- rtlpriv->io.pci_mem_start);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start);
/* Disable Clk Request */
pci_write_config_byte(pdev, 0x81, 0);
@@ -2310,9 +2310,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpci = rtl_pcidev(pcipriv);
err = rtl_pci_intr_mode_decide(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s: failed to register IRQ handler\n",
- wiphy_name(hw->wiphy));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy));
goto fail3;
}
rtlpci->irq_alloc = 1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index fff8dda14023..9a4601a71eae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -19,8 +19,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
rtlpriv->intf_ops->reset_trx_ring(hw);
if (is_hal_stop(rtlhal))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Driver is already down!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Driver is already down!\n");
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
@@ -81,9 +81,9 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
if (ppsc->rfchange_inprogress) {
spin_unlock(&rtlpriv->locks.rf_ps_lock);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "RF Change in progress! Wait to set..state_toset(%d).\n",
- state_toset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "RF Change in progress! Wait to set..state_toset(%d).\n",
+ state_toset);
/* Set RF after the previous action is done. */
while (ppsc->rfchange_inprogress) {
@@ -195,8 +195,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
enum rf_pwrstate rtstate;
if (mac->opmode != NL80211_IFTYPE_STATION) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not station return\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not station return\n");
return;
}
@@ -232,8 +232,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
!ppsc->swrf_processing &&
(mac->link_state == MAC80211_NOLINK) &&
!mac->act_scanning) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "IPSEnter(): Turn off RF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "IPSEnter(): Turn off RF\n");
ppsc->inactive_pwrstate = ERFOFF;
ppsc->in_powersavemode = true;
@@ -311,8 +311,8 @@ static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
ppsc->last_delaylps_stamp_jiffies);
if (ps_timediff < 2000) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
return false;
}
@@ -357,9 +357,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
if ((ppsc->fwctrl_lps) && ppsc->report_linked) {
if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS leave ps_mode:%x\n",
- FW_PS_ACTIVE_MODE);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE);
enter_fwlps = false;
ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
ppsc->smart_ps = 0;
@@ -372,9 +372,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
} else {
if (rtl_get_fwlps_doze(hw)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS enter ps_mode:%x\n",
- ppsc->fwctrl_psmode);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode);
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
enter_fwlps = true;
@@ -424,8 +424,8 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
* bt_ccoexist may ask to enter lps.
* In normal case, this constraint move to rtl_lps_set_psmode().
*/
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Enter 802.11 power save mode...\n");
rtl_lps_set_psmode(hw, EAUTOPS);
mutex_unlock(&rtlpriv->locks.lps_mutex);
@@ -453,8 +453,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Busy Traffic,Leave 802.11 power save..\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Busy Traffic,Leave 802.11 power save..\n");
rtl_lps_set_psmode(hw, EACTIVE);
}
@@ -538,8 +538,8 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.ps_work, MSECS(5));
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
@@ -634,9 +634,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
/* this print should always be dtim_conter = 0 &
* sleep = dtim_period, that meaons, we should
* awake before every dtim */
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "dtim_counter:%x will sleep :%d beacon_intv\n",
- rtlpriv->psc.dtim_counter, sleep_intv);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "dtim_counter:%x will sleep :%d beacon_intv\n",
+ rtlpriv->psc.dtim_counter, sleep_intv);
/* we tested that 40ms is enough for sw & hw sw delay */
queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
@@ -748,9 +748,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
if (ie[0] == 12) {
find_p2p_ps_ie = true;
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -761,8 +761,8 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "update NOA ie.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "update NOA ie.\n");
p2pinfo->noa_index = noa_index;
p2pinfo->opp_ps = (ie[4] >> 7);
p2pinfo->ctwindow = ie[4] & 0x7F;
@@ -833,7 +833,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
if (ie == NULL)
return;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
noa_len = READEF2BYTE((__le16 *)&ie[1]);
@@ -841,13 +841,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
return;
if (ie[0] == 12) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, "noa ie ",
ie, noa_len);
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -905,7 +905,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n" , p2p_ps_state);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n", p2p_ps_state);
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
p2pinfo->p2p_ps_state = p2p_ps_state;
@@ -957,18 +957,18 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "ctwindow %x oppps %x\n",
- p2pinfo->ctwindow , p2pinfo->opp_ps);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "count %x duration %x index %x interval %x start time %x noa num %x\n",
- p2pinfo->noa_count_type[0],
- p2pinfo->noa_duration[0],
- p2pinfo->noa_index,
- p2pinfo->noa_interval[0],
- p2pinfo->noa_start_time[0],
- p2pinfo->noa_num);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "ctwindow %x oppps %x\n",
+ p2pinfo->ctwindow, p2pinfo->opp_ps);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "count %x duration %x index %x interval %x start time %x noa num %x\n",
+ p2pinfo->noa_count_type[0],
+ p2pinfo->noa_duration[0],
+ p2pinfo->noa_index,
+ p2pinfo->noa_interval[0],
+ p2pinfo->noa_start_time[0],
+ p2pinfo->noa_num);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
}
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 8be31e0ad878..4cf8face0bbd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -393,13 +393,13 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.country_code =
channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
- rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
}
@@ -414,9 +414,9 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.alpha2[1] = '0';
}
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- "rtl: Country alpha2 being used: %c%c\n",
- rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_TRACE,
+ "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
_rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
@@ -428,7 +428,7 @@ void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
_rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 9bcb187d37dc..fc1548ad434f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -259,15 +259,15 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
? USB_HIGH_SPEED_BULK_SIZE
: USB_FULL_SPEED_BULK_SIZE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
- rtlusb->max_bulk_out_size);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
+ rtlusb->max_bulk_out_size);
for (i = 0; i < __RTL_TXQ_NUM; i++) {
u32 ep_num = rtlusb->ep_map.ep_mapping[i];
if (!ep_num) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Invalid endpoint map setting!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Invalid endpoint map setting!\n");
return -EINVAL;
}
}
@@ -337,10 +337,10 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
else if (usb_endpoint_dir_out(pep_desc))
rtlusb->out_ep_nums++;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
- pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
- pep_desc->bInterval);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
+ pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+ pep_desc->bInterval);
}
if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
pr_err("Too few input end points found\n");
@@ -931,7 +931,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
}
if (rtlpriv->psc.sw_ps_enabled) {
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index b61b073031e5..94e69e97d5f5 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -210,7 +210,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
if (ret)
- return -EBUSY;
+ return ret;
idx++;
} while (1);
@@ -224,6 +224,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
+ int ret;
rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
@@ -245,8 +246,9 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
return -EALREADY;
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
- if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
- return -EINVAL;
+ ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 029a89aead53..9144ed14b074 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3976,11 +3976,19 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static u32 nvme_aer_type(u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ return result & 0x7;
+}
- trace_nvme_async_event(ctrl, aer_notice_type);
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
+
+static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+{
+ u32 aer_notice_type = nvme_aer_subtype(result);
switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
@@ -4011,24 +4019,40 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
}
}
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
+ trace_nvme_async_event(ctrl, result);
switch (aer_type) {
case NVME_AER_NOTICE:
nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
- trace_nvme_async_event(ctrl, aer_type);
ctrl->aen_result = result;
break;
default:
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 35bac7a25422..700fdce2ecf1 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
),
TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
__entry->ctrl_id, __entry->result,
- __print_symbolic(__entry->result,
- aer_name(NVME_AER_NOTICE_NS_CHANGED),
- aer_name(NVME_AER_NOTICE_ANA),
- aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
- aer_name(NVME_AER_NOTICE_DISC_CHANGED),
- aer_name(NVME_AER_ERROR),
- aer_name(NVME_AER_SMART),
- aer_name(NVME_AER_CSS),
- aer_name(NVME_AER_VS))
+ __print_symbolic(__entry->result & 0x7,
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_NOTICE),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
)
);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index b50b53db3746..34345c45a3e5 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -431,10 +431,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
struct fcloop_fcpreq *tfcp_req =
container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ unsigned long flags;
int ret = 0;
bool aborted = false;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
switch (tfcp_req->inistate) {
case INI_IO_START:
tfcp_req->inistate = INI_IO_ACTIVE;
@@ -443,11 +444,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
aborted = true;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (unlikely(aborted))
ret = -ECANCELED;
@@ -468,8 +469,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
container_of(work, struct fcloop_fcpreq, abort_rcv_work);
struct nvmefc_fcp_req *fcpreq;
bool completed = false;
+ unsigned long flags;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
switch (tfcp_req->inistate) {
case INI_IO_ABORTED:
@@ -478,11 +480,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
completed = true;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (unlikely(completed)) {
/* remove reference taken in original abort downcall */
@@ -494,9 +496,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req);
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->fcpreq = NULL;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
/* call_host_done releases reference for abort downcall */
@@ -512,11 +514,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
struct fcloop_fcpreq *tfcp_req =
container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
+ unsigned long flags;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
tfcp_req->inistate = INI_IO_COMPLETED;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -620,13 +623,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
u32 rsplen = 0, xfrlen = 0;
int fcp_err = 0, active, aborted;
u8 op = tgt_fcpreq->op;
+ unsigned long flags;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
active = tfcp_req->active;
aborted = tfcp_req->aborted;
tfcp_req->active = true;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (unlikely(active))
/* illegal - call while i/o active */
@@ -634,9 +638,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
if (unlikely(aborted)) {
/* target transport has aborted i/o prior */
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->active = false;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
tgt_fcpreq->transferred_length = 0;
tgt_fcpreq->fcp_error = -ECANCELED;
tgt_fcpreq->done(tgt_fcpreq);
@@ -693,9 +697,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
}
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->active = false;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
tgt_fcpreq->transferred_length = xfrlen;
tgt_fcpreq->fcp_error = fcp_err;
@@ -709,15 +713,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+ unsigned long flags;
/*
* mark aborted only in case there were 2 threads in transport
* (one doing io, other doing abort) and only kills ops posted
* after the abort request
*/
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->aborted = true;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
tfcp_req->status = NVME_SC_INTERNAL;
@@ -753,6 +758,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
bool abortio = true;
+ unsigned long flags;
spin_lock(&inireq->inilock);
tfcp_req = inireq->tfcp_req;
@@ -765,7 +771,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
return;
/* break initiator/target relationship for io */
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
switch (tfcp_req->inistate) {
case INI_IO_START:
case INI_IO_ACTIVE:
@@ -775,11 +781,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
abortio = false;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (abortio)
/* leave the reference while the work item is scheduled */
diff --git a/drivers/of/device.c b/drivers/of/device.c
index da8158392010..7fb870097a84 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -246,12 +246,15 @@ int of_device_request_module(struct device *dev)
if (size < 0)
return size;
- str = kmalloc(size + 1, GFP_KERNEL);
+ /* Reserve an additional byte for the trailing '\0' */
+ size++;
+
+ str = kmalloc(size, GFP_KERNEL);
if (!str)
return -ENOMEM;
of_device_get_modalias(dev, str, size);
- str[size] = '\0';
+ str[size - 1] = '\0';
ret = request_module(str);
kfree(str);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index b34b52b364d5..30c259f63239 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1295,6 +1295,13 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ if (!np)
+ return -ENODEV;
+ of_node_put(np);
+
/*
* Since probe() can be deferred we need to make sure that
* hook_fault_code is not called after __init memory is freed
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index aa61d4c219d7..79a713f5dbf4 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,6 +72,8 @@ extern int pciehp_poll_time;
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @depth: Number of additional hotplug ports in the path to the root bus,
+ * used as lock subclass for @reset_lock
* @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
@@ -102,6 +104,7 @@ struct controller {
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int depth;
unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 312cc45c44c7..f5255045b149 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -165,7 +165,7 @@ static void pciehp_check_presence(struct controller *ctrl)
{
int occupied;
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 13f3bc239c66..651664fe4058 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -674,7 +674,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
* Disable requests have higher priority than Presence Detect Changed
* or Data Link Layer State Changed events.
*/
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
if (events & DISABLE_SLOT)
pciehp_handle_disable_request(ctrl);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
@@ -808,7 +808,7 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
if (probe)
return 0;
- down_write(&ctrl->reset_lock);
+ down_write_nested(&ctrl->reset_lock, ctrl->depth);
if (!ATTN_BUTTN(ctrl)) {
ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
@@ -864,6 +864,20 @@ static inline void dbg_ctrl(struct controller *ctrl)
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+static inline int pcie_hotplug_depth(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ int depth = 0;
+
+ while (bus->parent) {
+ bus = bus->parent;
+ if (bus->self && bus->self->is_hotplug_bridge)
+ depth++;
+ }
+
+ return depth;
+}
+
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
@@ -877,6 +891,7 @@ struct controller *pcie_init(struct pcie_device *dev)
return NULL;
ctrl->pcie = dev;
+ ctrl->depth = pcie_hotplug_depth(dev->port);
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
if (pdev->hotplug_user_indicators)
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index d17f3bf36f70..ad12515a4a12 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -63,7 +63,14 @@ int pciehp_configure_device(struct controller *ctrl)
pci_assign_unassigned_bridge_resources(bridge);
pcie_bus_configure_settings(parent);
+
+ /*
+ * Release reset_lock during driver binding
+ * to avoid AB-BA deadlock with device_lock.
+ */
+ up_read(&ctrl->reset_lock);
pci_bus_add_devices(parent);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
out:
pci_unlock_rescan_remove();
@@ -104,7 +111,15 @@ void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
+
+ /*
+ * Release reset_lock during driver unbinding
+ * to avoid AB-BA deadlock with device_lock.
+ */
+ up_read(&ctrl->reset_lock);
pci_stop_and_remove_bus_device(dev);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
+
/*
* Ensure that no new Requests will be generated from
* the device.
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index bf5d80b97597..efe7abf459fd 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -606,6 +606,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
usb2->base.lane = usb2->base.ops->map(&usb2->base);
if (IS_ERR(usb2->base.lane)) {
err = PTR_ERR(usb2->base.lane);
+ tegra_xusb_port_unregister(&usb2->base);
goto out;
}
@@ -658,6 +659,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
if (IS_ERR(ulpi->base.lane)) {
err = PTR_ERR(ulpi->base.lane);
+ tegra_xusb_port_unregister(&ulpi->base);
goto out;
}
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 61cb1a4a8257..0346bf751537 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -232,6 +232,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
.properties = dexp_ursus_7w_props,
};
+static const struct property_entry dexp_ursus_kx210i_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data dexp_ursus_kx210i_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = dexp_ursus_kx210i_props,
+};
+
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -773,6 +789,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
},
},
+ {
+ /* DEXP Ursus KX210i */
+ .driver_data = (void *)&dexp_ursus_kx210i_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
+ },
+ },
{
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index 97b0e873e87d..c2d6378bb897 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -138,6 +138,9 @@ static int read_channel(struct gab *adc_bat, enum power_supply_property psp,
result);
if (ret < 0)
pr_err("read channel error\n");
+ else
+ *result *= 1000;
+
return ret;
}
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 3c81858a8261..9fc08d1de34c 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -424,7 +424,7 @@ static const struct meson_pwm_data pwm_axg_ee_data = {
};
static const char * const pwm_axg_ao_parent_names[] = {
- "aoclk81", "xtal", "fclk_div4", "fclk_div5"
+ "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
};
static const struct meson_pwm_data pwm_axg_ao_data = {
@@ -433,7 +433,7 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
};
static const char * const pwm_g12a_ao_ab_parent_names[] = {
- "xtal", "aoclk81", "fclk_div4", "fclk_div5"
+ "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
};
static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
@@ -442,7 +442,7 @@ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
};
static const char * const pwm_g12a_ao_cd_parent_names[] = {
- "xtal", "aoclk81",
+ "xtal", "g12a_ao_clk81",
};
static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 83b8be0209b7..327c780d433d 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -74,6 +74,19 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 div, rate;
int err;
+ err = clk_prepare_enable(mdp->clk_main);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return err;
+ }
+
/*
* Find period, high_width and clk_div to suit duty_ns and period_ns.
* Calculate proper div value to keep period value in the bound.
@@ -87,8 +100,11 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
rate = clk_get_rate(mdp->clk_main);
clk_div = div_u64(rate * period_ns, NSEC_PER_SEC) >>
PWM_PERIOD_BIT_WIDTH;
- if (clk_div > PWM_CLKDIV_MAX)
+ if (clk_div > PWM_CLKDIV_MAX) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
return -EINVAL;
+ }
div = NSEC_PER_SEC * (clk_div + 1);
period = div64_u64(rate * period_ns, div);
@@ -98,14 +114,17 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
high_width = div64_u64(rate * duty_ns, div);
value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
- if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
+ if (mdp->data->bls_debug && !mdp->data->has_commit) {
+ /*
+ * For MT2701, disable double buffer before writing register
+ * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+ */
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ mdp->data->con0_sel,
+ mdp->data->con0_sel);
}
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
@@ -124,8 +143,8 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
0x0);
}
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
return 0;
}
@@ -135,13 +154,16 @@ static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
int err;
- err = clk_enable(mdp->clk_main);
- if (err < 0)
+ err = clk_prepare_enable(mdp->clk_main);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
return err;
+ }
- err = clk_enable(mdp->clk_mm);
+ err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
- clk_disable(mdp->clk_main);
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
return err;
}
@@ -158,8 +180,8 @@ static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
0x0);
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
}
static const struct pwm_ops mtk_disp_pwm_ops = {
@@ -194,14 +216,6 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
if (IS_ERR(mdp->clk_mm))
return PTR_ERR(mdp->clk_mm);
- ret = clk_prepare(mdp->clk_main);
- if (ret < 0)
- return ret;
-
- ret = clk_prepare(mdp->clk_mm);
- if (ret < 0)
- goto disable_clk_main;
-
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
mdp->chip.base = -1;
@@ -209,44 +223,22 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&mdp->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- goto disable_clk_mm;
+ dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
+ return ret;
}
platform_set_drvdata(pdev, mdp);
- /*
- * For MT2701, disable double buffer before writing register
- * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
- */
- if (!mdp->data->has_commit) {
- mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
- mdp->data->bls_debug_mask,
- mdp->data->bls_debug_mask);
- mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
- mdp->data->con0_sel,
- mdp->data->con0_sel);
- }
-
return 0;
-
-disable_clk_mm:
- clk_unprepare(mdp->clk_mm);
-disable_clk_main:
- clk_unprepare(mdp->clk_main);
- return ret;
}
static int mtk_disp_pwm_remove(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&mdp->chip);
- clk_unprepare(mdp->clk_mm);
- clk_unprepare(mdp->clk_main);
+ pwmchip_remove(&mdp->chip);
- return ret;
+ return 0;
}
static const struct mtk_pwm_data mt2701_pwm_data = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0179716f74d7..06da271ad5dd 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -217,6 +217,78 @@ void regulator_unlock(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unlock);
+/**
+ * regulator_lock_two - lock two regulators
+ * @rdev1: first regulator
+ * @rdev2: second regulator
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * Locks both rdevs using the regulator_ww_class.
+ */
+static void regulator_lock_two(struct regulator_dev *rdev1,
+ struct regulator_dev *rdev2,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ struct regulator_dev *tmp;
+ int ret;
+
+ ww_acquire_init(ww_ctx, &regulator_ww_class);
+
+ /* Try to just grab both of them */
+ ret = regulator_lock_nested(rdev1, ww_ctx);
+ WARN_ON(ret);
+ ret = regulator_lock_nested(rdev2, ww_ctx);
+ if (ret != -EDEADLOCK) {
+ WARN_ON(ret);
+ goto exit;
+ }
+
+ while (true) {
+ /*
+ * Start of loop: rdev1 was locked and rdev2 was contended.
+ * Need to unlock rdev1, slowly lock rdev2, then try rdev1
+ * again.
+ */
+ regulator_unlock(rdev1);
+
+ ww_mutex_lock_slow(&rdev2->mutex, ww_ctx);
+ rdev2->ref_cnt++;
+ rdev2->mutex_owner = current;
+ ret = regulator_lock_nested(rdev1, ww_ctx);
+
+ if (ret == -EDEADLOCK) {
+ /* More contention; swap which needs to be slow */
+ tmp = rdev1;
+ rdev1 = rdev2;
+ rdev2 = tmp;
+ } else {
+ WARN_ON(ret);
+ break;
+ }
+ }
+
+exit:
+ ww_acquire_done(ww_ctx);
+}
+
+/**
+ * regulator_unlock_two - unlock two regulators
+ * @rdev1: first regulator
+ * @rdev2: second regulator
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * The inverse of regulator_lock_two().
+ */
+
+static void regulator_unlock_two(struct regulator_dev *rdev1,
+ struct regulator_dev *rdev2,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ regulator_unlock(rdev2);
+ regulator_unlock(rdev1);
+ ww_acquire_fini(ww_ctx);
+}
+
static bool regulator_supply_is_couple(struct regulator_dev *rdev)
{
struct regulator_dev *c_rdev;
@@ -344,6 +416,7 @@ static void regulator_lock_dependent(struct regulator_dev *rdev,
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
+ old_contended_rdev->mutex_owner = current;
}
err = regulator_lock_recursive(rdev,
@@ -1418,8 +1491,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
/**
* set_supply - set regulator supply regulator
- * @rdev: regulator name
- * @supply_rdev: supply regulator name
+ * @rdev: regulator (locked)
+ * @supply_rdev: supply regulator (locked)
*
* Called by platform initialisation code to set the supply regulator for this
* regulator. This ensures that a regulators supply will also be enabled by the
@@ -1591,6 +1664,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
struct regulator *regulator;
int err = 0;
+ lockdep_assert_held_once(&rdev->mutex.base);
+
if (dev) {
char buf[REG_STR_SIZE];
int size;
@@ -1618,9 +1693,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
regulator->rdev = rdev;
regulator->supply_name = supply_name;
- regulator_lock(rdev);
list_add(&regulator->list, &rdev->consumer_list);
- regulator_unlock(rdev);
if (dev) {
regulator->dev = dev;
@@ -1786,6 +1859,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
+ struct ww_acquire_ctx ww_ctx;
int ret = 0;
/* No supply to resolve? */
@@ -1852,23 +1926,23 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
* between rdev->supply null check and setting rdev->supply in
* set_supply() from concurrent tasks.
*/
- regulator_lock(rdev);
+ regulator_lock_two(rdev, r, &ww_ctx);
/* Supply just resolved by a concurrent task? */
if (rdev->supply) {
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
/*
* In set_machine_constraints() we may have turned this regulator on
@@ -1983,7 +2057,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
return regulator;
}
+ regulator_lock(rdev);
regulator = create_regulator(rdev, dev, id);
+ regulator_unlock(rdev);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
module_put(rdev->owner);
@@ -5659,6 +5735,7 @@ static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx)
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
+ old_contended_rdev->mutex_owner = current;
}
err = regulator_summary_lock_all(ww_ctx,
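For context: regulator_lock_two() above is an instance of the usual wait/wound
backoff pattern: take the first lock, try the second, and on contention drop the
first, acquire the formerly contended one blocking, then retry with the roles
swapped. A minimal userspace analog using POSIX mutexes, with trylock standing
in for the ww_mutex -EDEADLOCK case (all names below are illustrative, not
kernel API):

#include <pthread.h>

struct res {
	pthread_mutex_t lock;
};

/* Sketch of the lock-both-with-backoff loop; it never blocks while holding
 * the other lock, so two callers cannot deadlock against each other. */
static void lock_two(struct res *a, struct res *b)
{
	for (;;) {
		pthread_mutex_lock(&a->lock);
		if (pthread_mutex_trylock(&b->lock) == 0)
			return;			/* got both */
		/* Contended: drop what we hold, then swap so the next
		 * iteration takes the contended lock blocking first. */
		pthread_mutex_unlock(&a->lock);
		struct res *tmp = a;
		a = b;
		b = tmp;
	}
}

static void unlock_two(struct res *a, struct res *b)
{
	pthread_mutex_unlock(&b->lock);	/* release in reverse order */
	pthread_mutex_unlock(&a->lock);
}
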
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
index e0e627b0106e..b94da4992376 100644
--- a/drivers/regulator/stm32-pwr.c
+++ b/drivers/regulator/stm32-pwr.c
@@ -129,17 +129,16 @@ static const struct regulator_desc stm32_pwr_desc[] = {
static int stm32_pwr_regulator_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct stm32_pwr_reg *priv;
void __iomem *base;
struct regulator_dev *rdev;
struct regulator_config config = { };
int i, ret = 0;
- base = of_iomap(np, 0);
- if (!base) {
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map IO memory\n");
- return -ENOMEM;
+ return PTR_ERR(base);
}
config.dev = &pdev->dev;
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index ee13d23b43a9..2ed8f96efe8e 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -129,6 +129,7 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
+ of_node_put(it.node);
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
@@ -150,8 +151,10 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
it.node->name);
}
- if (!mem)
+ if (!mem) {
+ of_node_put(it.node);
return -ENOMEM;
+ }
rproc_add_carveout(rproc, mem);
index++;
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 2cf4b2992bfc..041dff62ddff 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -211,11 +211,13 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
+ of_node_put(it.node);
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
+ of_node_put(it.node);
dev_err(dev, "memory region not valid %pa\n",
&rmem->base);
return -EINVAL;
@@ -242,8 +244,10 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
it.node->name);
}
- if (!mem)
+ if (!mem) {
+ of_node_put(it.node);
return -ENOMEM;
+ }
rproc_add_carveout(rproc, mem);
index++;
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 89e5ba0dae69..1cf98542578f 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -23,7 +23,7 @@ static int meson_vrtc_read_time(struct device *dev, struct rtc_time *tm)
struct timespec64 time;
dev_dbg(dev, "%s\n", __func__);
- ktime_get_raw_ts64(&time);
+ ktime_get_real_ts64(&time);
rtc_time64_to_tm(time.tv_sec, tm);
return 0;
@@ -101,7 +101,7 @@ static int __maybe_unused meson_vrtc_suspend(struct device *dev)
long alarm_secs;
struct timespec64 time;
- ktime_get_raw_ts64(&time);
+ ktime_get_real_ts64(&time);
local_time = time.tv_sec;
dev_dbg(dev, "alarm_time = %lus, local_time=%lus\n",
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index a2941c875a06..bf502fcd6077 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>
/*
* The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 3362f235e891..187565830b5b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3000,7 +3000,7 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
return 0;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
- blk_mq_requeue_request(req, false);
+ blk_mq_requeue_request(req, true);
spin_unlock_irq(&cqr->dq->lock);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 8930696021fb..af5238ab6309 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10226,7 +10226,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
goto out_iounmap_all;
} else {
error = -ENOMEM;
- goto out_iounmap_all;
+ goto out_iounmap_ctrl;
}
}
@@ -10244,7 +10244,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
dev_err(&pdev->dev,
"ioremap failed for SLI4 HBA dpp registers.\n");
error = -ENOMEM;
- goto out_iounmap_ctrl;
+ goto out_iounmap_all;
}
phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
}
@@ -10269,9 +10269,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
return 0;
out_iounmap_all:
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ if (phba->sli4_hba.drbl_regs_memmap_p)
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ if (phba->sli4_hba.ctrl_regs_memmap_p)
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
iounmap(phba->sli4_hba.conf_regs_memmap_p);
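The lpfc change above is the classic goto-unwind ordering fix: each error label
must tear down only what was actually set up before the failure, and a mapping
that can legitimately be absent gets a NULL check before it is undone. A generic
sketch of the idiom with made-up resources (not lpfc code):

#include <stdio.h>
#include <stdlib.h>

static int setup_example(void)
{
	FILE *cfg, *log;
	char *buf;
	int err;

	cfg = fopen("/etc/hostname", "r");
	if (!cfg)
		return -1;

	buf = malloc(4096);
	if (!buf) {
		err = -1;
		goto out_close_cfg;	/* buf was never allocated: skip free() */
	}

	log = fopen("/tmp/example.log", "w");
	if (!log) {
		err = -1;
		goto out_free_buf;	/* log was never opened: skip fclose() */
	}

	/* ... use cfg, buf and log ... */
	fclose(log);
	free(buf);
	fclose(cfg);
	return 0;

out_free_buf:
	free(buf);
out_close_cfg:
	fclose(cfg);
	return err;
}
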
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8b1ba690039b..8c88190673c2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1439,6 +1439,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
+ cmd = scb->cmd;
list_del_init(&scb->list);
scb->state = SCB_FREE;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 02b999d48ca1..ae805f91eafa 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -208,8 +208,8 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
int bits_per_word)
{
- /* QE uses Little Endian for words > 8
- * so transform all words > 8 into 8 bits
+ /* CPM/QE uses Little Endian for words > 8
+ * so transform 16 and 32 bits words into 8 bits
* Unfortunately that doesn't work for LSB so
* reject these for now */
/* Note: 32 bits word, LSB works iff
@@ -217,9 +217,11 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
if (spi->mode & SPI_LSB_FIRST &&
bits_per_word > 8)
return -EINVAL;
- if (bits_per_word > 8)
+ if (bits_per_word <= 8)
+ return bits_per_word;
+ if (bits_per_word == 16 || bits_per_word == 32)
return 8; /* pretend its 8 bits */
- return bits_per_word;
+ return -EINVAL;
}
static int fsl_spi_setup_transfer(struct spi_device *spi,
@@ -249,7 +251,7 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
mpc8xxx_spi,
bits_per_word);
- else if (mpc8xxx_spi->flags & SPI_QE)
+ else
bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
bits_per_word);
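The reworked quirk above accepts only 8, 16 and 32 bit words in CPM/QE mode and
maps 16 and 32 down to 8-bit transfers. Restated as a small standalone helper
(the LSB-first rejection handled earlier in the kernel function is omitted
here):

#include <errno.h>
#include <stdio.h>

static int map_bits_per_word(int bits_per_word)
{
	if (bits_per_word <= 8)
		return bits_per_word;
	if (bits_per_word == 16 || bits_per_word == 32)
		return 8;	/* pretend it is 8 bits */
	return -EINVAL;		/* e.g. 24-bit words are not supported */
}

int main(void)
{
	int sizes[] = { 4, 8, 16, 24, 32 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%2d -> %d\n", sizes[i], map_bits_per_word(sizes[i]));
	return 0;
}
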
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ead6c211047d..ebcf9f4d5679 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1276,18 +1276,22 @@ static int spi_qup_remove(struct platform_device *pdev)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
- ret = spi_qup_set_state(controller, QUP_STATE_RESET);
- if (ret)
- return ret;
+ if (ret >= 0) {
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
+ ERR_PTR(ret));
- spi_qup_release_dma(master);
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ } else {
+ dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
+ ERR_PTR(ret));
+ }
- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
+ spi_qup_release_dma(master);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index c16b60f645a4..8ca7e004a53d 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -348,7 +348,8 @@ static int spmi_drv_remove(struct device *dev)
const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
pm_runtime_get_sync(dev);
- sdrv->remove(to_spmi_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_spmi_device(dev));
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index ed404355ea4c..a00693c61870 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -101,7 +101,7 @@ struct ad2s1210_state {
static const int ad2s1210_mode_vals[4][2] = {
[MOD_POS] = { 0, 0 },
[MOD_VEL] = { 0, 1 },
- [MOD_CONFIG] = { 1, 0 },
+ [MOD_CONFIG] = { 1, 1 },
};
static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index bcbf0c8cd420..be377e75703b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -767,6 +767,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
else
netif_wake_queue(dev);
+ priv->bfirst_after_down = false;
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 3403667a9592..8b8afa95fbba 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4084,9 +4084,12 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;
- if (se_cmd->se_tfo != NULL) {
- spin_lock_irq(&se_cmd->t_state_lock);
- if (se_cmd->transport_state & CMD_T_ABORTED) {
+ if (!se_cmd->se_tfo)
+ continue;
+
+ spin_lock_irq(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ if (!(se_cmd->transport_state & CMD_T_TAS))
/*
* LIO's abort path owns the cleanup for this,
* so put it back on the list and let
@@ -4094,11 +4097,10 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
*/
list_move_tail(&cmd->i_conn_node,
&conn->conn_cmd_list);
- } else {
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- }
- spin_unlock_irq(&se_cmd->t_state_lock);
+ } else {
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
}
+ spin_unlock_irq(&se_cmd->t_state_lock);
}
spin_unlock_bh(&conn->cmd_lock);
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 33ad9d6de532..e50af46f8c19 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -305,6 +305,13 @@ extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
extern void serial8250_release_dma(struct uart_8250_port *);
+
+static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ return dma && dma->tx_running;
+}
#else
static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
@@ -320,6 +327,11 @@ static inline int serial8250_request_dma(struct uart_8250_port *p)
return -1;
}
static inline void serial8250_release_dma(struct uart_8250_port *p) { }
+
+static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
+{
+ return false;
+}
#endif
static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 907130244e1f..96ae6c6031d4 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -19,6 +19,7 @@
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
@@ -1840,6 +1841,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned char status;
unsigned long flags;
struct uart_8250_port *up = up_to_u8250p(port);
+ struct tty_port *tport = &port->state->port;
bool skip_rx = false;
if (iir & UART_IIR_NO_INT)
@@ -1863,6 +1865,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
+ if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+ pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
@@ -1918,19 +1922,25 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned int result = 0;
unsigned long flags;
unsigned int lsr;
serial8250_rpm_get(up);
spin_lock_irqsave(&port->lock, flags);
- lsr = serial_port_in(port, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ if (!serial8250_tx_dma_running(up)) {
+ lsr = serial_port_in(port, UART_LSR);
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
+ if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
+ result = TIOCSER_TEMT;
+ }
spin_unlock_irqrestore(&port->lock, flags);
serial8250_rpm_put(up);
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ return result;
}
unsigned int serial8250_do_get_mctrl(struct uart_port *port)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index b722ab941528..0b57973e7c65 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1172,7 +1172,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
* 10ms at any baud rate.
*/
sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2;
- sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
+ sport->rx_dma_rng_buf_len = (1 << fls(sport->rx_dma_rng_buf_len));
if (sport->rx_dma_rng_buf_len < 16)
sport->rx_dma_rng_buf_len = 16;
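The fsl_lpuart change switches the DMA ring length from rounding down to a power
of two, 1 << (fls(x) - 1), to rounding up, 1 << fls(x), so the ring is at least
the computed length. A small demonstration of the difference, with fls()
emulated through a compiler builtin (assumes GCC/clang):

#include <stdio.h>

static int fls_emul(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* highest set bit, 1-based */
}

int main(void)
{
	unsigned int lens[] = { 10, 16, 100, 1000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int x = lens[i];

		printf("%4u: round down = %4u, round up = %4u\n", x,
		       1u << (fls_emul(x) - 1), 1u << fls_emul(x));
	}
	return 0;
}
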
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 37c46c65a65f..bb14bd4f6e55 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -876,13 +876,13 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
return i;
}
-static void tty_write_unlock(struct tty_struct *tty)
+void tty_write_unlock(struct tty_struct *tty)
{
mutex_unlock(&tty->atomic_write_lock);
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
}
-static int tty_write_lock(struct tty_struct *tty, int ndelay)
+int tty_write_lock(struct tty_struct *tty, int ndelay)
{
if (!mutex_trylock(&tty->atomic_write_lock)) {
if (ndelay)
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 9245fffdbceb..5d833f0b412d 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -397,21 +397,42 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios);
tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios);
- ld = tty_ldisc_ref(tty);
+ if (opt & (TERMIOS_FLUSH|TERMIOS_WAIT)) {
+retry_write_wait:
+ retval = wait_event_interruptible(tty->write_wait, !tty_chars_in_buffer(tty));
+ if (retval < 0)
+ return retval;
- if (ld != NULL) {
- if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
+ if (tty_write_lock(tty, 0) < 0)
+ goto retry_write_wait;
- if (opt & TERMIOS_WAIT) {
- tty_wait_until_sent(tty, 0);
- if (signal_pending(current))
- return -ERESTARTSYS;
- }
+ /* Racing writer? */
+ if (tty_chars_in_buffer(tty)) {
+ tty_write_unlock(tty);
+ goto retry_write_wait;
+ }
+
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+
+ if ((opt & TERMIOS_WAIT) && tty->ops->wait_until_sent) {
+ tty->ops->wait_until_sent(tty, 0);
+ if (signal_pending(current)) {
+ tty_write_unlock(tty);
+ return -ERESTARTSYS;
+ }
+ }
+
+ tty_set_termios(tty, &tmp_termios);
- tty_set_termios(tty, &tmp_termios);
+ tty_write_unlock(tty);
+ } else {
+ tty_set_termios(tty, &tmp_termios);
+ }
/* FIXME: Arguably if tmp_termios == tty->termios AND the
actual requested termios was not tmp_termios then we may
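The set_termios() rework above waits for pending output to drain, takes the
write lock to hold off new writers, re-checks the buffer in case a writer raced
in, and only then flushes and applies the new termios. A hypothetical userspace
sketch of that control flow (polling stands in for wait_event_interruptible(),
and the signal/-ERESTARTSYS handling is left out):

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

struct fake_port {
	pthread_mutex_t write_lock;	/* analog of tty->atomic_write_lock */
	atomic_int chars_in_buffer;	/* analog of tty_chars_in_buffer() */
};

static void apply_settings(struct fake_port *p)
{
retry:
	/* 1. Wait, outside the lock, for pending output to drain. */
	while (atomic_load(&p->chars_in_buffer))
		usleep(1000);

	/* 2. Block out new writers. */
	pthread_mutex_lock(&p->write_lock);

	/* 3. A writer may have queued data between steps 1 and 2: re-check. */
	if (atomic_load(&p->chars_in_buffer)) {
		pthread_mutex_unlock(&p->write_lock);
		goto retry;
	}

	/* ... flush the line discipline, wait for the hardware, and apply
	 * the new settings while still holding the write lock ... */

	pthread_mutex_unlock(&p->write_lock);
}
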
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 7a86979cf140..27298ca083d1 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1084,7 +1084,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_usb_phy_init(ci);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
- return ret;
+ goto ulpi_exit;
}
ci->hw_bank.phys = res->start;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index db1aaed07e05..27b60623c280 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1496,13 +1496,11 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
+ pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto err1;
pm_runtime_forbid(dev);
@@ -1562,12 +1560,10 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_free_event_buffers(dwc);
err2:
- pm_runtime_allow(&pdev->dev);
-
-err1:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
+ pm_runtime_allow(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
disable_clks:
clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
@@ -1588,6 +1584,7 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
+ pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 0bbd180022aa..e04acf2dfa65 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2553,6 +2553,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
debugfs_remove_recursive(usb3->dentry);
device_remove_file(&pdev->dev, &dev_attr_role);
+ cancel_work_sync(&usb3->role_work);
usb_role_switch_unregister(usb3->role_sw);
usb_del_gadget_udc(&usb3->gadget);
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index f5c8e4eb62ae..dc332f5800ad 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -132,6 +132,7 @@ static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
regset->regs = regs;
regset->nregs = nregs;
regset->base = hcd->regs + base;
+ regset->dev = hcd->self.controller;
debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
}
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 3da75b367f95..5d7835299245 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -74,7 +74,6 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
/* For soc_device_attribute */
#define RCAR_XHCI_FIRMWARE_V2 BIT(0) /* FIRMWARE V2 */
-#define RCAR_XHCI_FIRMWARE_V3 BIT(1) /* FIRMWARE V3 */
static const struct soc_device_attribute rcar_quirks_match[] = {
{
@@ -147,8 +146,6 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
if (quirks & RCAR_XHCI_FIRMWARE_V2)
firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2;
- else if (quirks & RCAR_XHCI_FIRMWARE_V3)
- firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3;
else
firmware_name = priv->firmware_name;
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 2ea3157ddb6e..e65586147965 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -210,6 +210,7 @@ static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
return ring->enqueue;
}
+/* @dequeue may be NULL if ring is unallocated or freed */
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
if (ring->dequeue < ring->end)
@@ -484,7 +485,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
- while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
@@ -523,7 +524,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
- while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e51ed6b45a47..972d9ddb0473 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -595,6 +595,11 @@ static void option_instat_callback(struct urb *urb);
#define SIERRA_VENDOR_ID 0x1199
#define SIERRA_PRODUCT_EM9191 0x90d3
+/* UNISOC (Spreadtrum) products */
+#define UNISOC_VENDOR_ID 0x1782
+/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
+#define TOZED_PRODUCT_LT70C 0x4055
+
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -2225,6 +2230,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 622363af4c1b..fd681eec49aa 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -227,6 +227,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
}
change_size = true;
+ data_changed = true;
} else if (vnode->status.type == AFS_FTYPE_DIR) {
/* Expected directory change is handled elsewhere so
* that we can locally edit the directory and save on a
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 343400d49bd1..36ef3228bac8 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -29,7 +29,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
} else {
num_bytes = 0;
}
- if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
+ if (qgroup_to_release_ret &&
+ block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
qgroup_to_release = block_rsv->qgroup_rsv_reserved -
block_rsv->qgroup_rsv_size;
block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 822c615840e8..1420df997485 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -5138,10 +5138,12 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
struct btrfs_key key;
+ struct btrfs_key orig_key;
struct btrfs_disk_key found_key;
int ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
+ orig_key = key;
if (key.offset > 0) {
key.offset--;
@@ -5158,8 +5160,36 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
+ if (ret <= 0)
return ret;
+
+ /*
+ * Previous key not found. Even if we were at slot 0 of the leaf we had
+ * before releasing the path and calling btrfs_search_slot(), we now may
+ * be in a slot pointing to the same original key - this can happen if
+ * after we released the path, one or more items were moved from a
+ * sibling leaf into the front of the leaf we had due to an insertion
+ * (see push_leaf_right()).
+ * If we hit this case and our slot is > 0, just decrement the slot
+ * so that the caller does not process the same key again, which may or
+ * may not break the caller, depending on its logic.
+ */
+ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
+ btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
+ ret = comp_keys(&found_key, &orig_key);
+ if (ret == 0) {
+ if (path->slots[0] > 0) {
+ path->slots[0]--;
+ return 0;
+ }
+ /*
+ * At slot 0, same key as before, it means orig_key is
+ * the lowest, leftmost, key in the tree. We're done.
+ */
+ return 1;
+ }
+ }
+
btrfs_item_key(path->nodes[0], &found_key, 0);
ret = comp_keys(&found_key, &key);
/*
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 64b443aa61ca..22f95a5a58fe 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4339,6 +4339,11 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
if (IS_ERR(sa))
return PTR_ERR(sa);
+ if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
ret = mnt_want_write_file(file);
if (ret)
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index f4edadf1067f..cec6c283a691 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -109,10 +109,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
pr_cont("shared data backref parent %llu count %u\n",
offset, btrfs_shared_data_ref_count(eb, sref));
/*
- * offset is supposed to be a tree block which
- * must be aligned to nodesize.
+ * Offset is supposed to be a tree block which must be
+ * aligned to sectorsize.
*/
- if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
+ if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
pr_info(
"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
offset, eb->fs_info->sectorsize);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index a82d757a2341..cd0030533bf7 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1678,7 +1678,7 @@ smb2_copychunk_range(const unsigned int xid,
pcchunk->SourceOffset = cpu_to_le64(src_off);
pcchunk->TargetOffset = cpu_to_le64(dest_off);
pcchunk->Length =
- cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
+ cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
kfree(retbuf);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 9efc243e991a..3547388ad60b 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <linux/security.h>
@@ -1119,7 +1120,14 @@ static int debugfs_show_regset32(struct seq_file *s, void *data)
{
struct debugfs_regset32 *regset = s->private;
+ if (regset->dev)
+ pm_runtime_get_sync(regset->dev);
+
debugfs_print_regs32(s, regset->regs, regset->nregs, regset->base, "");
+
+ if (regset->dev)
+ pm_runtime_put(regset->dev);
+
return 0;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 544a453f3076..cc7a42682814 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -226,7 +226,7 @@ struct erofs_inode {
unsigned char datalayout;
unsigned char inode_isize;
- unsigned short xattr_isize;
+ unsigned int xattr_isize;
unsigned int xattr_shared_count;
unsigned int *xattr_shared_xattrs;
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index fff574100721..b5ee58fdd82f 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -179,6 +179,10 @@ static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
m->clusterofs = le16_to_cpu(di->di_clusterofs);
+ if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
m->pblk = le32_to_cpu(di->di_u.blkaddr);
break;
default:
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 031ff3f19018..b68cee75f5c5 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -303,6 +303,22 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc;
}
+static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ ext4_group_t block_group,
+ struct buffer_head *bh)
+{
+ ext4_grpblk_t next_zero_bit;
+ unsigned long bitmap_size = sb->s_blocksize * 8;
+ unsigned int offset = num_clusters_in_group(sb, block_group);
+
+ if (bitmap_size <= offset)
+ return 0;
+
+ next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);
+
+ return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
+}
+
/*
* Return the block number which was discovered to be invalid, or 0 if
* the block bitmap is valid.
@@ -395,6 +411,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
return -EFSCORRUPTED;
}
+ blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
+ if (unlikely(blk != 0)) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
+ block_group, blk);
+ ext4_mark_group_bitmap_corrupted(sb, block_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ return -EFSCORRUPTED;
+ }
set_buffer_verified(bh);
verified:
ext4_unlock_group(sb, block_group);
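ext4_valid_block_bitmap_padding() above verifies that every bit past the last
cluster of the group is set, since those padding bits must never look
allocatable. A standalone analog with a naive little-endian bit scan
(simplified, not the kernel bitmap helpers):

#include <stdio.h>

/* Return the first zero bit at or after 'start', or 'size' if none. */
static unsigned long find_next_zero_bit(const unsigned char *map,
					unsigned long size, unsigned long start)
{
	for (unsigned long i = start; i < size; i++)
		if (!(map[i / 8] & (1u << (i % 8))))
			return i;
	return size;
}

/* Returns 0 if the padding is valid, otherwise the offending bit number. */
static unsigned long check_bitmap_padding(const unsigned char *bitmap,
					  unsigned long bitmap_bits,
					  unsigned long clusters_in_group)
{
	unsigned long bit;

	if (bitmap_bits <= clusters_in_group)
		return 0;

	bit = find_next_zero_bit(bitmap, bitmap_bits, clusters_in_group);
	return bit < bitmap_bits ? bit : 0;
}

int main(void)
{
	unsigned char bitmap[4] = { 0x00, 0x00, 0x00, 0xf0 };	/* 32 bits */

	/* 28 clusters: padding bits 28..31 are all set -> valid (prints 0). */
	printf("%lu\n", check_bitmap_padding(bitmap, 32, 28));
	/* 24 clusters: padding bit 24 is clear -> reported as corrupt. */
	printf("%lu\n", check_bitmap_padding(bitmap, 32, 24));
	return 0;
}
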
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 27760c39f70e..478c35d45378 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -6027,7 +6027,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
* mapped - no physical clusters have been allocated, and the
* file has no extents
*/
- if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
+ ext4_has_inline_data(inode))
return 0;
/* search for the extent closest to the first block in the cluster */
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 9d0bed2e4d05..80b5288cc0e9 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -269,14 +269,12 @@ static void __es_find_extent_range(struct inode *inode,
/* see if the extent has been cached */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u) %llu %x\n",
- lblk, es1->es_lblk, es1->es_len,
- ext4_es_pblock(es1), ext4_es_status(es1));
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u) %llu %x\n",
+ lblk, es1->es_lblk, es1->es_len,
+ ext4_es_pblock(es1), ext4_es_status(es1));
+ goto out;
}
es1 = __es_tree_search(&tree->root, lblk);
@@ -295,7 +293,7 @@ static void __es_find_extent_range(struct inode *inode,
}
if (es1 && matching_fn(es1)) {
- tree->cache_es = es1;
+ WRITE_ONCE(tree->cache_es, es1);
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
@@ -916,14 +914,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
/* find extent in cache firstly */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u)\n",
- lblk, es1->es_lblk, es1->es_len);
- found = 1;
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u)\n",
+ lblk, es1->es_lblk, es1->es_len);
+ found = 1;
+ goto out;
}
node = tree->root.rb_node;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 8b8f7f00b835..549ceac9099c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -32,6 +32,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_entry *entry;
struct ext4_inode *raw_inode;
+ void *end;
int free, min_offs;
if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
@@ -55,14 +56,23 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
raw_inode = ext4_raw_inode(iloc);
header = IHDR(inode, raw_inode);
entry = IFIRST(header);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
/* Compute min_offs. */
- for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+ while (!IS_LAST_ENTRY(entry)) {
+ void *next = EXT4_XATTR_NEXT(entry);
+
+ if (next >= end) {
+ EXT4_ERROR_INODE(inode,
+ "corrupt xattr in inline inode");
+ return 0;
+ }
if (!entry->e_value_inum && entry->e_value_size) {
size_t offs = le16_to_cpu(entry->e_value_offs);
if (offs < min_offs)
min_offs = offs;
}
+ entry = next;
}
free = min_offs -
((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
@@ -348,7 +358,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
value, len);
- if (error == -ENODATA)
+ if (error < 0)
goto out;
BUFFER_TRACE(is.iloc.bh, "get_write_access");
@@ -1172,6 +1182,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
ext4_initialize_dirent_tail(dir_block,
inode->i_sb->s_blocksize);
set_buffer_uptodate(dir_block);
+ unlock_buffer(dir_block);
err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
if (err)
return err;
@@ -1245,6 +1256,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
if (!S_ISDIR(inode->i_mode)) {
memcpy(data_bh->b_data, buf, inline_size);
set_buffer_uptodate(data_bh);
+ unlock_buffer(data_bh);
error = ext4_handle_dirty_metadata(handle,
inode, data_bh);
} else {
@@ -1252,7 +1264,6 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
buf, inline_size);
}
- unlock_buffer(data_bh);
out_restore:
if (error)
ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 3c3166ba4364..caab9781bee7 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3895,7 +3895,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
trace_ext4_mb_release_group_pa(sb, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
- BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+ if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
+ ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
+ e4b->bd_group, group, pa->pa_pstart);
+ return 0;
+ }
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 789a9f6a2ec6..9bbd52508656 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2475,11 +2475,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
crc = crc16(crc, (__u8 *)gdp, offset);
offset += sizeof(gdp->bg_checksum); /* skip checksum */
/* for checksum of struct ext4_group_desc do the rest...*/
- if (ext4_has_feature_64bit(sb) &&
- offset < le16_to_cpu(sbi->s_es->s_desc_size))
+ if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
crc = crc16(crc, (__u8 *)gdp + offset,
- le16_to_cpu(sbi->s_es->s_desc_size) -
- offset);
+ sbi->s_desc_size - offset);
out:
return cpu_to_le16(crc);
@@ -5607,9 +5605,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
#ifdef CONFIG_QUOTA
- /* Release old quota file names */
- for (i = 0; i < EXT4_MAXQUOTAS; i++)
- kfree(old_opts.s_qf_names[i]);
if (enable_quota) {
if (sb_any_quota_suspended(sb))
dquot_resume(sb, -1);
@@ -5619,6 +5614,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
}
+ /* Release old quota file names */
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(old_opts.s_qf_names[i]);
#endif
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
ext4_release_system_zone(sb);
@@ -5635,6 +5633,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
return 0;
restore_opts:
+ /*
+ * If there was a failing r/w to ro transition, we may need to
+ * re-enable quota
+ */
+ if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) &&
+ sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_mount_opt2 = old_opts.s_mount_opt2;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 008d9b897987..498ef733b73c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -2569,6 +2569,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
.in_inode = !!entry->e_value_inum,
};
struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
+ int needs_kvfree = 0;
int error;
is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
@@ -2591,7 +2592,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
error = -ENOMEM;
goto out;
}
-
+ needs_kvfree = 1;
error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
if (error)
goto out;
@@ -2630,7 +2631,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
out:
kfree(b_entry_name);
- if (entry->e_value_inum && buffer)
+ if (needs_kvfree && buffer)
kvfree(buffer);
if (is)
brelse(is->iloc.bh);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ef08ef017030..8d4e66f36cf7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2821,15 +2821,16 @@ int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
struct dquot *transfer_to[MAXQUOTAS] = {};
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
- int err = 0;
+ int err;
transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
- err = __dquot_transfer(inode, transfer_to);
- if (err)
- set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- dqput(transfer_to[PRJQUOTA]);
- }
+ if (IS_ERR(transfer_to[PRJQUOTA]))
+ return PTR_ERR(transfer_to[PRJQUOTA]);
+
+ err = __dquot_transfer(inode, transfer_to);
+ if (err)
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ dqput(transfer_to[PRJQUOTA]);
return err;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index ed95c27e9302..9cb2a87247b2 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -892,12 +892,20 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
}
+ /*
+ * Copied from ext4_rename: we need to protect against old.inode
+ * directory getting converted from inline directory format into
+ * a normal one.
+ */
+ if (S_ISDIR(old_inode->i_mode))
+ inode_lock_nested(old_inode, I_MUTEX_NONDIR2);
+
err = -ENOENT;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_entry) {
if (IS_ERR(old_page))
err = PTR_ERR(old_page);
- goto out;
+ goto out_unlock_old;
}
if (S_ISDIR(old_inode->i_mode)) {
@@ -1025,6 +1033,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_unlock_op(sbi);
+ if (S_ISDIR(old_inode->i_mode))
+ inode_unlock(old_inode);
+
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -1040,6 +1051,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_put_page(old_dir_page, 0);
out_old:
f2fs_put_page(old_page, 0);
+out_unlock_old:
+ if (S_ISDIR(old_inode->i_mode))
+ inode_unlock(old_inode);
out:
if (whiteout)
iput(whiteout);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5b3a288e0f14..3ec2c16abece 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -700,7 +700,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
* is okay. The main goal is avoiding keeping an inode on
* the wrong wb for an extended period of time.
*/
- if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
+ if (hweight16(history) > WB_FRN_HIST_THR_SLOTS)
inode_switch_wbs(inode, max_id);
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 2ee30ffeb6b9..01b185670594 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -66,6 +66,8 @@
#define OPENOWNER_POOL_SIZE 8
+static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
+
const nfs4_stateid zero_stateid = {
{ .data = { 0 } },
.type = NFS4_SPECIAL_STATEID_TYPE,
@@ -329,6 +331,8 @@ int nfs41_init_clientid(struct nfs_client *clp, const struct cred *cred)
status = nfs4_proc_create_session(clp, cred);
if (status != 0)
goto out;
+ if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
+ nfs4_state_start_reclaim_reboot(clp);
nfs41_finish_session_reset(clp);
nfs_mark_client_ready(clp, NFS_CS_READY);
out:
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index fb5a9a8a13cf..2ba57e4b4f0a 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
down_read(&bmap->b_sem);
ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
- if (ret < 0) {
- ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+ if (ret < 0)
goto out;
- }
+
if (NILFS_BMAP_USE_VBN(bmap)) {
ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
&blocknr);
if (!ret)
*ptrp = blocknr;
+ else if (ret == -ENOENT) {
+ /*
+ * If there was no valid entry in DAT for the block
+ * address obtained by b_ops->bop_lookup, then pass
+ * internal code -EINVAL to nilfs_bmap_convert_error
+ * to treat it as metadata corruption.
+ */
+ ret = -EINVAL;
+ }
}
out:
up_read(&bmap->b_sem);
- return ret;
+ return nilfs_bmap_convert_error(bmap, __func__, ret);
}
int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index b23ed9a35e5e..3091d1a3edde 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2039,6 +2039,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int err;
+ if (sb_rdonly(sci->sc_super))
+ return -EROFS;
+
nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
sci->sc_cno = nilfs->ns_cno;
@@ -2724,7 +2727,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
flush_work(&sci->sc_iput_work);
- } while (ret && retrycount-- > 0);
+ } while (ret && ret != -EROFS && retrycount-- > 0);
}
/**
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 589dee962993..a3cbfd38f1f6 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -66,7 +66,7 @@ int inotify_handle_event(struct fsnotify_group *group,
struct inotify_event_info *event;
struct fsnotify_event *fsn_event;
int ret;
- int len = 0;
+ int len = 0, wd;
int alloc_len = sizeof(struct inotify_event_info);
if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info)))
@@ -90,6 +90,13 @@ int inotify_handle_event(struct fsnotify_group *group,
i_mark = container_of(inode_mark, struct inotify_inode_mark,
fsn_mark);
+ /*
+ * We can be racing with mark being detached. Don't report event with
+ * invalid wd.
+ */
+ wd = READ_ONCE(i_mark->wd);
+ if (wd == -1)
+ return 0;
/*
* Whoever is interested in the event, pays for the allocation. Do not
* trigger OOM killer in the target monitoring memcg as it may have
@@ -120,7 +127,7 @@ int inotify_handle_event(struct fsnotify_group *group,
fsn_event = &event->fse;
fsnotify_init_event(fsn_event, (unsigned long)inode);
event->mask = mask;
- event->wd = i_mark->wd;
+ event->wd = wd;
event->sync_cookie = cookie;
event->name_len = len;
if (len)
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
index 18cf94b597e0..d8542ec2f38c 100644
--- a/fs/pstore/pmsg.c
+++ b/fs/pstore/pmsg.c
@@ -7,10 +7,9 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <linux/rtmutex.h>
#include "internal.h"
-static DEFINE_RT_MUTEX(pmsg_lock);
+static DEFINE_MUTEX(pmsg_lock);
static ssize_t write_pmsg(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -29,9 +28,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
if (!access_ok(buf, count))
return -EFAULT;
- rt_mutex_lock(&pmsg_lock);
+ mutex_lock(&pmsg_lock);
ret = psinfo->write_user(&record, buf);
- rt_mutex_unlock(&pmsg_lock);
+ mutex_unlock(&pmsg_lock);
return ret ? ret : count;
}
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 59d87f9f72fb..159af6c26f4b 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -81,11 +81,15 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
struct inode *inode,
struct reiserfs_security_handle *sec)
{
+ char xattr_name[XATTR_NAME_MAX + 1] = XATTR_SECURITY_PREFIX;
int error;
- if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX))
+
+ if (XATTR_SECURITY_PREFIX_LEN + strlen(sec->name) > XATTR_NAME_MAX)
return -EINVAL;
- error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value,
+ strlcat(xattr_name, sec->name, sizeof(xattr_name));
+
+ error = reiserfs_xattr_set_handle(th, inode, xattr_name, sec->value,
sec->length, XATTR_CREATE);
if (error == -ENODATA || error == -EOPNOTSUPP)
error = 0;
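The reiserfs fix builds the full "security.<name>" attribute name in a bounded
buffer instead of trusting the caller-supplied name to carry a valid prefix. A
userspace analog of the bounded construction (snprintf() replaces strlcat(),
which not every libc provides):

#include <stdio.h>
#include <string.h>

#define XATTR_NAME_MAX		255
#define XATTR_SECURITY_PREFIX	"security."

static int build_xattr_name(char *out, size_t outlen, const char *name)
{
	if (strlen(XATTR_SECURITY_PREFIX) + strlen(name) > XATTR_NAME_MAX)
		return -1;	/* would exceed the legal xattr name length */

	snprintf(out, outlen, "%s%s", XATTR_SECURITY_PREFIX, name);
	return 0;
}

int main(void)
{
	char buf[XATTR_NAME_MAX + 1];

	if (!build_xattr_name(buf, sizeof(buf), "selinux"))
		printf("%s\n", buf);	/* prints "security.selinux" */
	return 0;
}
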
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 332c0b02a0ad..a5294e737909 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -433,6 +433,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
mutex_unlock(&dir_ui->ui_mutex);
ubifs_release_budget(c, &req);
+ fscrypt_free_filename(&nm);
return 0;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 6461f61449b1..91543e3b5b3f 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -44,6 +44,33 @@ enum {
NOT_ON_MEDIA = 3,
};
+static void do_insert_old_idx(struct ubifs_info *c,
+ struct ubifs_old_idx *old_idx)
+{
+ struct ubifs_old_idx *o;
+ struct rb_node **p, *parent = NULL;
+
+ p = &c->old_idx.rb_node;
+ while (*p) {
+ parent = *p;
+ o = rb_entry(parent, struct ubifs_old_idx, rb);
+ if (old_idx->lnum < o->lnum)
+ p = &(*p)->rb_left;
+ else if (old_idx->lnum > o->lnum)
+ p = &(*p)->rb_right;
+ else if (old_idx->offs < o->offs)
+ p = &(*p)->rb_left;
+ else if (old_idx->offs > o->offs)
+ p = &(*p)->rb_right;
+ else {
+ ubifs_err(c, "old idx added twice!");
+ kfree(old_idx);
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
+ rb_insert_color(&old_idx->rb, &c->old_idx);
+}
+
/**
* insert_old_idx - record an index node obsoleted since the last commit start.
* @c: UBIFS file-system description object
@@ -69,35 +96,15 @@ enum {
*/
static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
{
- struct ubifs_old_idx *old_idx, *o;
- struct rb_node **p, *parent = NULL;
+ struct ubifs_old_idx *old_idx;
old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
if (unlikely(!old_idx))
return -ENOMEM;
old_idx->lnum = lnum;
old_idx->offs = offs;
+ do_insert_old_idx(c, old_idx);
- p = &c->old_idx.rb_node;
- while (*p) {
- parent = *p;
- o = rb_entry(parent, struct ubifs_old_idx, rb);
- if (lnum < o->lnum)
- p = &(*p)->rb_left;
- else if (lnum > o->lnum)
- p = &(*p)->rb_right;
- else if (offs < o->offs)
- p = &(*p)->rb_left;
- else if (offs > o->offs)
- p = &(*p)->rb_right;
- else {
- ubifs_err(c, "old idx added twice!");
- kfree(old_idx);
- return 0;
- }
- }
- rb_link_node(&old_idx->rb, parent, p);
- rb_insert_color(&old_idx->rb, &c->old_idx);
return 0;
}
@@ -199,23 +206,6 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
__set_bit(DIRTY_ZNODE, &zn->flags);
__clear_bit(COW_ZNODE, &zn->flags);
- ubifs_assert(c, !ubifs_zn_obsolete(znode));
- __set_bit(OBSOLETE_ZNODE, &znode->flags);
-
- if (znode->level != 0) {
- int i;
- const int n = zn->child_cnt;
-
- /* The children now have new parent */
- for (i = 0; i < n; i++) {
- struct ubifs_zbranch *zbr = &zn->zbranch[i];
-
- if (zbr->znode)
- zbr->znode->parent = zn;
- }
- }
-
- atomic_long_inc(&c->dirty_zn_cnt);
return zn;
}
@@ -233,6 +223,42 @@ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
return ubifs_add_dirt(c, lnum, dirt);
}
+/**
+ * replace_znode - replace old znode with new znode.
+ * @c: UBIFS file-system description object
+ * @new_zn: new znode
+ * @old_zn: old znode
+ * @zbr: the branch of parent znode
+ *
+ * Replace old znode with new znode in TNC.
+ */
+static void replace_znode(struct ubifs_info *c, struct ubifs_znode *new_zn,
+ struct ubifs_znode *old_zn, struct ubifs_zbranch *zbr)
+{
+ ubifs_assert(c, !ubifs_zn_obsolete(old_zn));
+ __set_bit(OBSOLETE_ZNODE, &old_zn->flags);
+
+ if (old_zn->level != 0) {
+ int i;
+ const int n = new_zn->child_cnt;
+
+ /* The children now have new parent */
+ for (i = 0; i < n; i++) {
+ struct ubifs_zbranch *child = &new_zn->zbranch[i];
+
+ if (child->znode)
+ child->znode->parent = new_zn;
+ }
+ }
+
+ zbr->znode = new_zn;
+ zbr->lnum = 0;
+ zbr->offs = 0;
+ zbr->len = 0;
+
+ atomic_long_inc(&c->dirty_zn_cnt);
+}
+
/**
* dirty_cow_znode - ensure a znode is not being committed.
* @c: UBIFS file-system description object
@@ -265,28 +291,32 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
return zn;
if (zbr->len) {
- err = insert_old_idx(c, zbr->lnum, zbr->offs);
- if (unlikely(err))
- /*
- * Obsolete znodes will be freed by tnc_destroy_cnext()
- * or free_obsolete_znodes(), copied up znodes should
- * be added back to tnc and freed by
- * ubifs_destroy_tnc_subtree().
- */
+ struct ubifs_old_idx *old_idx;
+
+ old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
+ if (unlikely(!old_idx)) {
+ err = -ENOMEM;
goto out;
+ }
+ old_idx->lnum = zbr->lnum;
+ old_idx->offs = zbr->offs;
+
err = add_idx_dirt(c, zbr->lnum, zbr->len);
- } else
- err = 0;
+ if (err) {
+ kfree(old_idx);
+ goto out;
+ }
-out:
- zbr->znode = zn;
- zbr->lnum = 0;
- zbr->offs = 0;
- zbr->len = 0;
+ do_insert_old_idx(c, old_idx);
+ }
+
+ replace_znode(c, zn, znode, zbr);
- if (unlikely(err))
- return ERR_PTR(err);
return zn;
+
+out:
+ kfree(zn);
+ return ERR_PTR(err);
}
/**
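do_insert_old_idx() above keeps the obsolete-index tree ordered by LEB number
first and offset within the LEB second. The same two-level ordering written as a
plain comparison function, usable with qsort() (the struct is a stand-in, not
the UBIFS one):

#include <stdio.h>
#include <stdlib.h>

struct old_idx {
	int lnum;	/* LEB number: primary key */
	int offs;	/* offset within the LEB: secondary key */
};

static int old_idx_cmp(const void *a, const void *b)
{
	const struct old_idx *x = a, *y = b;

	if (x->lnum != y->lnum)
		return x->lnum < y->lnum ? -1 : 1;
	if (x->offs != y->offs)
		return x->offs < y->offs ? -1 : 1;
	return 0;	/* equal keys: the kernel code reports this as an error */
}

int main(void)
{
	struct old_idx v[] = { { 3, 128 }, { 2, 4096 }, { 3, 0 } };

	qsort(v, 3, sizeof(v[0]), old_idx_cmp);
	for (int i = 0; i < 3; i++)
		printf("lnum=%d offs=%d\n", v[i].lnum, v[i].offs);
	return 0;
}
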
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d02806513670..3dd3416f1df0 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -190,7 +190,7 @@ static inline u64 readq(const volatile void __iomem *addr)
u64 val;
__io_br();
- val = __le64_to_cpu(__raw_readq(addr));
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
return val;
}
@@ -233,7 +233,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
static inline void writeq(u64 value, volatile void __iomem *addr)
{
__io_bw();
- __raw_writeq(__cpu_to_le64(value), addr);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
}
#endif
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index d0238d3b2f31..8565c8842cde 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -35,6 +35,7 @@ struct debugfs_regset32 {
const struct debugfs_reg32 *regs;
int nregs;
void __iomem *base;
+ struct device *dev; /* Optional device for Runtime PM */
};
extern struct dentry *arch_debugfs_dir;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 30e92536c78c..01517747214a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,6 +61,9 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ * Users will enable it explicitly by enable_irq() or enable_nmi()
+ * later.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +77,7 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_AUTOEN 0x00080000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
index 9542b41eacfd..41c6bd711940 100644
--- a/include/linux/mailbox/zynqmp-ipi-message.h
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -9,7 +9,7 @@
* @data: message payload
*
* This is the structure for data used in mbox_send_message
- * the maximum length of data buffer is fixed to 12 bytes.
+ * the maximum length of data buffer is fixed to 32 bytes.
* Client is supposed to be aware of this.
*/
struct zynqmp_ipi_message {
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index f6267e2883f2..41de4156540a 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -39,7 +39,6 @@ struct nfnetlink_subsystem {
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action);
- void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index ff0ee07b1e8f..dd2801c28b99 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -522,6 +522,10 @@ enum {
NVME_AER_VS = 7,
};
+enum {
+ NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index baf02ff91a31..4476994d40c9 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -95,8 +95,8 @@ struct generic_pm_domain {
struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head master_links; /* Links with PM domain as a master */
- struct list_head slave_links; /* Links with PM domain as a slave */
+ struct list_head parent_links; /* Links with PM domain as a parent */
+ struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
struct work_struct power_off_work;
@@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
}
struct gpd_link {
- struct generic_pm_domain *master;
- struct list_head master_node;
- struct generic_pm_domain *slave;
- struct list_head slave_node;
+ struct generic_pm_domain *parent;
+ struct list_head parent_node;
+ struct generic_pm_domain *child;
+ struct list_head child_node;
/* Sub-domain's per-master domain performance state */
unsigned int performance_state;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 3b5cb66d8bc1..eb2a093ba042 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -529,4 +529,23 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+#ifdef CONFIG_PRINTK
+extern void __printk_safe_enter(void);
+extern void __printk_safe_exit(void);
+/*
+ * The printk_deferred_enter/exit macros are available only as a hack for
+ * some code paths that need to defer all printk console printing. Interrupts
+ * must be disabled for the deferred duration.
+ */
+#define printk_deferred_enter __printk_safe_enter
+#define printk_deferred_exit __printk_safe_exit
+#else
+static inline void printk_deferred_enter(void)
+{
+}
+static inline void printk_deferred_exit(void)
+{
+}
+#endif
+
#endif
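
(Aside, not part of the patch: a minimal sketch of the intended use of the new macros, mirroring what the mm/page_alloc.c hunk later in this diff does around zonelist_update_seq. Interrupts must stay disabled for the whole deferred window.)

#include <linux/irqflags.h>
#include <linux/printk.h>

static void example_locked_update(void)
{
	unsigned long flags;

	local_irq_save(flags);
	printk_deferred_enter();

	/* ... take locks that a console driver might also need ... */

	printk_deferred_exit();
	local_irq_restore(flags);
}
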
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index a6ef35184ef1..5c37fabdec10 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -90,8 +90,7 @@ struct rpc_task {
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
- tk_cred_retry : 2,
- tk_rebind_retry : 2;
+ tk_cred_retry : 2;
};
typedef void (*rpc_action)(struct rpc_task *);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f92a10b5e112..cf6c92060929 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,8 @@ enum tick_dep_bits {
TICK_DEP_BIT_POSIX_TIMER = 0,
TICK_DEP_BIT_PERF_EVENTS = 1,
TICK_DEP_BIT_SCHED = 2,
- TICK_DEP_BIT_CLOCK_UNSTABLE = 3
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
+ TICK_DEP_BIT_RCU = 4
};
#define TICK_DEP_MASK_NONE 0
@@ -116,6 +117,7 @@ enum tick_dep_bits {
#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU)
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
@@ -206,6 +208,7 @@ extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
+extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
/*
* The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
@@ -268,6 +271,10 @@ static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
+
static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
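
(Aside, not part of the patch: the new TICK_DEP_BIT_RCU dependency lets a subsystem, RCU being the intended user, keep the tick running on a nohz_full CPU for a while. A hedged sketch of the call pattern, with example_* names made up:)

#include <linux/tick.h>

static void example_hold_tick(int cpu)
{
	/* Prevent the tick from being stopped on this CPU */
	tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU);
}

static void example_release_tick(int cpu)
{
	tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU);
}
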
diff --git a/include/linux/tty.h b/include/linux/tty.h
index eb33d948788c..8be25caa97f6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -480,6 +480,8 @@ extern void __stop_tty(struct tty_struct *tty);
extern void stop_tty(struct tty_struct *tty);
extern void __start_tty(struct tty_struct *tty);
extern void start_tty(struct tty_struct *tty);
+void tty_write_unlock(struct tty_struct *tty);
+int tty_write_lock(struct tty_struct *tty, int ndelay);
extern int tty_register_driver(struct tty_driver *driver);
extern int tty_unregister_driver(struct tty_driver *driver);
extern struct device *tty_register_device(struct tty_driver *driver,
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 848db1b1569f..919d999a8c1d 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -16,7 +16,7 @@
#include <linux/string.h>
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
#include <asm/vga.h>
#endif
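
(Aside, not part of the patch: why IS_ENABLED() matters here. For a tristate option built as a module, Kconfig defines CONFIG_FOO_MODULE instead of CONFIG_FOO, so the plain defined() check missed =m builds:)

/*
 *   #if defined(CONFIG_MDA_CONSOLE)      -> true only for =y
 *   #if IS_ENABLED(CONFIG_MDA_CONSOLE)   -> true for =y and =m
 */
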
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 886866bee8b2..ad2a52a6c478 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -493,6 +493,7 @@ struct nft_set_binding {
};
enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase);
diff --git a/include/net/scm.h b/include/net/scm.h
index 1ce365f4c256..585adc1346bd 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -105,16 +105,27 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc
}
}
}
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return test_bit(SOCK_PASSSEC, &sock->flags);
+}
#else
static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
{ }
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return false;
+}
#endif /* CONFIG_SECURITY_NETWORK */
static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm, int flags)
{
if (!msg->msg_control) {
- if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
+ scm_has_secdata(sock))
msg->msg_flags |= MSG_CTRUNC;
scm_destroy(scm);
return;
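
(Aside, not part of the patch: a minimal userspace sketch of the behaviour change. With SO_PASSSEC enabled, a recvmsg() that supplies no control buffer now reports the dropped SCM_SECURITY data through MSG_CTRUNC:)

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void example_recv(int fd)
{
	char payload[256];
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	/* msg.msg_control is NULL, so ancillary data cannot be delivered */

	if (recvmsg(fd, &msg, 0) >= 0 && (msg.msg_flags & MSG_CTRUNC))
		fprintf(stderr, "ancillary data (e.g. SCM_SECURITY) was truncated\n");
}
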
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 7800e12ee042..cf6e58fa5266 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -140,7 +140,10 @@ int rpi_firmware_property(struct rpi_firmware *fw,
u32 tag, void *data, size_t len);
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size);
+void rpi_firmware_put(struct rpi_firmware *fw);
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node);
#else
static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
void *data, size_t len)
@@ -154,10 +157,17 @@ static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
return -ENOSYS;
}
+static inline void rpi_firmware_put(struct rpi_firmware *fw) { }
static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
return NULL;
}
+
+static inline struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ return NULL;
+}
#endif
#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
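
(Aside, not part of the patch: a hedged sketch of a consumer driver using the new devm_ variant, so the firmware reference is released automatically on unbind. The "firmware" phandle name is an assumption about the consumer's binding:)

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

static int example_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	struct rpi_firmware *fw;

	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
	if (!fw_node)
		return -ENOENT;

	fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!fw)
		return -EPROBE_DEFER;	/* firmware driver not bound yet */

	/* use rpi_firmware_property(fw, ...); no explicit rpi_firmware_put() */
	return 0;
}
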
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b7a904825e7d..295517f109d7 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -367,7 +367,8 @@ TRACE_EVENT(itimer_expire,
tick_dep_name(POSIX_TIMER) \
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
- tick_dep_name_end(CLOCK_UNSTABLE)
+ tick_dep_name(CLOCK_UNSTABLE) \
+ tick_dep_name_end(RCU)
#undef tick_dep_name
#undef tick_dep_mask_name
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 3ee0678c0a83..72cd9f61f054 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -162,6 +162,7 @@ struct btrfs_scrub_progress {
};
#define BTRFS_SCRUB_READONLY 1
+#define BTRFS_SCRUB_SUPPORTED_FLAGS (BTRFS_SCRUB_READONLY)
struct btrfs_ioctl_scrub_args {
__u64 devid; /* in */
__u64 start; /* in */
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index af2a44c08683..a429381e7ca5 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,7 +28,7 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
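
(Aside, not part of the patch: a worked example of the alignment macro, whose behaviour is unchanged. The __typeof__ spelling keeps the UAPI header usable from userspace code built with strict -std= modes, where the typeof keyword is not available.)

/*
 *   __ALIGN_KERNEL(5, 4)  ->  (5 + 3) & ~3  ->  8
 *   __ALIGN_KERNEL(8, 4)  ->  (8 + 3) & ~3  ->  8
 */
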
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 16c1fa2d89a4..052c8308b995 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -155,4 +155,12 @@
/* HSV - next is 0x6002 */
#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
+/*
+ * This format should be used when the same driver handles
+ * both sides of the link and the bus format is a fixed
+ * metadata format that is not configurable from userspace.
+ * Width and height will be set to 0 for this format.
+ */
+#define MEDIA_BUS_FMT_METADATA_FIXED 0x7001
+
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index c2f0aa818b7a..08c1246c758e 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1131,7 +1131,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
goto out;
}
- if (ctx.optlen > max_optlen || ctx.optlen < 0) {
+ if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
ret = -EFAULT;
goto out;
}
@@ -1145,8 +1145,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
}
if (ctx.optlen != 0) {
- if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
- put_user(ctx.optlen, optlen)) {
+ if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(ctx.optlen, optlen)) {
ret = -EFAULT;
goto out;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1ef924d6a385..41a8a5f74940 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8166,8 +8166,8 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
hwc->interrupts = 1;
} else {
hwc->interrupts++;
- if (unlikely(throttle
- && hwc->interrupts >= max_samples_per_tick)) {
+ if (unlikely(throttle &&
+ hwc->interrupts > max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
hwc->interrupts = MAX_INTERRUPTS;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 79214f983624..2a8a5e1779c9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1610,7 +1610,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
- if (irq_settings_can_autoenable(desc)) {
+ if (!(new->flags & IRQF_NO_AUTOEN) &&
+ irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
} else {
/*
@@ -2041,10 +2042,15 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
* which interrupt is which (messes up the interrupt freeing
* logic etc).
*
+ * Also, shared interrupts do not go well with disabling auto enable:
+ * a second sharer might request the interrupt while it is still
+ * disabled and then wait for interrupts forever.
+ *
* Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
* it cannot be set along with IRQF_NO_SUSPEND.
*/
if (((irqflags & IRQF_SHARED) && !dev_id) ||
+ ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
(!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
return -EINVAL;
@@ -2200,7 +2206,8 @@ int request_nmi(unsigned int irq, irq_handler_t handler,
desc = irq_to_desc(irq);
- if (!desc || irq_settings_can_autoenable(desc) ||
+ if (!desc || (irq_settings_can_autoenable(desc) &&
+ !(irqflags & IRQF_NO_AUTOEN)) ||
!irq_settings_can_request(desc) ||
WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
!irq_supports_nmi(desc))
diff --git a/kernel/kheaders.c b/kernel/kheaders.c
index 8f69772af77b..42163c9e94e5 100644
--- a/kernel/kheaders.c
+++ b/kernel/kheaders.c
@@ -26,15 +26,15 @@ asm (
" .popsection \n"
);
-extern char kernel_headers_data;
-extern char kernel_headers_data_end;
+extern char kernel_headers_data[];
+extern char kernel_headers_data_end[];
static ssize_t
ikheaders_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
- memcpy(buf, &kernel_headers_data + off, len);
+ memcpy(buf, &kernel_headers_data[off], len);
return len;
}
@@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = {
static int __init ikheaders_init(void)
{
- kheaders_attr.size = (&kernel_headers_data_end -
- &kernel_headers_data);
+ kheaders_attr.size = (kernel_headers_data_end -
+ kernel_headers_data);
return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
}
diff --git a/kernel/relay.c b/kernel/relay.c
index 9b1cfcd8dc6b..1e11199c7d7c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -997,14 +997,14 @@ static void relay_file_read_consume(struct rchan_buf *buf,
/*
* relay_file_read_avail - boolean, are there unconsumed bytes available?
*/
-static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
+static int relay_file_read_avail(struct rchan_buf *buf)
{
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
size_t produced = buf->subbufs_produced;
size_t consumed = buf->subbufs_consumed;
- relay_file_read_consume(buf, read_pos, 0);
+ relay_file_read_consume(buf, 0, 0);
consumed = buf->subbufs_consumed;
@@ -1065,23 +1065,21 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
/**
* relay_file_read_start_pos - find the first available byte to read
- * @read_pos: file read position
* @buf: relay channel buffer
*
- * If the @read_pos is in the middle of padding, return the
+ * If the read_pos is in the middle of padding, return the
* position of the first actually available byte, otherwise
* return the original value.
*/
-static size_t relay_file_read_start_pos(size_t read_pos,
- struct rchan_buf *buf)
+static size_t relay_file_read_start_pos(struct rchan_buf *buf)
{
size_t read_subbuf, padding, padding_start, padding_end;
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
size_t consumed = buf->subbufs_consumed % n_subbufs;
+ size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
+ % (n_subbufs * subbuf_size);
- if (!read_pos)
- read_pos = consumed * subbuf_size + buf->bytes_consumed;
read_subbuf = read_pos / subbuf_size;
padding = buf->padding[read_subbuf];
padding_start = (read_subbuf + 1) * subbuf_size - padding;
@@ -1137,10 +1135,10 @@ static ssize_t relay_file_read(struct file *filp,
do {
void *from;
- if (!relay_file_read_avail(buf, *ppos))
+ if (!relay_file_read_avail(buf))
break;
- read_start = relay_file_read_start_pos(*ppos, buf);
+ read_start = relay_file_read_start_pos(buf);
avail = relay_file_read_subbuf_avail(read_start, buf);
if (!avail)
break;
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index d23b434c2ca7..eddcf4970444 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -58,7 +58,8 @@ static struct clocksource clocksource_jiffies = {
.max_cycles = 10,
};
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
+__cacheline_aligned_in_smp seqcount_t jiffies_seq;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
@@ -67,9 +68,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
ret = jiffies_64;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e51778c312f1..ce7339ff10d2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -331,7 +331,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
bc_local = tick_do_periodic_broadcast();
if (clockevent_state_oneshot(dev)) {
- ktime_t next = ktime_add(dev->next_event, tick_period);
+ ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);
clockevents_program_event(dev, next, true);
}
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 7e5d3524e924..2b7448ae5b47 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -30,7 +30,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
* Tick next event: keeps track of the tick time
*/
ktime_t tick_next_period;
-ktime_t tick_period;
/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -84,13 +83,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
+ tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);
do_timer(1);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
@@ -125,7 +126,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
* Setup the next period for devices, which do not have
* periodic mode:
*/
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
if (!clockevents_program_event(dev, next, false))
return;
@@ -162,16 +163,16 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
next = tick_next_period;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
}
}
}
@@ -215,10 +216,19 @@ static void tick_setup_device(struct tick_device *td,
* this cpu:
*/
if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
+ ktime_t next_p;
+ u32 rem;
+
tick_do_timer_cpu = cpu;
- tick_next_period = ktime_get();
- tick_period = NSEC_PER_SEC / HZ;
+ next_p = ktime_get();
+ div_u64_rem(next_p, TICK_NSEC, &rem);
+ if (rem) {
+ next_p -= rem;
+ next_p += TICK_NSEC;
+ }
+
+ tick_next_period = next_p;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 5294f5b1f955..e61c1244e7d4 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -15,7 +15,6 @@
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
-extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5eb04bb59802..a70c611df137 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -53,48 +53,69 @@ static ktime_t last_jiffies_update;
*/
static void tick_do_update_jiffies64(ktime_t now)
{
- unsigned long ticks = 0;
+ unsigned long ticks = 1;
ktime_t delta;
/*
- * Do a quick check without holding jiffies_lock:
- * The READ_ONCE() pairs with two updates done later in this function.
+ * Do a quick check without holding jiffies_lock. The READ_ONCE()
+ * pairs with the update done later in this function.
+ *
+ * This is also an intentional data race which is even safe on
+ * 32bit in theory. If there is a concurrent update then the check
+ * might give a random answer. It does not matter because if it
+ * returns then the concurrent update is already taking care, if it
+ * falls through then it will pointlessly contend on jiffies_lock.
+ *
+ * Though there is one nasty case on 32bit due to store tearing of
+ * the 64bit value. If the first 32bit store makes the quick check
+ * return on all other CPUs and the writing CPU context gets
+ * delayed to complete the second store (scheduled out on virt)
+ * then jiffies can become stale for up to ~2^32 nanoseconds
+ * without noticing. After that point all CPUs will wait for
+ * jiffies lock.
+ *
+ * OTOH, this is not any different than the situation with NOHZ=off
+ * where one CPU is responsible for updating jiffies and
+ * timekeeping. If that CPU goes out for lunch then all other CPUs
+ * will operate on stale jiffies until it decides to come back.
*/
- delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
- if (delta < tick_period)
+ if (ktime_before(now, READ_ONCE(tick_next_period)))
return;
/* Reevaluate with jiffies_lock held */
- write_seqlock(&jiffies_lock);
-
- delta = ktime_sub(now, last_jiffies_update);
- if (delta >= tick_period) {
+ raw_spin_lock(&jiffies_lock);
+ if (ktime_before(now, tick_next_period)) {
+ raw_spin_unlock(&jiffies_lock);
+ return;
+ }
- delta = ktime_sub(delta, tick_period);
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add(last_jiffies_update, tick_period));
+ write_seqcount_begin(&jiffies_seq);
- /* Slow path for long timeouts */
- if (unlikely(delta >= tick_period)) {
- s64 incr = ktime_to_ns(tick_period);
+ delta = ktime_sub(now, tick_next_period);
+ if (unlikely(delta >= TICK_NSEC)) {
+ /* Slow path for long idle sleep times */
+ s64 incr = TICK_NSEC;
- ticks = ktime_divns(delta, incr);
+ ticks += ktime_divns(delta, incr);
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add_ns(last_jiffies_update,
- incr * ticks));
- }
- do_timer(++ticks);
-
- /* Keep the tick_next_period variable up to date */
- tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ incr * ticks);
} else {
- write_sequnlock(&jiffies_lock);
- return;
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ TICK_NSEC);
}
- write_sequnlock(&jiffies_lock);
+
+ do_timer(ticks);
+
+ /*
+ * Keep the tick_next_period variable up to date. WRITE_ONCE()
+ * pairs with the READ_ONCE() in the lockless quick check above.
+ */
+ WRITE_ONCE(tick_next_period,
+ ktime_add_ns(last_jiffies_update, TICK_NSEC));
+
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
@@ -105,12 +126,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
return period;
}
@@ -202,6 +225,11 @@ static bool check_tick_dependency(atomic_t *dep)
return true;
}
+ if (val & TICK_DEP_MASK_RCU) {
+ trace_tick_stop(0, TICK_DEP_MASK_RCU);
+ return true;
+ }
+
return false;
}
@@ -328,6 +356,7 @@ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
@@ -335,6 +364,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
/*
* Set a per-task tick dependency. Posix CPU timers need this in order to elapse
@@ -402,7 +432,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
tick_nohz_full_running = true;
}
-static int tick_nohz_cpu_down(unsigned int cpu)
+bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
/*
* The tick_do_timer_cpu CPU handles housekeeping duty (unbound
@@ -410,8 +440,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
* CPUs. It must remain online when nohz full is enabled.
*/
if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
- return -EBUSY;
- return 0;
+ return false;
+ return true;
+}
+
+static int tick_nohz_cpu_down(unsigned int cpu)
+{
+ return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}
void __init tick_nohz_init(void)
@@ -636,7 +671,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
/* Forward the time to expire in the future */
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
@@ -665,10 +700,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
basemono = last_jiffies_update;
basejiff = jiffies;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
ts->last_jiffies = basejiff;
ts->timer_expires_base = basemono;
@@ -1198,7 +1233,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
if (unlikely(ts->tick_stopped))
return;
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
@@ -1235,7 +1270,7 @@ static void tick_nohz_switch_to_nohz(void)
next = tick_init_jiffy_update();
hrtimer_set_expires(&ts->sched_timer, next);
- hrtimer_forward_now(&ts->sched_timer, tick_period);
+ hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
@@ -1301,7 +1336,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;
- hrtimer_forward(timer, now, tick_period);
+ hrtimer_forward(timer, now, TICK_NSEC);
return HRTIMER_RESTART;
}
@@ -1335,13 +1370,13 @@ void tick_setup_sched_timer(void)
/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
- u64 offset = ktime_to_ns(tick_period) >> 1;
+ u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2bc278dd9854..105dd0b66329 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2415,8 +2415,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
do_timer(ticks);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 141ab3ab0354..099737f6f10c 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { }
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
-extern seqlock_t jiffies_lock;
+extern raw_spinlock_t jiffies_lock;
+extern seqcount_t jiffies_seq;
#define CS_NAME_LEN 32
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 869eeba2005a..43f452b5deff 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1393,6 +1393,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
+ irq_work_sync(&cpu_buffer->irq_work.work);
+
free_buffer_page(cpu_buffer->reader_page);
if (head) {
@@ -1498,6 +1500,8 @@ ring_buffer_free(struct ring_buffer *buffer)
cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+ irq_work_sync(&buffer->irq_work.work);
+
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 48054dbf1b51..bfb3eb8c9800 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -218,10 +218,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
return obj;
}
-/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
- */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
@@ -528,31 +524,74 @@ static void debug_object_is_on_stack(void *addr, int onstack)
WARN_ON(1);
}
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+ struct debug_obj_descr *descr,
+ bool onstack, bool alloc_ifstatic)
+{
+ struct debug_obj *obj = lookup_object(addr, b);
+ enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+ if (likely(obj))
+ return obj;
+
+ /*
+ * debug_object_init() unconditionally allocates untracked
+ * objects. It does not matter whether it is a static object or
+ * not.
+ *
+ * debug_object_assert_init() and debug_object_activate() allow
+ * allocation only if the descriptor callback confirms that the
+ * object is static and considered initialized. For non-static
+ * objects the allocation needs to be done from the fixup callback.
+ */
+ if (unlikely(alloc_ifstatic)) {
+ if (!descr->is_static_object || !descr->is_static_object(addr))
+ return ERR_PTR(-ENOENT);
+ /* Statically allocated objects are considered initialized */
+ state = ODEBUG_STATE_INIT;
+ }
+
+ obj = alloc_object(addr, b, descr);
+ if (likely(obj)) {
+ obj->state = state;
+ debug_object_is_on_stack(addr, onstack);
+ return obj;
+ }
+
+ /* Out of memory. Do the cleanup outside of the locked region */
+ debug_objects_enabled = 0;
+ return NULL;
+}
+
+static void debug_objects_fill_pool(void)
+{
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context:
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ fill_pool();
+}
+
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
enum debug_obj_state state;
- bool check_stack = false;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+ debug_objects_fill_pool();
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (!obj) {
- obj = alloc_object(addr, db, descr);
- if (!obj) {
- debug_objects_enabled = 0;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_objects_oom();
- return;
- }
- check_stack = true;
+ obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
}
switch (obj->state) {
@@ -578,8 +617,6 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (check_stack)
- debug_object_is_on_stack(addr, onstack);
}
/**
@@ -619,24 +656,24 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
*/
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
int ret;
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
if (!debug_objects_enabled)
return 0;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (obj) {
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ if (likely(!IS_ERR_OR_NULL(obj))) {
bool print_object = false;
switch (obj->state) {
@@ -669,24 +706,16 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * We are here when a static object is activated. We
- * let the type specific code confirm whether this is
- * true or not. if true, we just make sure that the
- * static object is tracked in the object tracker. If
- * not, this must be a bug, so we try to fix it up.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* track this static object */
- debug_object_init(addr, descr);
- debug_object_activate(addr, descr);
- } else {
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
+ /* If NULL the allocation has hit OOM */
+ if (!obj) {
+ debug_objects_oom();
+ return 0;
}
- return 0;
+
+ /* Object is neither static nor tracked. It's not initialized */
+ debug_print_object(&o, "activate");
+ ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+ return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -840,6 +869,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
*/
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -847,34 +877,25 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
if (!debug_objects_enabled)
return;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (likely(!IS_ERR_OR_NULL(obj)))
+ return;
- obj = lookup_object(addr, db);
+ /* If NULL the allocation has hit OOM */
if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * Maybe the object is static, and we let the type specific
- * code confirm. Track this static object if true, else invoke
- * fixup.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* Track this static object */
- debug_object_init(addr, descr);
- } else {
- debug_print_object(&o, "assert_init");
- debug_object_fixup(descr->fixup_assert_init, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- }
+ debug_objects_oom();
return;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
+ /* Object is neither tracked nor static. It's not initialized. */
+ debug_print_object(&o, "assert_init");
+ debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c30a8bac5c13..a3fca320e35a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5816,7 +5816,21 @@ static void __build_all_zonelists(void *data)
int nid;
int __maybe_unused cpu;
pg_data_t *self = data;
+ unsigned long flags;
+ /*
+ * Explicitly disable this CPU's interrupts before taking seqlock
+ * to prevent any IRQ handler from calling into the page allocator
+ * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
+ */
+ local_irq_save(flags);
+ /*
+ * Explicitly disable this CPU's synchronous printk() before taking
+ * seqlock to prevent any printk() from trying to hold port->lock:
+ * tty_insert_flip_string_and_push_buffer() on another CPU might be
+ * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
+ */
+ printk_deferred_enter();
write_seqlock(&zonelist_update_seq);
#ifdef CONFIG_NUMA
@@ -5851,6 +5865,8 @@ static void __build_all_zonelists(void *data)
}
write_sequnlock(&zonelist_update_seq);
+ printk_deferred_exit();
+ local_irq_restore(flags);
}
static noinline void __init
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 589615ec490b..b10f31f98cb8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -366,7 +366,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- if (!net_eq(dev_net(dev), &init_net))
+ if (!net_eq(dev_net(dev), dev_net(real_dev)))
break;
/* fall through */
case SIOCGMIIPHY:
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index a5205dc95e8f..4f8f5204ae7a 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -989,7 +989,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
if (hci_sock_gen_cookie(sk)) {
struct sk_buff *skb;
- if (capable(CAP_NET_ADMIN))
+ /* Perform careful checks before setting the HCI_SOCK_TRUSTED
+ * flag. Make sure that not only the current task but also
+ * the socket opener has the required capability, since
+ * privileged programs can be tricked into making ioctl calls
+ * on HCI sockets, and the socket should not be marked as
+ * trusted simply because the ioctl caller is privileged.
+ */
+ if (sk_capable(sk, CAP_NET_ADMIN))
hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
/* Send event to monitor */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0547aa2c8b13..291d7a9e0483 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4632,6 +4632,9 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
skb = alloc_skb(0, GFP_ATOMIC);
} else {
skb = skb_clone(orig_skb, GFP_ATOMIC);
+
+ if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
+ return;
}
if (!skb)
return;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 418e93987800..08ccb501ff0c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1559,9 +1559,19 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
- if (iph->protocol == IPPROTO_ICMP)
- icmp_out_count(net, ((struct icmphdr *)
- skb_transport_header(skb))->type);
+ if (iph->protocol == IPPROTO_ICMP) {
+ u8 icmp_type;
+
+ /* For such sockets, transhdrlen is zero when ip_append_data() runs,
+ * so the icmphdr is not in the skb linear region and icmp_type cannot
+ * be read via icmp_hdr(skb)->type.
+ */
+ if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+ icmp_type = fl4->fl4_icmp_type;
+ else
+ icmp_type = icmp_hdr(skb)->type;
+ icmp_out_count(net, icmp_type);
+ }
ip_cork_release(cork);
out:
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1179608955f5..8d704ea94693 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1054,12 +1054,13 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
static void ipip6_tunnel_bind_dev(struct net_device *dev)
{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
struct net_device *tdev = NULL;
- struct ip_tunnel *tunnel;
+ int hlen = LL_MAX_HEADER;
const struct iphdr *iph;
struct flowi4 fl4;
- tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
if (iph->daddr) {
@@ -1082,14 +1083,15 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev && !netif_is_l3_master(tdev)) {
- int t_hlen = tunnel->hlen + sizeof(struct iphdr);
int mtu;
mtu = tdev->mtu - t_hlen;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
WRITE_ONCE(dev->mtu, mtu);
+ hlen = tdev->hard_header_len + tdev->needed_headroom;
}
+ dev->needed_headroom = t_hlen + hlen;
}
static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b635c194f0a8..62fb1031763d 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -165,6 +165,7 @@ static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
nc->state = NCSI_CHANNEL_INACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
+ nc->modes[NCSI_MODE_TX_ENABLE].enable = 0;
return ncsi_process_next_channel(ndp);
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 140c24f1b6c6..7794fa4c669d 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3909,12 +3909,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
}
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ if (nft_set_is_anonymous(set))
+ nft_clear(ctx->net, set);
+
+ set->use++;
+}
+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
switch (phase) {
case NFT_TRANS_PREPARE:
+ if (nft_set_is_anonymous(set))
+ nft_deactivate_next(ctx->net, set);
+
set->use--;
return;
case NFT_TRANS_ABORT:
@@ -6491,6 +6503,8 @@ static int nf_tables_validate(struct net *net)
if (nft_table_validate(net, table) < 0)
return -EAGAIN;
}
+
+ nft_validate_state_update(net, NFT_VALIDATE_SKIP);
break;
}
@@ -7172,11 +7186,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
return 0;
}
-static void nf_tables_cleanup(struct net *net)
-{
- nft_validate_state_update(net, NFT_VALIDATE_SKIP);
-}
-
static int nf_tables_abort(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action)
{
@@ -7208,7 +7217,6 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
.cb = nf_tables_cb,
.commit = nf_tables_commit,
.abort = nf_tables_abort,
- .cleanup = nf_tables_cleanup,
.valid_genid = nf_tables_valid_genid,
.owner = THIS_MODULE,
};
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 81c86a156c6c..e3f6e27a9a06 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -512,8 +512,6 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
goto replay_abort;
}
}
- if (ss->cleanup)
- ss->cleanup(net);
nfnl_err_deliver(&err_list, oskb);
kfree_skb(skb);
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 6bcc18124e5b..7f9e6c90f727 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -259,7 +259,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
{
struct nft_dynset *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_dynset_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 660bad688e2b..4eb4d076927e 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -129,7 +129,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
{
struct nft_lookup *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_lookup_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index bfd18d2b65a2..74c61278e6bd 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -180,7 +180,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
{
struct nft_objref_map *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a232fcbd721c..00f040fb46b9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1743,7 +1743,8 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int len, val, err;
+ unsigned int flag;
+ int len, val;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
@@ -1755,39 +1756,17 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case NETLINK_PKTINFO:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_RECV_PKTINFO;
break;
case NETLINK_BROADCAST_ERROR:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_BROADCAST_SEND_ERROR;
break;
case NETLINK_NO_ENOBUFS:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_RECV_NO_ENOBUFS;
break;
case NETLINK_LIST_MEMBERSHIPS: {
- int pos, idx, shift;
+ int pos, idx, shift, err = 0;
- err = 0;
netlink_lock_table();
for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
if (len - pos < sizeof(u32))
@@ -1804,40 +1783,32 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
err = -EFAULT;
netlink_unlock_table();
- break;
+ return err;
}
case NETLINK_CAP_ACK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_CAP_ACK;
break;
case NETLINK_EXT_ACK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
- if (put_user(len, optlen) || put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_EXT_ACK;
break;
case NETLINK_GET_STRICT_CHK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
- if (put_user(len, optlen) || put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_STRICT_CHK;
break;
default:
- err = -ENOPROTOOPT;
+ return -ENOPROTOOPT;
}
- return err;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = nlk->flags & flag ? 1 : 0;
+
+ if (put_user(len, optlen) ||
+ copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 450dc0334772..109a848aca15 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -266,7 +266,8 @@ static void packet_cached_dev_reset(struct packet_sock *po)
static bool packet_use_direct_xmit(const struct packet_sock *po)
{
- return po->xmit == packet_direct_xmit;
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ return READ_ONCE(po->xmit) == packet_direct_xmit;
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -1974,7 +1975,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
goto retry;
}
- if (!dev_validate_header(dev, skb->data, len)) {
+ if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
err = -EINVAL;
goto out_unlock;
}
@@ -2124,7 +2125,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
sll = &PACKET_SKB_CB(skb)->sa.ll;
sll->sll_hatype = dev->type;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev))
+ if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -2392,7 +2393,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev))
+ if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -2799,7 +2800,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
packet_inc_pending(&po->tx_ring);
status = TP_STATUS_SEND_REQUEST;
- err = po->xmit(skb);
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@@ -3002,7 +3004,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
- err = po->xmit(skb);
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@@ -3453,7 +3456,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
- if (pkt_sk(sk)->auxdata) {
+ if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
struct tpacket_auxdata aux;
aux.tp_status = TP_STATUS_USER;
@@ -3836,9 +3839,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- lock_sock(sk);
- po->auxdata = !!val;
- release_sock(sk);
+ packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
return 0;
}
case PACKET_ORIGDEV:
@@ -3850,9 +3851,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- lock_sock(sk);
- po->origdev = !!val;
- release_sock(sk);
+ packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
return 0;
}
case PACKET_VNET_HDR:
@@ -3949,7 +3948,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ /* Paired with all lockless reads of po->xmit */
+ WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
return 0;
}
default:
@@ -4000,10 +4000,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
break;
case PACKET_AUXDATA:
- val = po->auxdata;
+ val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
break;
case PACKET_ORIGDEV:
- val = po->origdev;
+ val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
break;
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 07812ae5ca07..d704c7bf51b2 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -23,9 +23,9 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
pinfo.pdi_flags = 0;
if (po->running)
pinfo.pdi_flags |= PDI_RUNNING;
- if (po->auxdata)
+ if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
pinfo.pdi_flags |= PDI_AUXDATA;
- if (po->origdev)
+ if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
pinfo.pdi_flags |= PDI_ORIGDEV;
if (po->has_vnet_hdr)
pinfo.pdi_flags |= PDI_VNETHDR;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 907f4cd2a718..2b2b85dadf8e 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -115,10 +115,9 @@ struct packet_sock {
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
+ unsigned long flags;
unsigned int running; /* bind_lock must be held */
- unsigned int auxdata:1, /* writer must hold sock lock */
- origdev:1,
- has_vnet_hdr:1,
+ unsigned int has_vnet_hdr:1, /* writer must hold sock lock */
tp_loss:1,
tp_tx_has_off:1;
int pressure;
@@ -143,4 +142,25 @@ static struct packet_sock *pkt_sk(struct sock *sk)
return (struct packet_sock *)sk;
}
+enum packet_sock_flags {
+ PACKET_SOCK_ORIGDEV,
+ PACKET_SOCK_AUXDATA,
+};
+
+static inline void packet_sock_flag_set(struct packet_sock *po,
+ enum packet_sock_flags flag,
+ bool val)
+{
+ if (val)
+ set_bit(flag, &po->flags);
+ else
+ clear_bit(flag, &po->flags);
+}
+
+static inline bool packet_sock_flag(const struct packet_sock *po,
+ enum packet_sock_flags flag)
+{
+ return test_bit(flag, &po->flags);
+}
+
#endif
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 1cb90d32d553..3439d14168e8 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -738,7 +738,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
/* Fall through */
case 1:
if (p.call.timeouts.hard > 0) {
- j = msecs_to_jiffies(p.call.timeouts.hard);
+ j = p.call.timeouts.hard * HZ;
now = jiffies;
j += now;
WRITE_ONCE(call->expect_term_by, j);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index b87d2a1ee0b1..e3f28cb03f7e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -244,7 +244,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
goto out;
}
- if (unlikely(!(dev->flags & IFF_UP))) {
+ if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
dev->name);
goto out;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 77a1988d5ddc..49777da9f263 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1500,6 +1500,7 @@ static int tcf_block_bind(struct tcf_block *block,
err_unroll:
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
+ list_del(&block_cb->driver_list);
if (i-- > 0) {
list_del(&block_cb->list);
tcf_block_playback_offloads(block, block_cb->cb,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 012b0504264d..629c05ff1f3e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2003,9 +2003,6 @@ call_bind_status(struct rpc_task *task)
status = -EOPNOTSUPP;
break;
}
- if (task->tk_rebind_retry == 0)
- break;
- task->tk_rebind_retry--;
rpc_delay(task, 3*HZ);
goto retry_timeout;
case -ENOBUFS:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 32ffa801a5b9..a5c6a3d05741 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -827,7 +827,6 @@ rpc_init_task_statistics(struct rpc_task *task)
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
- task->tk_rebind_retry = 2;
/* starting timestamp */
task->tk_start = ktime_get();
diff --git a/scripts/gdb/linux/clk.py b/scripts/gdb/linux/clk.py
index 061aecfa294e..7a01fdc3e844 100644
--- a/scripts/gdb/linux/clk.py
+++ b/scripts/gdb/linux/clk.py
@@ -41,6 +41,8 @@ are cached and potentially out of date"""
self.show_subtree(child, level + 1)
def invoke(self, arg, from_tty):
+ if utils.gdb_eval_or_none("clk_root_list") is None:
+ raise gdb.GdbError("No clocks registered")
gdb.write(" enable prepare protect \n")
gdb.write(" clock count count count rate \n")
gdb.write("------------------------------------------------------------------------\n")
diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py
index 6ca93bd2949e..b53649c0a77a 100644
--- a/scripts/gdb/linux/genpd.py
+++ b/scripts/gdb/linux/genpd.py
@@ -5,7 +5,7 @@
import gdb
import sys
-from linux.utils import CachedType
+from linux.utils import CachedType, gdb_eval_or_none
from linux.lists import list_for_each_entry
generic_pm_domain_type = CachedType('struct generic_pm_domain')
@@ -49,17 +49,17 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
else:
status_string = 'off-{}'.format(genpd['state_idx'])
- slave_names = []
+ child_names = []
for link in list_for_each_entry(
- genpd['master_links'],
+ genpd['parent_links'],
device_link_type.get_type().pointer(),
- 'master_node'):
- slave_names.apend(link['slave']['name'])
+ 'parent_node'):
+ child_names.append(link['child']['name'])
gdb.write('%-30s %-15s %s\n' % (
genpd['name'].string(),
status_string,
- ', '.join(slave_names)))
+ ', '.join(child_names)))
# Print devices in domain
for pm_data in list_for_each_entry(genpd['dev_list'],
@@ -70,7 +70,9 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))
def invoke(self, arg, from_tty):
- gdb.write('domain status slaves\n');
+ if gdb_eval_or_none("&gpd_list") is None:
+ raise gdb.GdbError("No power domain(s) registered")
+ gdb.write('domain status children\n');
gdb.write(' /device runtime status\n');
gdb.write('----------------------------------------------------------------------\n');
for genpd in list_for_each_entry(
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
index 071d0dd5a634..51def847f1ef 100644
--- a/scripts/gdb/linux/timerlist.py
+++ b/scripts/gdb/linux/timerlist.py
@@ -73,7 +73,7 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
ts = cpus.per_cpu(tick_sched_ptr, cpu)
text = "cpu: {}\n".format(cpu)
- for i in xrange(max_clock_bases):
+ for i in range(max_clock_bases):
text += " clock {}:\n".format(i)
text += print_base(cpu_base['clock_base'][i])
@@ -158,6 +158,8 @@ def pr_cpumask(mask):
num_bytes = (nr_cpu_ids + 7) / 8
buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
buf = binascii.b2a_hex(buf)
+ if type(buf) is not str:
+ buf=buf.decode()
chunks = []
i = num_bytes
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index ea94221dbd39..8c33be7985b6 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -89,7 +89,10 @@ def get_target_endianness():
def read_memoryview(inf, start, length):
- return memoryview(inf.read_memory(start, length))
+ m = inf.read_memory(start, length)
+ if type(m) is memoryview:
+ return m
+ return memoryview(m)
def read_u16(buffer, offset):
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index ccf950409384..c371634f35a3 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -19,8 +19,8 @@ ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
- cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
+ cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
targets += flask.h av_permissions.h
-$(obj)/flask.h: $(src)/include/classmap.h FORCE
+$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE
$(call if_changed,flask)
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index b781b28de012..efeffa0bf2d7 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -806,15 +806,14 @@ static int es8316_i2c_probe(struct i2c_client *i2c_client,
es8316->irq = i2c_client->irq;
mutex_init(&es8316->lock);
- ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "es8316", es8316);
- if (ret == 0) {
- /* Gets re-enabled by es8316_set_jack() */
- disable_irq(es8316->irq);
- } else {
- dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
- es8316->irq = -ENXIO;
+ if (es8316->irq > 0) {
+ ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "es8316", es8316);
+ if (ret) {
+ dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
+ es8316->irq = -ENXIO;
+ }
}
return devm_snd_soc_register_component(&i2c_client->dev,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 6a8edb0a559d..df3b370fe729 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -391,6 +391,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
/* Please keep this list alphabetically sorted */
static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ { /* Acer Iconia One 7 B1-750 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD1_IN4P |
+ BYT_RT5640_OVCD_TH_1500UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Acer Iconia Tab 8 W1-810 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
index 533eb69fe4e6..cd5a8584dbe3 100644
--- a/sound/usb/caiaq/input.c
+++ b/sound/usb/caiaq/input.c
@@ -804,6 +804,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
default:
/* no input methods supported on this device */
+ ret = -EINVAL;
goto exit_free_idev;
}
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index 86501cd3c763..f2be9ef2c2d5 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -80,9 +80,6 @@ static void jsonw_puts(json_writer_t *self, const char *str)
case '"':
fputs("\\\"", self->out);
break;
- case '\'':
- fputs("\\\'", self->out);
- break;
default:
putc(*str, self->out);
}
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 5b91ee65a080..762ca450d198 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -363,8 +363,15 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
struct bpf_insn *insn_start = buf_start;
struct bpf_insn *insn_end = buf_end;
struct bpf_insn *cur = insn_start;
+ bool double_insn = false;
for (; cur <= insn_end; cur++) {
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+ double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
+
printf("% 4d: ", (int)(cur - insn_start + start_idx));
print_bpf_insn(&cbs, cur, true);
if (cur != insn_end)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5cacc4f84c8d..0826893098ce 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -666,7 +666,7 @@ static void create_tasks(struct perf_sched *sched)
err = pthread_attr_init(&attr);
BUG_ON(err);
err = pthread_attr_setstacksize(&attr,
- (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
+ (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
BUG_ON(err);
err = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(err);
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
index 62b864269623..ce75652e9051 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
@@ -1417,7 +1417,7 @@
{,
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
- "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
+ "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only."
},
{,
"EventCode": "0x201E8",
@@ -2017,7 +2017,7 @@
{,
"EventCode": "0xC0BC",
"EventName": "PM_LSU_FLUSH_OTHER",
- "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the “bad dval” back and flush all younger ops)"
+ "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the 'bad dval' back and flush all younger ops)"
},
{,
"EventCode": "0x5094",
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
index b4772f54a271..e2f2ed0a3549 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
@@ -442,7 +442,7 @@
{,
"EventCode": "0x4D052",
"EventName": "PM_2FLOP_CMPL",
- "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg "
+ "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg"
},
{,
"EventCode": "0x1F142",
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index df4c575a0d94..3d5cd16ca4de 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1825,6 +1825,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
char type, u64 start)
{
struct sym_args *args = arg;
+ u64 size;
if (!kallsyms__is_function(type))
return 0;
@@ -1834,7 +1835,9 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
args->start = start;
}
/* Don't know exactly where the kernel ends, so we add a page */
- args->size = round_up(start, page_size) + page_size - args->start;
+ size = round_up(start, page_size) + page_size - args->start;
+ if (size > args->size)
+ args->size = size;
return 0;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index eab7e8ef6789..52474e44fb28 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1592,6 +1592,8 @@ static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
decoder->cbr = cbr;
decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
+ decoder->cyc_ref_timestamp = decoder->timestamp;
+ decoder->cycle_cnt = 0;
intel_pt_mtc_cyc_cnt_cbr(decoder);
}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4027906fd3e3..baf73ca66a2b 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -830,8 +830,7 @@ static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
- struct addr_map_symbol *from_l = &left->branch_info->from;
- struct addr_map_symbol *from_r = &right->branch_info->from;
+ struct addr_map_symbol *from_l, *from_r;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 4fef8d6bc225..73f890664be5 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -546,7 +546,7 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
size_t sz = min(size, descsz);
memcpy(bf, ptr, sz);
memset(bf + sz, 0, size - sz);
- err = descsz;
+ err = sz;
break;
}
}