Message-ID: <2025101549-margarine-urethane-1e03@gregkh>
Date: Wed, 15 Oct 2025 12:25:49 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org,
akpm@...ux-foundation.org,
torvalds@...ux-foundation.org,
stable@...r.kernel.org
Cc: lwn@....net,
jslaby@...e.cz,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: Linux 6.17.3
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 9ec8947dfcad..ed7fec614473 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -86,6 +86,8 @@ patternProperties:
description: Allegro DVT
"^allegromicro,.*":
description: Allegro MicroSystems, Inc.
+ "^alliedtelesis,.*":
+ description: Allied Telesis, Inc.
"^alliedvision,.*":
description: Allied Vision Technologies GmbH
"^allo,.*":
@@ -229,6 +231,8 @@ patternProperties:
description: Bitmain Technologies
"^blaize,.*":
description: Blaize, Inc.
+ "^bluegiga,.*":
+ description: Bluegiga Technologies Ltd.
"^blutek,.*":
description: BluTek Power
"^boe,.*":
@@ -247,6 +251,8 @@ patternProperties:
description: Bticino International
"^buffalo,.*":
description: Buffalo, Inc.
+ "^buglabs,.*":
+ description: Bug Labs, Inc.
"^bur,.*":
description: B&R Industrial Automation GmbH
"^bytedance,.*":
@@ -325,6 +331,8 @@ patternProperties:
description: Conexant Systems, Inc.
"^colorfly,.*":
description: Colorful GRP, Shenzhen Xueyushi Technology Ltd.
+ "^compal,.*":
+ description: Compal Electronics, Inc.
"^compulab,.*":
description: CompuLab Ltd.
"^comvetia,.*":
@@ -353,6 +361,8 @@ patternProperties:
description: Guangzhou China Star Optoelectronics Technology Co., Ltd
"^csq,.*":
description: Shenzen Chuangsiqi Technology Co.,Ltd.
+ "^csr,.*":
+ description: Cambridge Silicon Radio
"^ctera,.*":
description: CTERA Networks Intl.
"^ctu,.*":
@@ -455,6 +465,8 @@ patternProperties:
description: Emtop Embedded Solutions
"^eeti,.*":
description: eGalax_eMPIA Technology Inc
+ "^egnite,.*":
+ description: egnite GmbH
"^einfochips,.*":
description: Einfochips
"^eink,.*":
@@ -485,8 +497,12 @@ patternProperties:
description: Empire Electronix
"^emtrion,.*":
description: emtrion GmbH
+ "^enbw,.*":
+ description: Energie Baden-Württemberg AG
"^enclustra,.*":
description: Enclustra GmbH
+ "^endian,.*":
+ description: Endian SRL
"^endless,.*":
description: Endless Mobile, Inc.
"^ene,.*":
@@ -554,6 +570,8 @@ patternProperties:
description: FocalTech Systems Co.,Ltd
"^forlinx,.*":
description: Baoding Forlinx Embedded Technology Co., Ltd.
+ "^foxlink,.*":
+ description: Foxlink Group
"^freebox,.*":
description: Freebox SAS
"^freecom,.*":
@@ -642,6 +660,10 @@ patternProperties:
description: Haoyu Microelectronic Co. Ltd.
"^hardkernel,.*":
description: Hardkernel Co., Ltd
+ "^hce,.*":
+ description: HCE Engineering SRL
+ "^headacoustics,.*":
+ description: HEAD acoustics
"^hechuang,.*":
description: Shenzhen Hechuang Intelligent Co.
"^hideep,.*":
@@ -725,6 +747,8 @@ patternProperties:
description: Shenzhen INANBO Electronic Technology Co., Ltd.
"^incircuit,.*":
description: In-Circuit GmbH
+ "^incostartec,.*":
+ description: INCOstartec GmbH
"^indiedroid,.*":
description: Indiedroid
"^inet-tek,.*":
@@ -933,6 +957,8 @@ patternProperties:
description: Maxim Integrated Products
"^maxlinear,.*":
description: MaxLinear Inc.
+ "^maxtor,.*":
+ description: Maxtor Corporation
"^mbvl,.*":
description: Mobiveil Inc.
"^mcube,.*":
@@ -1096,6 +1122,8 @@ patternProperties:
description: Nordic Semiconductor
"^nothing,.*":
description: Nothing Technology Limited
+ "^novatech,.*":
+ description: NovaTech Automation
"^novatek,.*":
description: Novatek
"^novtech,.*":
@@ -1191,6 +1219,8 @@ patternProperties:
description: Pervasive Displays, Inc.
"^phicomm,.*":
description: PHICOMM Co., Ltd.
+ "^phontech,.*":
+ description: Phontech
"^phytec,.*":
description: PHYTEC Messtechnik GmbH
"^picochip,.*":
@@ -1275,6 +1305,8 @@ patternProperties:
description: Ramtron International
"^raspberrypi,.*":
description: Raspberry Pi Foundation
+ "^raumfeld,.*":
+ description: Raumfeld GmbH
"^raydium,.*":
description: Raydium Semiconductor Corp.
"^rda,.*":
@@ -1313,6 +1345,8 @@ patternProperties:
description: ROHM Semiconductor Co., Ltd
"^ronbo,.*":
description: Ronbo Electronics
+ "^ronetix,.*":
+ description: Ronetix GmbH
"^roofull,.*":
description: Shenzhen Roofull Technology Co, Ltd
"^roseapplepi,.*":
@@ -1339,8 +1373,12 @@ patternProperties:
description: Schindler
"^schneider,.*":
description: Schneider Electric
+ "^schulercontrol,.*":
+ description: Schuler Group
"^sciosense,.*":
description: ScioSense B.V.
+ "^sdmc,.*":
+ description: SDMC Technology Co., Ltd
"^seagate,.*":
description: Seagate Technology PLC
"^seeed,.*":
@@ -1379,6 +1417,8 @@ patternProperties:
description: Si-En Technology Ltd.
"^si-linux,.*":
description: Silicon Linux Corporation
+ "^sielaff,.*":
+ description: Sielaff GmbH & Co.
"^siemens,.*":
description: Siemens AG
"^sifive,.*":
@@ -1447,6 +1487,8 @@ patternProperties:
description: SolidRun
"^solomon,.*":
description: Solomon Systech Limited
+ "^somfy,.*":
+ description: Somfy Systems Inc.
"^sony,.*":
description: Sony Corporation
"^sophgo,.*":
@@ -1517,6 +1559,8 @@ patternProperties:
"^synopsys,.*":
description: Synopsys, Inc. (deprecated, use snps)
deprecated: true
+ "^taos,.*":
+ description: Texas Advanced Optoelectronic Solutions Inc.
"^tbs,.*":
description: TBS Technologies
"^tbs-biometrics,.*":
@@ -1547,6 +1591,8 @@ patternProperties:
description: Teltonika Networks
"^tempo,.*":
description: Tempo Semiconductor
+ "^tenda,.*":
+ description: Shenzhen Tenda Technology Co., Ltd.
"^terasic,.*":
description: Terasic Inc.
"^tesla,.*":
@@ -1650,6 +1696,8 @@ patternProperties:
description: V3 Semiconductor
"^vaisala,.*":
description: Vaisala
+ "^valve,.*":
+ description: Valve Corporation
"^vamrs,.*":
description: Vamrs Ltd.
"^variscite,.*":
@@ -1750,6 +1798,8 @@ patternProperties:
description: Extreme Engineering Solutions (X-ES)
"^xiaomi,.*":
description: Xiaomi Technology Co., Ltd.
+ "^xicor,.*":
+ description: Xicor Inc.
"^xillybus,.*":
description: Xillybus Ltd.
"^xingbangda,.*":
diff --git a/Documentation/iio/ad3552r.rst b/Documentation/iio/ad3552r.rst
index f5d59e4e86c7..4274e35f503d 100644
--- a/Documentation/iio/ad3552r.rst
+++ b/Documentation/iio/ad3552r.rst
@@ -64,7 +64,8 @@ specific debugfs path ``/sys/kernel/debug/iio/iio:deviceX``.
Usage examples
--------------
-. code-block:: bash
+.. code-block:: bash
+
root:/sys/bus/iio/devices/iio:device0# cat data_source
normal
root:/sys/bus/iio/devices/iio:device0# echo -n ramp-16bit > data_source
diff --git a/Documentation/trace/histogram-design.rst b/Documentation/trace/histogram-design.rst
index 5765eb3e9efa..a30f4bed11b4 100644
--- a/Documentation/trace/histogram-design.rst
+++ b/Documentation/trace/histogram-design.rst
@@ -380,7 +380,9 @@ entry, ts0, corresponding to the ts0 variable in the sched_waking
trigger above.
sched_waking histogram
-----------------------::
+----------------------
+
+.. code-block::
+------------------+
| hist_data |<-------------------------------------------------------+
diff --git a/Makefile b/Makefile
index a04d0223dc84..22ee632f9104 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 17
-SUBLEVEL = 2
+SUBLEVEL = 3
EXTRAVERSION =
NAME = Baby Opossum Posse
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 582d96548385..06522451f018 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -231,7 +231,7 @@ flush_thread(void)
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
extern void ret_from_fork(void);
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 186ceab661eb..8166d0908713 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -166,7 +166,7 @@ asmlinkage void ret_from_fork(void);
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *c_regs; /* child's pt_regs */
diff --git a/arch/arm/boot/dts/renesas/r8a7791-porter.dts b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
index f518eadd8b9c..81b3c5d74e9b 100644
--- a/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
@@ -289,7 +289,7 @@ vin0_pins: vin0 {
};
can0_pins: can0 {
- groups = "can0_data";
+ groups = "can0_data_b";
function = "can0";
};
diff --git a/arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts b/arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts
index 39a3211c6133..55fe916740d7 100644
--- a/arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts
+++ b/arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts
@@ -239,7 +239,7 @@ &i2s1 {
i2s1_port: port {
i2s1_endpoint: endpoint {
- format = "i2s";
+ dai-format = "i2s";
mclk-fs = <256>;
remote-endpoint = <&codec_endpoint>;
};
diff --git a/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi b/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi
index ae2e8dffbe04..ea47f9960c35 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am335x-baltos.dtsi
@@ -269,7 +269,7 @@ &tps {
vcc7-supply = <&vbat>;
vccio-supply = <&vbat>;
- ti,en-ck32k-xtal = <1>;
+ ti,en-ck32k-xtal;
regulators {
vrtc_reg: regulator@0 {
diff --git a/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts b/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts
index 06767ea164b5..ece7f7854f6a 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-cm-t335.dts
@@ -483,8 +483,6 @@ &mcasp1 {
op-mode = <0>; /* MCASP_IIS_MODE */
tdm-slots = <2>;
- /* 16 serializers */
- num-serializer = <16>;
serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 0
>;
diff --git a/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi
index a7f99ae0c1fe..78c657429f64 100644
--- a/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap3-devkit8000-lcd-common.dtsi
@@ -65,7 +65,7 @@ ads7846@0 {
ti,debounce-max = /bits/ 16 <10>;
ti,debounce-tol = /bits/ 16 <5>;
ti,debounce-rep = /bits/ 16 <1>;
- ti,keep-vref-on = <1>;
+ ti,keep-vref-on;
ti,settle-delay-usec = /bits/ 16 <150>;
wakeup-source;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e16ed102960c..d7aa95225c70 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -234,7 +234,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long stack_start = args->stack;
unsigned long tls = args->tls;
struct thread_info *thread = task_thread_info(p);
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index e23b86834096..7e6c94f8edee 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -904,7 +904,7 @@ e_done:
/**
* at91_mckx_ps_restore: restore MCKx settings
*
- * Side effects: overwrites tmp1, tmp2
+ * Side effects: overwrites tmp1, tmp2 and tmp3
*/
.macro at91_mckx_ps_restore
#ifdef CONFIG_SOC_SAMA7
@@ -980,7 +980,7 @@ r_ps:
bic tmp3, tmp3, #AT91_PMC_MCR_V2_ID_MSK
orr tmp3, tmp3, tmp1
orr tmp3, tmp3, #AT91_PMC_MCR_V2_CMD
- str tmp2, [pmc, #AT91_PMC_MCR_V2]
+ str tmp3, [pmc, #AT91_PMC_MCR_V2]
wait_mckrdy tmp1
diff --git a/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts
index 553ad774ed13..514c221a7a86 100644
--- a/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts
+++ b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts
@@ -6,6 +6,7 @@
#include "sun55i-a523.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
/ {
model = "Radxa Cubie A5E";
@@ -20,11 +21,22 @@ chosen {
stdout-path = "serial0:115200n8";
};
- ext_osc32k: ext-osc32k-clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- clock-output-names = "ext_osc32k";
+ leds {
+ compatible = "gpio-leds";
+
+ power-led {
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&r_pio 0 4 GPIO_ACTIVE_LOW>; /* PL4 */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ use-led {
+ function = LED_FUNCTION_ACTIVITY;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL5 */
+ };
};
reg_vcc5v: vcc5v {
@@ -75,6 +87,9 @@ &mdio0 {
ext_rgmii_phy: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <1>;
+ reset-gpios = <&pio 7 8 GPIO_ACTIVE_LOW>; /* PH8 */
+ reset-assert-us = <10000>;
+ reset-deassert-us = <150000>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts b/arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts
index b9eeb6753e9e..4e71055fbd15 100644
--- a/arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun55i-t527-avaota-a1.dts
@@ -85,6 +85,9 @@ &mdio0 {
ext_rgmii_phy: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <1>;
+ reset-gpios = <&pio 7 8 GPIO_ACTIVE_LOW>; /* PH8 */
+ reset-assert-us = <10000>;
+ reset-deassert-us = <150000>;
};
};
@@ -306,6 +309,14 @@ &r_pio {
vcc-pm-supply = <&reg_aldo3>;
};
+&rtc {
+ clocks = <&r_ccu CLK_BUS_R_RTC>, <&osc24M>,
+ <&r_ccu CLK_R_AHB>, <&ext_osc32k>;
+ clock-names = "bus", "hosc", "ahb", "ext-osc32k";
+ assigned-clocks = <&rtc CLK_OSC32K>;
+ assigned-clock-rates = <32768>;
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pb_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts b/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts
index d07bb9193b43..b5483bd7b8d5 100644
--- a/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts
+++ b/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts
@@ -346,6 +346,14 @@ &r_pio {
vcc-pm-supply = <&reg_bldo2>;
};
+&rtc {
+ clocks = <&r_ccu CLK_BUS_R_RTC>, <&osc24M>,
+ <&r_ccu CLK_R_AHB>, <&ext_osc32k>;
+ clock-names = "bus", "hosc", "ahb", "ext-osc32k";
+ assigned-clocks = <&rtc CLK_OSC32K>;
+ assigned-clock-rates = <32768>;
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pb_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
index cb9ea3ca6ee0..71b2b3b547f7 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi
@@ -792,7 +792,7 @@ spicc1: spi@...00 {
pwm_mn: pwm@...00 {
compatible = "amlogic,c3-pwm",
"amlogic,meson-s4-pwm";
- reg = <0x0 54000 0x0 0x24>;
+ reg = <0x0 0x54000 0x0 0x24>;
clocks = <&clkc_periphs CLKID_PWM_M>,
<&clkc_periphs CLKID_PWM_N>;
#pwm-cells = <3>;
diff --git a/arch/arm64/boot/dts/apple/t6000-j314s.dts b/arch/arm64/boot/dts/apple/t6000-j314s.dts
index c9e192848fe3..1430b91ff1b1 100644
--- a/arch/arm64/boot/dts/apple/t6000-j314s.dts
+++ b/arch/arm64/boot/dts/apple/t6000-j314s.dts
@@ -16,3 +16,11 @@ / {
compatible = "apple,j314s", "apple,t6000", "apple,arm-platform";
model = "Apple MacBook Pro (14-inch, M1 Pro, 2021)";
};
+
+&wifi0 {
+ brcm,board-type = "apple,maldives";
+};
+
+&bluetooth0 {
+ brcm,board-type = "apple,maldives";
+};
diff --git a/arch/arm64/boot/dts/apple/t6000-j316s.dts b/arch/arm64/boot/dts/apple/t6000-j316s.dts
index ff1803ce2300..da0cbe7d9673 100644
--- a/arch/arm64/boot/dts/apple/t6000-j316s.dts
+++ b/arch/arm64/boot/dts/apple/t6000-j316s.dts
@@ -16,3 +16,11 @@ / {
compatible = "apple,j316s", "apple,t6000", "apple,arm-platform";
model = "Apple MacBook Pro (16-inch, M1 Pro, 2021)";
};
+
+&wifi0 {
+ brcm,board-type = "apple,madagascar";
+};
+
+&bluetooth0 {
+ brcm,board-type = "apple,madagascar";
+};
diff --git a/arch/arm64/boot/dts/apple/t6001-j314c.dts b/arch/arm64/boot/dts/apple/t6001-j314c.dts
index 1761d15b98c1..c37097dcfdb3 100644
--- a/arch/arm64/boot/dts/apple/t6001-j314c.dts
+++ b/arch/arm64/boot/dts/apple/t6001-j314c.dts
@@ -16,3 +16,11 @@ / {
compatible = "apple,j314c", "apple,t6001", "apple,arm-platform";
model = "Apple MacBook Pro (14-inch, M1 Max, 2021)";
};
+
+&wifi0 {
+ brcm,board-type = "apple,maldives";
+};
+
+&bluetooth0 {
+ brcm,board-type = "apple,maldives";
+};
diff --git a/arch/arm64/boot/dts/apple/t6001-j316c.dts b/arch/arm64/boot/dts/apple/t6001-j316c.dts
index 750e9beeffc0..3bc6e0c3294c 100644
--- a/arch/arm64/boot/dts/apple/t6001-j316c.dts
+++ b/arch/arm64/boot/dts/apple/t6001-j316c.dts
@@ -16,3 +16,11 @@ / {
compatible = "apple,j316c", "apple,t6001", "apple,arm-platform";
model = "Apple MacBook Pro (16-inch, M1 Max, 2021)";
};
+
+&wifi0 {
+ brcm,board-type = "apple,madagascar";
+};
+
+&bluetooth0 {
+ brcm,board-type = "apple,madagascar";
+};
diff --git a/arch/arm64/boot/dts/apple/t6001-j375c.dts b/arch/arm64/boot/dts/apple/t6001-j375c.dts
index 62ea437b58b2..2e7c23714d4d 100644
--- a/arch/arm64/boot/dts/apple/t6001-j375c.dts
+++ b/arch/arm64/boot/dts/apple/t6001-j375c.dts
@@ -16,3 +16,11 @@ / {
compatible = "apple,j375c", "apple,t6001", "apple,arm-platform";
model = "Apple Mac Studio (M1 Max, 2022)";
};
+
+&wifi0 {
+ brcm,board-type = "apple,okinawa";
+};
+
+&bluetooth0 {
+ brcm,board-type = "apple,okinawa";
+};
diff --git a/arch/arm64/boot/dts/apple/t6002-j375d.dts b/arch/arm64/boot/dts/apple/t6002-j375d.dts
index 3365429bdc8b..2b7f80119618 100644
--- a/arch/arm64/boot/dts/apple/t6002-j375d.dts
+++ b/arch/arm64/boot/dts/apple/t6002-j375d.dts
@@ -38,6 +38,14 @@ hpm5: usb-pd@3a {
};
};
+&wifi0 {
+ brcm,board-type = "apple,okinawa";
+};
+
+&bluetooth0 {
+ brcm,board-type = "apple,okinawa";
+};
+
/* delete unused always-on power-domains on die 1 */
/delete-node/ &ps_atc2_usb_aon_die1;
diff --git a/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi b/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
index 22ebc78e120b..c0aac59a6fae 100644
--- a/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
@@ -13,6 +13,7 @@
/ {
aliases {
+ bluetooth0 = &bluetooth0;
serial0 = &serial0;
wifi0 = &wifi0;
};
@@ -99,9 +100,18 @@ &port00 {
/* WLAN */
bus-range = <1 1>;
wifi0: wifi@0,0 {
+ compatible = "pci14e4,4433";
reg = <0x10000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
local-mac-address = [00 10 18 00 00 10];
+ apple,antenna-sku = "XX";
+ };
+
+ bluetooth0: bluetooth@0,1 {
+ compatible = "pci14e4,5f71";
+ reg = <0x10100 0x0 0x0 0x0 0x0>;
+ /* To be filled by the loader */
+ local-bd-address = [00 00 00 00 00 00];
};
};
diff --git a/arch/arm64/boot/dts/apple/t600x-j375.dtsi b/arch/arm64/boot/dts/apple/t600x-j375.dtsi
index d5b985ad5679..c0fb93ae72f4 100644
--- a/arch/arm64/boot/dts/apple/t600x-j375.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-j375.dtsi
@@ -11,6 +11,8 @@
/ {
aliases {
+ bluetooth0 = &bluetooth0;
+ ethernet0 = &ethernet0;
serial0 = &serial0;
wifi0 = &wifi0;
};
@@ -84,9 +86,18 @@ &port00 {
/* WLAN */
bus-range = <1 1>;
wifi0: wifi@0,0 {
+ compatible = "pci14e4,4433";
reg = <0x10000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
local-mac-address = [00 10 18 00 00 10];
+ apple,antenna-sku = "XX";
+ };
+
+ bluetooth0: bluetooth@0,1 {
+ compatible = "pci14e4,5f71";
+ reg = <0x10100 0x0 0x0 0x0 0x0>;
+ /* To be filled by the loader */
+ local-bd-address = [00 00 00 00 00 00];
};
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
index 152f95fd49a2..7089ccf3ce55 100644
--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
@@ -21,6 +21,14 @@ aliases {
};
};
+/*
+ * Adjust pcie0's iommu-map to account for the disabled port01.
+ */
+&pcie0 {
+ iommu-map = <0x100 &pcie0_dart_0 1 1>,
+ <0x200 &pcie0_dart_2 1 1>;
+};
+
&bluetooth0 {
brcm,board-type = "apple,santorini";
};
@@ -36,10 +44,10 @@ &wifi0 {
*/
&port02 {
- bus-range = <3 3>;
+ bus-range = <2 2>;
status = "okay";
ethernet0: ethernet@0,0 {
- reg = <0x30000 0x0 0x0 0x0 0x0>;
+ reg = <0x20000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
local-mac-address = [00 10 18 00 00 00];
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
index 89e97c604bd3..c3d2ddd887fd 100644
--- a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts
@@ -33,7 +33,9 @@ pwm-beeper {
reg_vcc_panel: regulator-vcc-panel {
compatible = "regulator-fixed";
- gpio = <&gpio4 3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_vcc_panel>;
+ gpio = <&gpio2 21 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-max-microvolt = <3300000>;
regulator-min-microvolt = <3300000>;
@@ -135,6 +137,16 @@ &tpm6 {
};
&usbotg1 {
+ adp-disable;
+ hnp-disable;
+ srp-disable;
+ disable-over-current;
+ dr_mode = "otg";
+ usb-role-switch;
+ status = "okay";
+};
+
+&usbotg2 {
#address-cells = <1>;
#size-cells = <0>;
disable-over-current;
@@ -147,17 +159,15 @@ usb1@1 {
};
};
-&usbotg2 {
- adp-disable;
- hnp-disable;
- srp-disable;
- disable-over-current;
- dr_mode = "otg";
- usb-role-switch;
- status = "okay";
-};
-
&usdhc2 {
vmmc-supply = <&reg_vdd_3v3>;
status = "okay";
};
+
+&iomuxc {
+ pinctrl_reg_vcc_panel: regvccpanelgrp {
+ fsl,pins = <
+ MX93_PAD_GPIO_IO21__GPIO2_IO21 0x31e /* PWM_2 */
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index 8296888bce59..4521da02d169 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -913,7 +913,7 @@ lpuart7: serial@...90000 {
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART7>;
clock-names = "ipg";
- dmas = <&edma2 26 0 FSL_EDMA_RX>, <&edma2 25 0 0>;
+ dmas = <&edma2 88 0 FSL_EDMA_RX>, <&edma2 87 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -925,7 +925,7 @@ lpuart8: serial@...a0000 {
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_LPUART8>;
clock-names = "ipg";
- dmas = <&edma2 28 0 FSL_EDMA_RX>, <&edma2 27 0 0>;
+ dmas = <&edma2 90 0 FSL_EDMA_RX>, <&edma2 89 0 0>;
dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt6331.dtsi b/arch/arm64/boot/dts/mediatek/mt6331.dtsi
index d89858c73ab1..243afbffa21f 100644
--- a/arch/arm64/boot/dts/mediatek/mt6331.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6331.dtsi
@@ -6,12 +6,12 @@
#include <dt-bindings/input/input.h>
&pwrap {
- pmic: mt6331 {
+ pmic: pmic {
compatible = "mediatek,mt6331";
interrupt-controller;
#interrupt-cells = <2>;
- mt6331regulator: mt6331regulator {
+ mt6331regulator: regulators {
compatible = "mediatek,mt6331-regulator";
mt6331_vdvfs11_reg: buck-vdvfs11 {
@@ -258,7 +258,7 @@ mt6331_vrtc_reg: ldo-vrtc {
};
mt6331_vdig18_reg: ldo-vdig18 {
- regulator-name = "dvdd18_dig";
+ regulator-name = "vdig18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-ramp-delay = <0>;
@@ -266,11 +266,11 @@ mt6331_vdig18_reg: ldo-vdig18 {
};
};
- mt6331rtc: mt6331rtc {
+ mt6331rtc: rtc {
compatible = "mediatek,mt6331-rtc";
};
- mt6331keys: mt6331keys {
+ mt6331keys: keys {
compatible = "mediatek,mt6331-keys";
power {
linux,keycodes = <KEY_POWER>;
diff --git a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
index 91de920c2245..03cc48321a3f 100644
--- a/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
+++ b/arch/arm64/boot/dts/mediatek/mt6795-sony-xperia-m5.dts
@@ -212,7 +212,7 @@ proximity@48 {
&mmc0 {
/* eMMC controller */
- mediatek,latch-ck = <0x14>; /* hs400 */
+ mediatek,latch-ck = <4>; /* hs400 */
mediatek,hs200-cmd-int-delay = <1>;
mediatek,hs400-cmd-int-delay = <1>;
mediatek,hs400-ds-dly3 = <0x1a>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index 559990dcd1d1..3211905b6f86 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -428,16 +428,16 @@ pcie_intc: interrupt-controller {
};
};
- pcie_phy: t-phy {
+ pcie_phy: t-phy@...00000 {
compatible = "mediatek,mt7986-tphy",
"mediatek,generic-tphy-v2";
- ranges;
- #address-cells = <2>;
- #size-cells = <2>;
+ ranges = <0 0 0x11c00000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
status = "disabled";
- pcie_port: pcie-phy@...00000 {
- reg = <0 0x11c00000 0 0x20000>;
+ pcie_port: pcie-phy@0 {
+ reg = <0 0x20000>;
clocks = <&clk40m>;
clock-names = "ref";
#phy-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 400c61d11035..fff93e26eb76 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -580,7 +580,7 @@ pins-cmd-dat {
pins-clk {
pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
drive-strength = <MTK_DRIVE_14mA>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
pins-rst {
@@ -609,13 +609,13 @@ pins-cmd-dat {
pins-clk {
pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
drive-strength = <MTK_DRIVE_14mA>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
pins-ds {
pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
drive-strength = <MTK_DRIVE_14mA>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
pins-rst {
@@ -633,13 +633,13 @@ pins-cmd-dat {
<PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
input-enable;
- mediatek,pull-up-adv = <10>;
+ mediatek,pull-up-adv = <2>;
};
pins-clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
input-enable;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
};
@@ -652,13 +652,13 @@ pins-cmd-dat {
<PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
drive-strength = <6>;
input-enable;
- mediatek,pull-up-adv = <10>;
+ mediatek,pull-up-adv = <2>;
};
pins-clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
drive-strength = <8>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
input-enable;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
index dbdee604edab..7c3010889ae7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
@@ -324,7 +324,7 @@ pins_cmd_dat {
pins_clk {
pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
drive-strength = <MTK_DRIVE_14mA>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
pins_rst {
@@ -353,13 +353,13 @@ pins_cmd_dat {
pins_clk {
pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
drive-strength = <MTK_DRIVE_14mA>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
pins_ds {
pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
drive-strength = <MTK_DRIVE_14mA>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
pins_rst {
@@ -377,13 +377,13 @@ pins_cmd_dat {
<PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
<PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
input-enable;
- mediatek,pull-up-adv = <10>;
+ mediatek,pull-up-adv = <2>;
};
pins_clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
input-enable;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
};
pins_pmu {
@@ -401,13 +401,13 @@ pins_cmd_dat {
<PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
drive-strength = <6>;
input-enable;
- mediatek,pull-up-adv = <10>;
+ mediatek,pull-up-adv = <2>;
};
pins_clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
drive-strength = <8>;
- mediatek,pull-down-adv = <10>;
+ mediatek,pull-down-adv = <2>;
input-enable;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi
index 7c971198fa95..72a2a2bff0a9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-krabby.dtsi
@@ -71,14 +71,14 @@ &i2c1 {
i2c-scl-internal-delay-ns = <10000>;
touchscreen: touchscreen@10 {
- compatible = "hid-over-i2c";
+ compatible = "elan,ekth6915";
reg = <0x10>;
interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&touchscreen_pins>;
- post-power-on-delay-ms = <10>;
- hid-descr-addr = <0x0001>;
- vdd-supply = <&pp3300_s3>;
+ reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>;
+ vcc33-supply = <&pp3300_s3>;
+ no-reset-on-power-off;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts b/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts
index 26d3451a5e47..24d9ede63eaa 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola-tentacruel-sku262144.dts
@@ -42,3 +42,7 @@ MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
CROS_STD_MAIN_KEYMAP
>;
};
+
+&touchscreen {
+ compatible = "elan,ekth6a12nay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
index 202478407727..90c388f1890f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
@@ -2183,7 +2183,7 @@ imp_iic_wrap_en: clock-controller@...c2000 {
};
efuse: efuse@...20000 {
- compatible = "mediatek,mt8188-efuse", "mediatek,efuse";
+ compatible = "mediatek,mt8188-efuse", "mediatek,mt8186-efuse";
reg = <0 0x11f20000 0 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 8877953ce292..ab0b2f606eb4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -1588,9 +1588,6 @@ pcie0: pcie@...f0000 {
power-domains = <&spm MT8195_POWER_DOMAIN_PCIE_MAC_P0>;
- resets = <&infracfg_ao MT8195_INFRA_RST2_PCIE_P0_SWRST>;
- reset-names = "mac";
-
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
index 4985b65925a9..d16f545cbbb2 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts
@@ -352,7 +352,7 @@ regulator {
LDO_VIN2-supply = <&vsys>;
LDO_VIN3-supply = <&vsys>;
- mt6360_buck1: BUCK1 {
+ mt6360_buck1: buck1 {
regulator-name = "emi_vdd2";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <1800000>;
@@ -362,7 +362,7 @@ MT6360_OPMODE_LP
regulator-always-on;
};
- mt6360_buck2: BUCK2 {
+ mt6360_buck2: buck2 {
regulator-name = "emi_vddq";
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1300000>;
@@ -372,7 +372,7 @@ MT6360_OPMODE_LP
regulator-always-on;
};
- mt6360_ldo1: LDO1 {
+ mt6360_ldo1: ldo1 {
regulator-name = "mt6360_ldo1"; /* Test point */
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
@@ -380,7 +380,7 @@ mt6360_ldo1: LDO1 {
MT6360_OPMODE_LP>;
};
- mt6360_ldo2: LDO2 {
+ mt6360_ldo2: ldo2 {
regulator-name = "panel1_p1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -388,7 +388,7 @@ mt6360_ldo2: LDO2 {
MT6360_OPMODE_LP>;
};
- mt6360_ldo3: LDO3 {
+ mt6360_ldo3: ldo3 {
regulator-name = "vmc_pmu";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -396,7 +396,7 @@ mt6360_ldo3: LDO3 {
MT6360_OPMODE_LP>;
};
- mt6360_ldo5: LDO5 {
+ mt6360_ldo5: ldo5 {
regulator-name = "vmch_pmu";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -404,7 +404,7 @@ mt6360_ldo5: LDO5 {
MT6360_OPMODE_LP>;
};
- mt6360_ldo6: LDO6 {
+ mt6360_ldo6: ldo6 {
regulator-name = "mt6360_ldo6"; /* Test point */
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <2100000>;
@@ -412,7 +412,7 @@ mt6360_ldo6: LDO6 {
MT6360_OPMODE_LP>;
};
- mt6360_ldo7: LDO7 {
+ mt6360_ldo7: ldo7 {
regulator-name = "emi_vmddr_en";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
index cce642c53812..3d3db33a64dc 100644
--- a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
@@ -11,7 +11,7 @@
/ {
model = "Pumpkin MT8516";
- compatible = "mediatek,mt8516";
+ compatible = "mediatek,mt8516-pumpkin", "mediatek,mt8516";
memory@...00000 {
device_type = "memory";
diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
index fa24b77a31a7..6b7070dad3df 100644
--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
@@ -1454,6 +1454,7 @@ usb_dwc3: usb@...0000 {
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
snps,usb3_lpm_capable;
+ snps,parkmode-disable-ss-quirk;
maximum-speed = "super-speed";
dr_mode = "otg";
usb-role-switch;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts b/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts
index 9ba23129e65e..2c1ab75e4d91 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts
@@ -185,7 +185,7 @@ vcc_sdhi: regulator-vcc-sdhi {
regulator-max-microvolt = <3300000>;
gpios = <&gpio8 13 GPIO_ACTIVE_HIGH>;
gpios-states = <1>;
- states = <3300000 0>, <1800000 1>;
+ states = <1800000 0>, <3300000 1>;
};
};
@@ -556,6 +556,10 @@ pins-mii {
drive-strength = <21>;
};
+ pins-vddq18-25-avb {
+ pins = "PIN_VDDQ_AVB0", "PIN_VDDQ_AVB1", "PIN_VDDQ_AVB2", "PIN_VDDQ_TSN0";
+ power-source = <1800>;
+ };
};
/* Page 28 / CANFD_IF */
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts b/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
index 1e67f0a2a945..9f6716fa1086 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
+++ b/arch/arm64/boot/dts/renesas/r9a09g047e57-smarc.dts
@@ -90,10 +90,10 @@ &i2c0 {
};
&keys {
- key-sleep {
- pinctrl-0 = <&nmi_pins>;
- pinctrl-names = "default";
+ pinctrl-0 = <&nmi_pins>;
+ pinctrl-names = "default";
+ key-sleep {
interrupts-extended = <&icu 0 IRQ_TYPE_EDGE_FALLING>;
linux,code = <KEY_SLEEP>;
label = "SLEEP";
diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
index 345b779e4f60..f3d7eff0d2f2 100644
--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
@@ -48,7 +48,10 @@ sound_card {
#if (SW_SCIF_CAN || SW_RSPI_CAN)
&canfd {
pinctrl-0 = <&can1_pins>;
- /delete-node/ channel@0;
+
+ channel0 {
+ status = "disabled";
+ };
};
#else
&canfd {
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
index 56527c56830e..012c21b58a5a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
@@ -232,6 +232,20 @@ vcc_ufs_s0: regulator-vcc-ufs-s0 {
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc_sys>;
};
+
+ vcc_wifi_reg_on: regulator-wifi-reg-on {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&wifi_reg_on>;
+ pinctrl-names = "default";
+ regulator-name = "wifi_reg_on";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_s3>;
+ };
};
&cpu_l0 {
@@ -242,6 +256,10 @@ &cpu_b0 {
cpu-supply = <&vdd_cpu_big_s0>;
};
+&combphy0_ps {
+ status = "okay";
+};
+
&combphy1_psu {
status = "okay";
};
@@ -257,9 +275,6 @@ &eth0m0_rx_bus2
&eth0m0_rgmii_clk
&eth0m0_rgmii_bus
&ethm0_clk0_25m_out>;
- snps,reset-gpio = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>;
- snps,reset-active-low;
- snps,reset-delays-us = <0 20000 100000>;
tx_delay = <0x21>;
status = "okay";
};
@@ -275,9 +290,6 @@ &eth1m0_rx_bus2
&eth1m0_rgmii_clk
&eth1m0_rgmii_bus
&ethm0_clk1_25m_out>;
- snps,reset-gpio = <&gpio3 RK_PA3 GPIO_ACTIVE_LOW>;
- snps,reset-active-low;
- snps,reset-delays-us = <0 20000 100000>;
tx_delay = <0x20>;
status = "okay";
};
@@ -680,19 +692,73 @@ regulator-state-mem {
};
};
+&i2c2 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ clock-output-names = "hym8563";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_int>;
+ wakeup-source;
+ #clock-cells = <0>;
+ };
+};
+
&mdio0 {
- rgmii_phy0: phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
+ rgmii_phy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-id001c.c916";
reg = <0x1>;
clocks = <&cru REFCLKO25M_GMAC0_OUT>;
+ assigned-clocks = <&cru REFCLKO25M_GMAC0_OUT>;
+ assigned-clock-rates = <25000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_phy0_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>;
};
};
&mdio1 {
- rgmii_phy1: phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
+ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-id001c.c916";
reg = <0x1>;
clocks = <&cru REFCLKO25M_GMAC1_OUT>;
+ assigned-clocks = <&cru REFCLKO25M_GMAC1_OUT>;
+ assigned-clock-rates = <25000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_phy1_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio3 RK_PA3 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_rst>;
+ reset-gpios = <&gpio2 RK_PB4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_s3>;
+ status = "okay";
+
+ pcie@0,0 {
+ reg = <0x0 0 0 0 0>;
+ bus-range = <0x0 0xf>;
+ device_type = "pci";
+ ranges;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ wifi: wifi@0,0 {
+ compatible = "pci14e4,449d";
+ reg = <0x10000 0 0 0 0>;
+ clocks = <&hym8563>;
+ clock-names = "lpo";
+ };
};
};
@@ -708,6 +774,28 @@ &pcie1 {
};
&pinctrl {
+ hym8563 {
+ rtc_int: rtc-int {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ network {
+ rgmii_phy0_rst: rgmii-phy0-rst {
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ rgmii_phy1_rst: rgmii-phy1-rst {
+ rockchip,pins = <3 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie0 {
+ pcie0_rst: pcie0-rst {
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
usb {
usb_host_pwren: usb-host-pwren {
rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -721,6 +809,16 @@ usbc0_int: usbc0-int {
rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
+
+ wifi {
+ wifi_reg_on: wifi-reg-on {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ wifi_wake_host: wifi-wake-host {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
};
&sdmmc {
diff --git a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
index 10e6b5c08619..737ff54c3cd2 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
@@ -46,31 +46,31 @@ ramoops@...00000 {
pmsg-size = <0x8000>;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c800000 0x00 0x00300000>;
no-map;
};
- mcu_m4fss_dma_memory_region: m4f-dma-memory@...00000 {
+ mcu_m4fss_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9cb00000 0x00 0x100000>;
no-map;
};
- mcu_m4fss_memory_region: m4f-memory@...00000 {
+ mcu_m4fss_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9cc00000 0x00 0xe00000>;
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9da00000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9db00000 0x00 0xc00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts b/arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts
index 2e4cf65ee323..1c95947430d3 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62-pocketbeagle2.dts
@@ -54,13 +54,13 @@ linux,cma {
linux,cma-default;
};
- mcu_m4fss_dma_memory_region: m4f-dma-memory@...00000 {
+ mcu_m4fss_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9cb00000 0x00 0x100000>;
no-map;
};
- mcu_m4fss_memory_region: m4f-memory@...00000 {
+ mcu_m4fss_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9cc00000 0x00 0xe00000>;
no-map;
@@ -78,7 +78,7 @@ secure_ddr: optee@...00000 {
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9db00000 0x00 0xc00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
index bc2289d74774..2b8b2c76e994 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
@@ -206,7 +206,7 @@ secure_ddr: optee@...00000 {
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9db00000 0x00 0xc00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index 72b09f9c69d8..7028d9835c4a 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -83,7 +83,7 @@ secure_ddr: optee@...00000 {
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9db00000 0x00 0xc00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
index 5dc5d2cb20cc..175fa5048a0b 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
@@ -59,37 +59,37 @@ linux,cma {
linux,cma-default;
};
- c7x_0_dma_memory_region: c7x-dma-memory@...00000 {
+ c7x_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x99800000 0x00 0x100000>;
no-map;
};
- c7x_0_memory_region: c7x-memory@...00000 {
+ c7x_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x99900000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b800000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b900000 0x00 0xf00000>;
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c800000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c900000 0x00 0xf00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
index bceead5e288e..4761c3dc2d8e 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
@@ -53,37 +53,37 @@ linux,cma {
linux,cma-default;
};
- c7x_0_dma_memory_region: c7x-dma-memory@...00000 {
+ c7x_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x99800000 0x00 0x100000>;
no-map;
};
- c7x_0_memory_region: c7x-memory@...00000 {
+ c7x_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x99900000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b800000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b900000 0x00 0xf00000>;
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c800000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c900000 0x00 0xf00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62d2-evm.dts b/arch/arm64/boot/dts/ti/k3-am62d2-evm.dts
index daea18b0bc61..19a7ca7ee173 100644
--- a/arch/arm64/boot/dts/ti/k3-am62d2-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62d2-evm.dts
@@ -58,37 +58,37 @@ secure_tfa_ddr: tfa@...00000 {
no-map;
};
- c7x_0_dma_memory_region: c7x-dma-memory@...00000 {
+ c7x_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x99800000 0x00 0x100000>;
no-map;
};
- c7x_0_memory_region: c7x-memory@...00000 {
+ c7x_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x99900000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b800000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b900000 0x00 0xf00000>;
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c800000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c900000 0x00 0xf00000>;
no-map;
@@ -100,7 +100,7 @@ secure_ddr: optee@...00000 {
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x01000000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi
index a2fdc6741da2..3963dbc1faef 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-verdin.dtsi
@@ -162,7 +162,7 @@ secure_ddr: optee@...00000 {
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c900000 0x00 0x01e00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index 899da7896563..2e081c329d6c 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -49,25 +49,25 @@ reserved-memory {
#size-cells = <2>;
ranges;
- mcu_r5fss0_core0_dma_memory_region: mcu-r5fss-dma-memory-region@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b800000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: mcu-r5fss-memory-region@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9b900000 0x00 0xf00000>;
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c800000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9c900000 0x00 0xf00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
index 13e1d36123d5..8eed8be2e8ba 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
@@ -58,25 +58,25 @@ linux,cma {
linux,cma-default;
};
- mcu_m4fss_dma_memory_region: m4f-dma-memory@...00000 {
+ mcu_m4fss_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9cb00000 0x00 0x100000>;
no-map;
};
- mcu_m4fss_memory_region: m4f-memory@...00000 {
+ mcu_m4fss_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9cc00000 0x00 0xe00000>;
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9da00000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9db00000 0x00 0xc00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
index d9d491b12c33..97ad433e4939 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
@@ -41,67 +41,67 @@ secure_ddr: optee@...00000 {
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- mcu_m4fss_dma_memory_region: m4f-dma-memory@...00000 {
+ mcu_m4fss_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- mcu_m4fss_memory_region: m4f-memory@...00000 {
+ mcu_m4fss_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa5000000 0x00 0x00800000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
index e01866372293..ccb04a3d97c9 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -53,67 +53,67 @@ secure_ddr: optee@...00000 {
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- mcu_m4fss_dma_memory_region: m4f-dma-memory@...00000 {
+ mcu_m4fss_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- mcu_m4fss_memory_region: m4f-memory@...00000 {
+ mcu_m4fss_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa5000000 0x00 0x00800000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
index 1deaa0be0085..1982608732ee 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -51,67 +51,67 @@ secure_ddr: optee@...00000 {
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- mcu_m4fss_dma_memory_region: m4f-dma-memory@...00000 {
+ mcu_m4fss_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- mcu_m4fss_memory_region: m4f-memory@...00000 {
+ mcu_m4fss_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa5000000 0x00 0x00800000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi b/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi
index a5cec9a07510..dfe570e0b707 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi
@@ -115,49 +115,49 @@ secure_ddr: optee@...00000 {
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
index 828d815d6bdf..a8d5144ab1b3 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
@@ -31,55 +31,55 @@ secure_ddr: optee@...00000 {
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa5000000 0x00 0x00800000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
index e5136ed94765..211eb9d93159 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
@@ -47,31 +47,31 @@ secure_ddr: secure-ddr@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa0000000 0 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa0100000 0 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa1000000 0 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa1100000 0 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa2000000 0x00 0x00200000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index e589690c7c82..dac36ca77a30 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -50,31 +50,31 @@ secure_ddr: secure-ddr@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa0000000 0 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa0100000 0 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa1000000 0 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0 0xa1100000 0 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa2000000 0x00 0x00100000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
index bf9b23df1da2..859294b9a2f3 100644
--- a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
+++ b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts
@@ -50,67 +50,67 @@ secure_ddr: optee@...00000 {
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: mcu-r5fss-dma-memory-region@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: mcu-r5fss-memory-region@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: main-r5fss-dma-memory-region@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: main-r5fss-memory-region@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- c7x_0_dma_memory_region: c7x-dma-memory@...00000 {
+ c7x_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- c7x_0_memory_region: c7x-memory@...00000 {
+ c7x_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- c7x_1_dma_memory_region: c7x-dma-memory@...00000 {
+ c7x_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- c7x_1_memory_region: c7x-memory@...00000 {
+ c7x_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa5000000 0x00 0x1c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi
index fd715fee8170..71f56f0f5363 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am68-phycore-som.dtsi
@@ -49,103 +49,103 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- c71_1_dma_memory_region: c71-dma-memory@...00000 {
+ c71_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- c71_1_memory_region: c71-memory@...00000 {
+ c71_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa8000000 0x00 0x01c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
index 4ca2d4e2fb9b..ecc7b3a100d0 100644
--- a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi
@@ -27,103 +27,103 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- c71_1_dma_memory_region: c71-dma-memory@...00000 {
+ c71_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- c71_1_memory_region: c71-memory@...00000 {
+ c71_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa8000000 0x00 0x01c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
index 612ac27643d2..922866b96e66 100644
--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
@@ -49,145 +49,145 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- main_r5fss2_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss2_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- main_r5fss2_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss2_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- main_r5fss2_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss2_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- main_r5fss2_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss2_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8100000 0x00 0xf00000>;
no-map;
};
- c71_1_dma_memory_region: c71-dma-memory@...00000 {
+ c71_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa9000000 0x00 0x100000>;
no-map;
};
- c71_1_memory_region: c71-memory@...00000 {
+ c71_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa9100000 0x00 0xf00000>;
no-map;
};
- c71_2_dma_memory_region: c71-dma-memory@...00000 {
+ c71_2_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xaa000000 0x00 0x100000>;
no-map;
};
- c71_2_memory_region: c71-memory@...00000 {
+ c71_2_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xaa100000 0x00 0xf00000>;
no-map;
};
- c71_3_dma_memory_region: c71-dma-memory@...00000 {
+ c71_3_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xab000000 0x00 0x100000>;
no-map;
};
- c71_3_memory_region: c71-memory@...00000 {
+ c71_3_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xab100000 0x00 0xf00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
index 291ab9bb414d..e8cec315e381 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
@@ -29,55 +29,55 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa4000000 0x00 0x00800000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
index fb899c99753e..bb771ce823ec 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
@@ -51,115 +51,117 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- c66_0_dma_memory_region: c66-dma-memory@...00000 {
+ /* Carveout locations are flipped due to caching */
+ c66_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- c66_0_memory_region: c66-memory@...00000 {
+ c66_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- c66_1_dma_memory_region: c66-dma-memory@...00000 {
+ /* Carveout locations are flipped due to caching */
+ c66_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- c66_1_memory_region: c66-memory@...00000 {
+ c66_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xaa000000 0x00 0x01c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
index ffef3d1cfd55..488c5ebe9e27 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
@@ -48,115 +48,117 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- c66_0_dma_memory_region: c66-dma-memory@...00000 {
+ /* Carveout locations are flipped due to caching */
+ c66_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- c66_0_memory_region: c66-memory@...00000 {
+ c66_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- c66_1_dma_memory_region: c66-dma-memory@...00000 {
+ /* Carveout locations are flipped due to caching */
+ c66_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- c66_1_memory_region: c66-memory@...00000 {
+ c66_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xaa000000 0x00 0x01c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
index 0722f6361cc8..ef11a5fb6ad5 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
@@ -29,115 +29,115 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- c66_1_dma_memory_region: c66-dma-memory@...00000 {
+ c66_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- c66_0_memory_region: c66-memory@...00000 {
+ c66_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- c66_0_dma_memory_region: c66-dma-memory@...00000 {
+ c66_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- c66_1_memory_region: c66-memory@...00000 {
+ c66_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xaa000000 0x00 0x01c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
index 54fc5c4f8c3f..391e8e3ac268 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
@@ -31,103 +31,103 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- c71_1_dma_memory_region: c71-dma-memory@...00000 {
+ c71_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- c71_1_memory_region: c71-memory@...00000 {
+ c71_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa8000000 0x00 0x01c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
index 9d8abfa9afd2..4cfe5c88e48f 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
@@ -52,67 +52,67 @@ secure_ddr: optee@...00000 {
no-map;
};
- wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ wkup_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- wkup_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ wkup_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: mcu-r5fss-dma-memory-region@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: mcu-r5fss-memory-region@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: main-r5fss-dma-memory-region@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: main-r5fss-memory-region@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- c7x_0_dma_memory_region: c7x-dma-memory@...00000 {
+ c7x_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- c7x_0_memory_region: c7x-memory@...00000 {
+ c7x_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- c7x_1_dma_memory_region: c7x-dma-memory@...00000 {
+ c7x_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- c7x_1_memory_region: c7x-memory@...00000 {
+ c7x_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- rtos_ipc_memory_region: ipc-memories@...00000 {
+ rtos_ipc_memory_region: memory@...00000 {
reg = <0x00 0xa5000000 0x00 0x1c00000>;
alignment = <0x1000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j742s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j742s2-mcu-wakeup.dtsi
new file mode 100644
index 000000000000..61db2348d6a4
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-j742s2-mcu-wakeup.dtsi
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Device Tree Source for J742S2 SoC Family
+ *
+ * TRM: https://www.ti.com/lit/pdf/spruje3
+ *
+ * Copyright (C) 2025 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+&mcu_r5fss0_core0 {
+ firmware-name = "j742s2-mcu-r5f0_0-fw";
+};
+
+&mcu_r5fss0_core1 {
+ firmware-name = "j742s2-mcu-r5f0_1-fw";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j742s2.dtsi b/arch/arm64/boot/dts/ti/k3-j742s2.dtsi
index 7a72f82f56d6..d265df1abade 100644
--- a/arch/arm64/boot/dts/ti/k3-j742s2.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j742s2.dtsi
@@ -96,3 +96,4 @@ cpu3: cpu@3 {
};
#include "k3-j742s2-main.dtsi"
+#include "k3-j742s2-mcu-wakeup.dtsi"
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
index a84bde08f85e..2ed1ec6d53c8 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
@@ -28,13 +28,13 @@ reserved_memory: reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
- c71_3_dma_memory_region: c71-dma-memory@...00000 {
+ c71_3_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xab000000 0x00 0x100000>;
no-map;
};
- c71_3_memory_region: c71-memory@...00000 {
+ c71_3_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xab100000 0x00 0xf00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
index fa656b7b13a1..877b50991ee6 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-evm-common.dtsi
@@ -35,133 +35,133 @@ secure_ddr: optee@...00000 {
no-map;
};
- mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa0100000 0x00 0xf00000>;
no-map;
};
- mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ mcu_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1000000 0x00 0x100000>;
no-map;
};
- mcu_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ mcu_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa1100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa2100000 0x00 0xf00000>;
no-map;
};
- main_r5fss0_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss0_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3000000 0x00 0x100000>;
no-map;
};
- main_r5fss0_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss0_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa3100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
- main_r5fss1_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss1_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5000000 0x00 0x100000>;
no-map;
};
- main_r5fss1_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss1_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa5100000 0x00 0xf00000>;
no-map;
};
- main_r5fss2_core0_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss2_core0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
};
- main_r5fss2_core0_memory_region: r5f-memory@...00000 {
+ main_r5fss2_core0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6100000 0x00 0xf00000>;
no-map;
};
- main_r5fss2_core1_dma_memory_region: r5f-dma-memory@...00000 {
+ main_r5fss2_core1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;
};
- main_r5fss2_core1_memory_region: r5f-memory@...00000 {
+ main_r5fss2_core1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7100000 0x00 0xf00000>;
no-map;
};
- c71_0_dma_memory_region: c71-dma-memory@...00000 {
+ c71_0_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8000000 0x00 0x100000>;
no-map;
};
- c71_0_memory_region: c71-memory@...00000 {
+ c71_0_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa8100000 0x00 0xf00000>;
no-map;
};
- c71_1_dma_memory_region: c71-dma-memory@...00000 {
+ c71_1_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa9000000 0x00 0x100000>;
no-map;
};
- c71_1_memory_region: c71-memory@...00000 {
+ c71_1_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa9100000 0x00 0xf00000>;
no-map;
};
- c71_2_dma_memory_region: c71-dma-memory@...00000 {
+ c71_2_dma_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xaa000000 0x00 0x100000>;
no-map;
};
- c71_2_memory_region: c71-memory@...00000 {
+ c71_2_memory_region: memory@...00000 {
compatible = "shared-dma-pool";
reg = <0x00 0xaa100000 0x00 0xf00000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-pinctrl.h b/arch/arm64/boot/dts/ti/k3-pinctrl.h
index c0f09be8d3f9..146b780f3bd4 100644
--- a/arch/arm64/boot/dts/ti/k3-pinctrl.h
+++ b/arch/arm64/boot/dts/ti/k3-pinctrl.h
@@ -55,8 +55,8 @@
#define PIN_DS_FORCE_DISABLE (0 << FORCE_DS_EN_SHIFT)
#define PIN_DS_FORCE_ENABLE (1 << FORCE_DS_EN_SHIFT)
-#define PIN_DS_IO_OVERRIDE_DISABLE (0 << DS_IO_OVERRIDE_EN_SHIFT)
-#define PIN_DS_IO_OVERRIDE_ENABLE (1 << DS_IO_OVERRIDE_EN_SHIFT)
+#define PIN_DS_ISO_OVERRIDE_DISABLE (0 << ISO_OVERRIDE_EN_SHIFT)
+#define PIN_DS_ISO_OVERRIDE_ENABLE (1 << ISO_OVERRIDE_EN_SHIFT)
#define PIN_DS_OUT_ENABLE (0 << DS_OUT_DIS_SHIFT)
#define PIN_DS_OUT_DISABLE (1 << DS_OUT_DIS_SHIFT)
#define PIN_DS_OUT_VALUE_ZERO (0 << DS_OUT_VAL_SHIFT)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 96482a1412c6..fba7ca102a8c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -409,7 +409,7 @@ asmlinkage void ret_from_fork(void) asm("ret_from_fork");
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long stack_start = args->stack;
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 52ffe115a8c4..4ef9b7b8fb40 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -3064,8 +3064,7 @@ void bpf_jit_free(struct bpf_prog *prog)
* before freeing it.
*/
if (jit_data) {
- bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
- sizeof(jit_data->header->size));
+ bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header);
kfree(jit_data);
}
prog->bpf_func -= cfi_get_offset();
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index 0c6e4b17fe00..a7a90340042a 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -32,7 +32,7 @@ void flush_thread(void){}
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct switch_stack *childstack;
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 2a77bfd75694..15b4992bfa29 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -52,7 +52,7 @@ void arch_cpu_idle(void)
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 3582f591bab2..efd9edf65603 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -167,7 +167,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
unsigned long childksp;
unsigned long tls = args->tls;
unsigned long usp = args->stack;
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
struct pt_regs *childregs, *regs = current_pt_regs();
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 50c469067f3a..b5e2312a2fca 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -166,6 +166,10 @@ static inline __init bool kaslr_disabled(void)
return true;
#endif
+ str = strstr(boot_command_line, "kexec_file");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+
return false;
}
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index abfdb6bb5c38..99f55c0bb5e0 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1294,8 +1294,10 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
- if (!is_kernel_text((unsigned long)ip) &&
- !is_bpf_text_address((unsigned long)ip))
+ /* Only poking bpf text is supported. Since kernel function entry
+ * is set up by ftrace, we rely on ftrace to poke kernel functions.
+ */
+ if (!is_bpf_text_address((unsigned long)ip))
return -ENOTSUPP;
ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
@@ -1448,12 +1450,43 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
+/*
+ * Sign-extend the register if necessary
+ */
+static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign)
+{
+ /* ABI requires unsigned char/short to be zero-extended */
+ if (!sign && (size == 1 || size == 2)) {
+ if (rd != rj)
+ move_reg(ctx, rd, rj);
+ return;
+ }
+
+ switch (size) {
+ case 1:
+ emit_insn(ctx, extwb, rd, rj);
+ break;
+ case 2:
+ emit_insn(ctx, extwh, rd, rj);
+ break;
+ case 4:
+ emit_insn(ctx, addiw, rd, rj, 0);
+ break;
+ case 8:
+ if (rd != rj)
+ move_reg(ctx, rd, rj);
+ break;
+ default:
+ pr_warn("bpf_jit: invalid size %d for sign_extend\n", size);
+ }
+}
+
static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
void *func_addr, u32 flags)
{
int i, ret, save_ret;
- int stack_size = 0, nargs = 0;
+ int stack_size, nargs;
int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
void *orig_call = func_addr;
@@ -1462,9 +1495,6 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
u32 **branches = NULL;
- if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
- return -ENOTSUPP;
-
/*
* FP + 8 [ RA to parent func ] return address to parent
* function
@@ -1495,20 +1525,23 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
return -ENOTSUPP;
+ /* FIXME: No support of struct argument */
+ for (i = 0; i < m->nr_args; i++) {
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ return -ENOTSUPP;
+ }
+
if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
return -ENOTSUPP;
- stack_size = 0;
-
/* Room of trampoline frame to store return address and frame pointer */
- stack_size += 16;
+ stack_size = 16;
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
- if (save_ret) {
- /* Save BPF R0 and A0 */
- stack_size += 16;
- retval_off = stack_size;
- }
+ if (save_ret)
+ stack_size += 16; /* Save BPF R0 and A0 */
+
+ retval_off = stack_size;
/* Room of trampoline frame to store args */
nargs = m->nr_args;
@@ -1595,7 +1628,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
if (ret)
return ret;
@@ -1645,7 +1678,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = ctx->ro_image + ctx->idx;
- move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
if (ret)
goto out;
@@ -1655,8 +1688,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
restore_args(ctx, m->nr_args, args_off);
if (save_ret) {
- emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ if (is_struct_ops)
+ sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0],
+ m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG);
+ else
+ emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
}
emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
@@ -1715,7 +1752,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
- if (ret > 0 && validate_code(&ctx) < 0) {
+ if (ret < 0)
+ goto out;
+
+ if (validate_code(&ctx) < 0) {
ret = -EINVAL;
goto out;
}
@@ -1726,7 +1766,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
goto out;
}
- bpf_flush_icache(ro_image, ro_image_end);
out:
kvfree(image);
return ret < 0 ? ret : size;
@@ -1744,8 +1783,7 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
- /* Page align */
- return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE);
+ return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index fda7eac23f87..f5a07a70e938 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -141,7 +141,7 @@ asmlinkage int m68k_clone3(struct pt_regs *regs)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct fork_frame {
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 56342e11442d..6cbf642d7b80 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -54,7 +54,7 @@ void flush_thread(void)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 02aa6a04a21d..29191fa1801e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -107,7 +107,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index f84021303f6a..151404139085 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -101,7 +101,7 @@ void flush_thread(void)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index eef99fee2110..73ffb9fa3118 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -165,7 +165,7 @@ extern asmlinkage void ret_from_fork(void);
int
copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *userregs;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index ed93bd8c1545..e64ab5d2a40d 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -201,7 +201,7 @@ arch_initcall(parisc_idle_init);
int
copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *cregs = &(p->thread.regs);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 93402a1d9c9f..e51a595a0622 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -971,6 +971,10 @@ config SCHED_SMT
when dealing with POWER5 cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
+config SCHED_MC
+ def_bool y
+ depends on SMP
+
config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 9753fb87217c..a58b1029592c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -58,7 +58,7 @@ ifeq ($(CONFIG_PPC64)$(CONFIG_LD_IS_BFD),yy)
# There is a corresponding test in arch/powerpc/lib/Makefile
KBUILD_LDFLAGS_MODULE += --save-restore-funcs
else
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+KBUILD_LDFLAGS_MODULE += $(objtree)/arch/powerpc/lib/crtsavres.o
endif
ifdef CONFIG_CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index dd4eb3063175..f4390704d5ba 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -7,8 +7,14 @@
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+
+#ifdef CONFIG_PPC_BOOK3S_603
+ memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
+ (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+#endif
+ return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index bb5f3e8ea912..4ef780b291bc 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -22,7 +22,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
-#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
+#ifdef CONFIG_PPC_8xx
memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
(MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
#endif
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index da15b5efe807..f19ca44512d1 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -131,6 +131,8 @@ static inline int cpu_to_coregroup_id(int cpu)
#ifdef CONFIG_SMP
#include <asm/cputable.h>
+struct cpumask *cpu_coregroup_mask(int cpu);
+
#ifdef CONFIG_PPC64
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 56c5ebe21b99..613606400ee9 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -162,7 +162,7 @@ instruction_counter:
* For the MPC8xx, this is a software tablewalk to load the instruction
* TLB. The task switch loads the M_TWB register with the pointer to the first
* level table.
- * If we discover there is no second level table (value is zero) or if there
+ * If there is no second level table (value is zero) or if there
* is an invalid pte, we load that into the TLB, which causes another fault
* into the TLB Error interrupt where we can handle such problems.
* We have to use the MD_xxx registers for the tablewalk because the
@@ -183,9 +183,6 @@ instruction_counter:
mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
mtspr SPRN_MD_EPN, r10
@@ -228,10 +225,6 @@ instruction_counter:
mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
- mfspr r10, SPRN_MD_EPN
mfspr r10, SPRN_M_TWB /* Get level 1 table */
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 126bf3b06ab7..0e45cac4de76 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -1139,7 +1139,7 @@ static int setup_ftrace_ool_stubs(const Elf64_Shdr *sechdrs, unsigned long addr,
/* reserve stubs */
for (i = 0; i < num_stubs; i++)
- if (patch_u32((void *)&stub->funcdata, PPC_RAW_NOP()))
+ if (patch_u32((void *)&stub[i].funcdata, PPC_RAW_NOP()))
return -1;
#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 855e09886503..eb23966ac0a9 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1805,7 +1805,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
f = ret_from_kernel_user_thread;
} else {
struct pt_regs *regs = current_pt_regs();
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
/* Copy registers */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f59e4b9cc207..68edb66c2964 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1028,19 +1028,19 @@ static int powerpc_shared_proc_flags(void)
* We can't just pass cpu_l2_cache_mask() directly because
* it returns a non-const pointer and the compiler barfs on that.
*/
-static const struct cpumask *shared_cache_mask(int cpu)
+static const struct cpumask *tl_cache_mask(struct sched_domain_topology_level *tl, int cpu)
{
return per_cpu(cpu_l2_cache_map, cpu);
}
#ifdef CONFIG_SCHED_SMT
-static const struct cpumask *smallcore_smt_mask(int cpu)
+static const struct cpumask *tl_smallcore_smt_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_smallcore_mask(cpu);
}
#endif
-static struct cpumask *cpu_coregroup_mask(int cpu)
+struct cpumask *cpu_coregroup_mask(int cpu)
{
return per_cpu(cpu_coregroup_map, cpu);
}
@@ -1054,11 +1054,6 @@ static bool has_coregroup_support(void)
return coregroup_enabled;
}
-static const struct cpumask *cpu_mc_mask(int cpu)
-{
- return cpu_coregroup_mask(cpu);
-}
-
static int __init init_big_cores(void)
{
int cpu;
@@ -1448,7 +1443,7 @@ static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
return false;
}
- cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
+ cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu));
/* Update l2-cache mask with all the CPUs that are part of submask */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
@@ -1538,7 +1533,7 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
return;
}
- cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
+ cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu));
/* Update coregroup mask with all the CPUs that are part of submask */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
@@ -1601,7 +1596,7 @@ static void add_cpu_to_masks(int cpu)
/* If chip_id is -1; limit the cpu_core_mask to within PKG */
if (chip_id == -1)
- cpumask_and(mask, mask, cpu_cpu_mask(cpu));
+ cpumask_and(mask, mask, cpu_node_mask(cpu));
for_each_cpu(i, mask) {
if (chip_id == cpu_to_chip_id(i)) {
@@ -1701,22 +1696,22 @@ static void __init build_sched_topology(void)
if (has_big_cores) {
pr_info("Big cores detected but using small core scheduling\n");
powerpc_topology[i++] =
- SDTL_INIT(smallcore_smt_mask, powerpc_smt_flags, SMT);
+ SDTL_INIT(tl_smallcore_smt_mask, powerpc_smt_flags, SMT);
} else {
- powerpc_topology[i++] = SDTL_INIT(cpu_smt_mask, powerpc_smt_flags, SMT);
+ powerpc_topology[i++] = SDTL_INIT(tl_smt_mask, powerpc_smt_flags, SMT);
}
#endif
if (shared_caches) {
powerpc_topology[i++] =
- SDTL_INIT(shared_cache_mask, powerpc_shared_cache_flags, CACHE);
+ SDTL_INIT(tl_cache_mask, powerpc_shared_cache_flags, CACHE);
}
if (has_coregroup_support()) {
powerpc_topology[i++] =
- SDTL_INIT(cpu_mc_mask, powerpc_shared_proc_flags, MC);
+ SDTL_INIT(tl_mc_mask, powerpc_shared_proc_flags, MC);
}
- powerpc_topology[i++] = SDTL_INIT(cpu_cpu_mask, powerpc_shared_proc_flags, PKG);
+ powerpc_topology[i++] = SDTL_INIT(tl_pkg_mask, powerpc_shared_proc_flags, PKG);
/* There must be one trailing NULL entry left. */
BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 6dca92d5a6e8..841d077e2825 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -488,8 +488,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
return ret;
/* Set up out-of-line stub */
- if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
- return ftrace_init_ool_stub(mod, rec);
+ if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
+ ret = ftrace_init_ool_stub(mod, rec);
+ goto out;
+ }
/* Nop-out the ftrace location */
new = ppc_inst(PPC_RAW_NOP());
@@ -520,6 +522,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
return -EINVAL;
}
+out:
+ if (!ret)
+ ret = ftrace_rec_set_nop_ops(rec);
+
return ret;
}
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index a0a40889d79a..31a392993cb4 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -223,7 +223,7 @@ asmlinkage void ret_from_fork_user(struct pt_regs *regs)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c
index 3b426c800480..5f33625f4070 100644
--- a/arch/riscv/kvm/vmid.c
+++ b/arch/riscv/kvm/vmid.c
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
+#include <asm/kvm_mmu.h>
#include <asm/kvm_tlb.h>
#include <asm/kvm_vmid.h>
@@ -28,7 +29,7 @@ void __init kvm_riscv_gstage_vmid_detect(void)
/* Figure-out number of VMID bits in HW */
old = csr_read(CSR_HGATP);
- csr_write(CSR_HGATP, old | HGATP_VMID);
+ csr_write(CSR_HGATP, (kvm_riscv_gstage_mode << HGATP_MODE_SHIFT) | HGATP_VMID);
vmid_bits = csr_read(CSR_HGATP);
vmid_bits = (vmid_bits & HGATP_VMID) >> HGATP_VMID_SHIFT;
vmid_bits = fls_long(vmid_bits);
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 9883a55d61b5..f1efa4d6b27f 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -765,6 +765,39 @@ static int emit_atomic_rmw(u8 rd, u8 rs, const struct bpf_insn *insn,
return 0;
}
+/*
+ * Sign-extend the register if necessary
+ */
+static int sign_extend(u8 rd, u8 rs, u8 sz, bool sign, struct rv_jit_context *ctx)
+{
+ if (!sign && (sz == 1 || sz == 2)) {
+ if (rd != rs)
+ emit_mv(rd, rs, ctx);
+ return 0;
+ }
+
+ switch (sz) {
+ case 1:
+ emit_sextb(rd, rs, ctx);
+ break;
+ case 2:
+ emit_sexth(rd, rs, ctx);
+ break;
+ case 4:
+ emit_sextw(rd, rs, ctx);
+ break;
+ case 8:
+ if (rd != rs)
+ emit_mv(rd, rs, ctx);
+ break;
+ default:
+ pr_err("bpf-jit: invalid size %d for sign_extend\n", sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
#define REG_DONT_CLEAR_MARKER 0 /* RV_REG_ZERO unused in pt_regmap */
@@ -1226,8 +1259,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
if (save_ret) {
- emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
+ if (is_struct_ops) {
+ ret = sign_extend(RV_REG_A0, regmap[BPF_REG_0], m->ret_size,
+ m->ret_flags & BTF_FMODEL_SIGNED_ARG, ctx);
+ if (ret)
+ goto out;
+ } else {
+ emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+ }
}
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
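The sign_extend() helper added above matters because the RV64 calling convention keeps 32-bit values sign-extended in 64-bit registers: if a struct_ops program's return value were loaded back zero-extended, a negative int such as -EINVAL would read as a large positive number in a0. A small userspace sketch of the difference (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t rc = -22;				/* e.g. -EINVAL returned by a callback */
	uint64_t zero_ext = (uint32_t)rc;		/* 0x00000000ffffffea */
	uint64_t sign_ext = (uint64_t)(int64_t)rc;	/* 0xffffffffffffffea */

	/* Only the sign-extended form reads back as -22 when treated as a long. */
	printf("zero-extended: %lld, sign-extended: %lld\n",
	       (long long)zero_ext, (long long)sign_ext);
	return 0;
}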
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index f55f09cda6f8..b107dbca4ed7 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -106,7 +106,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long new_stackp = args->stack;
unsigned long tls = args->tls;
struct fake_frame
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 46569b8e47dd..1594c80e9bc4 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -509,33 +509,27 @@ int topology_cpu_init(struct cpu *cpu)
return rc;
}
-static const struct cpumask *cpu_thread_mask(int cpu)
-{
- return &cpu_topology[cpu].thread_mask;
-}
-
-
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_mask;
}
-static const struct cpumask *cpu_book_mask(int cpu)
+static const struct cpumask *tl_book_mask(struct sched_domain_topology_level *tl, int cpu)
{
return &cpu_topology[cpu].book_mask;
}
-static const struct cpumask *cpu_drawer_mask(int cpu)
+static const struct cpumask *tl_drawer_mask(struct sched_domain_topology_level *tl, int cpu)
{
return &cpu_topology[cpu].drawer_mask;
}
static struct sched_domain_topology_level s390_topology[] = {
- SDTL_INIT(cpu_thread_mask, cpu_smt_flags, SMT),
- SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
- SDTL_INIT(cpu_book_mask, NULL, BOOK),
- SDTL_INIT(cpu_drawer_mask, NULL, DRAWER),
- SDTL_INIT(cpu_cpu_mask, NULL, PKG),
+ SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
+ SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
+ SDTL_INIT(tl_book_mask, NULL, BOOK),
+ SDTL_INIT(tl_drawer_mask, NULL, DRAWER),
+ SDTL_INIT(tl_pkg_mask, NULL, PKG),
{ NULL, },
};
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bb17efe29d65..b2b8eb62b82e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1790,20 +1790,21 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
+
/*
* Copy the tail call counter to where the callee expects it.
- *
- * Note 1: The callee can increment the tail call counter, but
- * we do not load it back, since the x86 JIT does not do this
- * either.
- *
- * Note 2: We assume that the verifier does not let us call the
- * main program, which clears the tail call counter on entry.
*/
- /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
- _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
- 0xf000 | (jit->frame_off +
- offsetof(struct prog_frame, tail_call_cnt)));
+
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ /*
+ * mvc tail_call_cnt(4,%r15),
+ * frame_off+tail_call_cnt(%r15)
+ */
+ _EMIT6(0xd203f000 | offsetof(struct prog_frame,
+ tail_call_cnt),
+ 0xf000 | (jit->frame_off +
+ offsetof(struct prog_frame,
+ tail_call_cnt)));
/* Sign-extend the kfunc arguments. */
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
@@ -1825,6 +1826,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
call_r1(jit);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
+
+ /*
+ * Copy the potentially updated tail call counter back.
+ */
+
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ /*
+ * mvc frame_off+tail_call_cnt(%r15),
+ * tail_call_cnt(4,%r15)
+ */
+ _EMIT6(0xd203f000 | (jit->frame_off +
+ offsetof(struct prog_frame,
+ tail_call_cnt)),
+ 0xf000 | offsetof(struct prog_frame,
+ tail_call_cnt));
+
break;
}
case BPF_JMP | BPF_TAIL_CALL: {
@@ -2822,6 +2839,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
/* stg %r2,retval_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
tjit->retval_off);
+ /* mvc tccnt_off(%r15),tail_call_cnt(4,%r15) */
+ _EMIT6(0xd203f000 | tjit->tccnt_off,
+ 0xf000 | offsetof(struct prog_frame, tail_call_cnt));
im->ip_after_call = jit->prg_buf + jit->prg;
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 92b6649d4929..62f753a85b89 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -89,7 +89,7 @@ asmlinkage void ret_from_kernel_thread(void);
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 9c7c662cb565..5a28c0e91bf1 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -260,7 +260,7 @@ extern void ret_from_kernel_thread(void);
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 529adfecd58c..25781923788a 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -567,7 +567,7 @@ void fault_in_user_windows(struct pt_regs *regs)
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
struct thread_info *t = task_thread_info(p);
diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S
index cbd42ea7c3f7..99357bfa8e82 100644
--- a/arch/sparc/lib/M7memcpy.S
+++ b/arch/sparc/lib/M7memcpy.S
@@ -696,16 +696,16 @@ FUNC_NAME:
EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40)
faligndata %f24, %f26, %f10
EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40)
- EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40)
+ EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_32)
faligndata %f26, %f28, %f12
- EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40)
+ EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_32)
add %o4, 64, %o4
- EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40)
+ EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_24)
faligndata %f28, %f30, %f14
- EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40)
- EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40)
+ EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_24)
+ EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_16)
add %o0, 64, %o0
- EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40)
+ EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_8)
fsrc2 %f30, %f14
bgu,pt %xcc, .Lunalign_sloop
prefetch [%o4 + (8 * BLOCK_SIZE)], 20
@@ -728,7 +728,7 @@ FUNC_NAME:
add %o4, 8, %o4
faligndata %f0, %f2, %f16
subcc %o5, 8, %o5
- EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5)
+ EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_8)
fsrc2 %f2, %f0
bgu,pt %xcc, .Lunalign_by8
add %o0, 8, %o0
@@ -772,7 +772,7 @@ FUNC_NAME:
subcc %o5, 0x20, %o5
EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
- EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16)
EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
bne,pt %xcc, 1b
add %o0, 0x20, %o0
@@ -804,12 +804,12 @@ FUNC_NAME:
brz,pt %o3, 2f
sub %o2, %o3, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_o3)
add %o1, 1, %o1
subcc %o3, 1, %o3
add %o0, 1, %o0
bne,pt %xcc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_o3_plus_1)
2:
and %o1, 0x7, %o3
brz,pn %o3, .Lmedium_noprefetch_cp
diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S
index 64fbac28b3db..207343367bb2 100644
--- a/arch/sparc/lib/Memcpy_utils.S
+++ b/arch/sparc/lib/Memcpy_utils.S
@@ -137,6 +137,15 @@ ENTRY(memcpy_retl_o2_plus_63_8)
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_63_8)
+ENTRY(memcpy_retl_o2_plus_o3)
+ ba,pt %xcc, __restore_asi
+ add %o2, %o3, %o0
+ENDPROC(memcpy_retl_o2_plus_o3)
+ENTRY(memcpy_retl_o2_plus_o3_plus_1)
+ add %o3, 1, %o3
+ ba,pt %xcc, __restore_asi
+ add %o2, %o3, %o0
+ENDPROC(memcpy_retl_o2_plus_o3_plus_1)
ENTRY(memcpy_retl_o2_plus_o5)
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 7ad58ebe0d00..df0ec1bd1948 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -281,7 +281,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
subcc %o5, 0x20, %o5
EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16)
EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
bne,pt %icc, 1b
add %o0, 0x20, %o0
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index ee51c1230689..bbd3ea0a6482 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -79,8 +79,8 @@
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
__restore_asi:
- ret
wr %g0, ASI_AIUS, %asi
+ ret
restore
ENTRY(NG_ret_i2_plus_i4_plus_1)
ba,pt %xcc, __restore_asi
@@ -125,15 +125,16 @@ ENTRY(NG_ret_i2_plus_g1_minus_56)
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_56)
-ENTRY(NG_ret_i2_plus_i4)
+ENTRY(NG_ret_i2_plus_i4_plus_16)
+ add %i4, 16, %i4
ba,pt %xcc, __restore_asi
add %i2, %i4, %i0
-ENDPROC(NG_ret_i2_plus_i4)
-ENTRY(NG_ret_i2_plus_i4_minus_8)
- sub %i4, 8, %i4
+ENDPROC(NG_ret_i2_plus_i4_plus_16)
+ENTRY(NG_ret_i2_plus_i4_plus_8)
+ add %i4, 8, %i4
ba,pt %xcc, __restore_asi
add %i2, %i4, %i0
-ENDPROC(NG_ret_i2_plus_i4_minus_8)
+ENDPROC(NG_ret_i2_plus_i4_plus_8)
ENTRY(NG_ret_i2_plus_8)
ba,pt %xcc, __restore_asi
add %i2, 8, %i0
@@ -160,6 +161,12 @@ ENTRY(NG_ret_i2_and_7_plus_i4)
ba,pt %xcc, __restore_asi
add %i2, %i4, %i0
ENDPROC(NG_ret_i2_and_7_plus_i4)
+ENTRY(NG_ret_i2_and_7_plus_i4_plus_8)
+ and %i2, 7, %i2
+ add %i4, 8, %i4
+ ba,pt %xcc, __restore_asi
+ add %i2, %i4, %i0
+ENDPROC(NG_ret_i2_and_7_plus_i4_plus_8)
#endif
.align 64
@@ -405,13 +412,13 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
andn %i2, 0xf, %i4
and %i2, 0xf, %i2
1: subcc %i4, 0x10, %i4
- EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
+ EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4_plus_16)
add %i1, 0x08, %i1
- EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
+ EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4_plus_16)
sub %i1, 0x08, %i1
- EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
+ EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4_plus_16)
add %i1, 0x8, %i1
- EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
+ EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_plus_8)
bgu,pt %XCC, 1b
add %i1, 0x8, %i1
73: andcc %i2, 0x8, %g0
@@ -468,7 +475,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
subcc %i4, 0x8, %i4
srlx %g3, %i3, %i5
or %i5, %g2, %i5
- EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
+ EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 635398ec7540..154fbd35400c 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -164,17 +164,18 @@ ENTRY(U1_gs_40_fp)
retl
add %o0, %o2, %o0
ENDPROC(U1_gs_40_fp)
-ENTRY(U1_g3_0_fp)
- VISExitHalf
- retl
- add %g3, %o2, %o0
-ENDPROC(U1_g3_0_fp)
ENTRY(U1_g3_8_fp)
VISExitHalf
add %g3, 8, %g3
retl
add %g3, %o2, %o0
ENDPROC(U1_g3_8_fp)
+ENTRY(U1_g3_16_fp)
+ VISExitHalf
+ add %g3, 16, %g3
+ retl
+ add %g3, %o2, %o0
+ENDPROC(U1_g3_16_fp)
ENTRY(U1_o2_0_fp)
VISExitHalf
retl
@@ -547,18 +548,18 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
62: FINISH_VISCHUNK(o0, f44, f46)
63: UNEVEN_VISCHUNK_LAST(o0, f46, f0)
-93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
+93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_8_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
- EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp)
bl,pn %xcc, 95f
add %o0, 8, %o0
- EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
+ EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_8_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
- EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
+ EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp)
bge,pt %xcc, 93b
add %o0, 8, %o0
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 9248d59c734c..bace3a18f836 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -267,6 +267,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
faligndata %f10, %f12, %f26
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
+ and %o2, 0x3f, %o2
subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
bgu,pt %XCC, 1f
@@ -336,7 +337,6 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
* Also notice how this code is careful not to perform a
* load past the end of the src buffer.
*/
- and %o2, 0x3f, %o2
andcc %o2, 0x38, %g2
be,pn %XCC, 2f
subcc %g2, 0x8, %g2
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 1be644de9e41..9c9c66dc45f0 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -143,7 +143,7 @@ static void fork_handler(void)
int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
void (*handler)(void);
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 61da6b8a3d51..cbac54cb3a9e 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -643,4 +643,4 @@ static __init int bts_init(void)
return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
-arch_initcall(bts_init);
+early_initcall(bts_init);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c2fb729c270e..15da60cf69f2 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2997,7 +2997,8 @@ static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
if (event->group_leader != leader->group_leader)
break;
for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
- if (WARN_ON_ONCE(i + idx > cpuc->n_events))
+ if (i + idx >= cpuc->n_events ||
+ !is_acr_event_group(cpuc->event_list[i + idx]))
return;
__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
}
diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h
index c060549c6c94..89004f4ca208 100644
--- a/arch/x86/include/asm/fpu/sched.h
+++ b/arch/x86/include/asm/fpu/sched.h
@@ -11,7 +11,7 @@
extern void save_fpregs_to_fpstate(struct fpu *fpu);
extern void fpu__drop(struct task_struct *tsk);
-extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
+extern int fpu_clone(struct task_struct *dst, u64 clone_flags, bool minimal,
unsigned long shstk_addr);
extern void fpu_flush_thread(void);
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 77d8f49b92bd..f59ae7186940 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -244,7 +244,7 @@ static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
{
- unsigned int p;
+ unsigned long p;
/*
* Load CPU and node number from the GDT. LSL is faster than RDTSCP
@@ -254,10 +254,10 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
*
* If RDPID is available, use it.
*/
- alternative_io ("lsl %[seg],%[p]",
- ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ alternative_io ("lsl %[seg],%k[p]",
+ "rdpid %[p]",
X86_FEATURE_RDPID,
- [p] "=a" (p), [seg] "r" (__CPUNODE_SEG));
+ [p] "=r" (p), [seg] "r" (__CPUNODE_SEG));
if (cpu)
*cpu = (p & VDSO_CPUNODE_MASK);
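For context on the LSL/RDPID hunk above: both paths return the value that vdso_encode_cpunode() packed into the per-CPU segment, and the hunk's decode of the CPU field (p & VDSO_CPUNODE_MASK) simply splits that value back apart. A quick sketch of the packing; the 12-bit field width (VDSO_CPUNODE_BITS) is an assumption here, not taken from this diff:

#include <stdio.h>

#define VDSO_CPUNODE_BITS 12				/* assumed field width */
#define VDSO_CPUNODE_MASK ((1UL << VDSO_CPUNODE_BITS) - 1)

int main(void)
{
	unsigned long cpu = 5, node = 1;
	unsigned long p = (node << VDSO_CPUNODE_BITS) | cpu;	/* vdso_encode_cpunode() */

	/* vdso_read_cpunode() recovers both fields from the single value. */
	printf("cpu=%lu node=%lu\n", p & VDSO_CPUNODE_MASK, p >> VDSO_CPUNODE_BITS);
	return 0;
}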
diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h
index ba6f2fe43848..0f50e0125943 100644
--- a/arch/x86/include/asm/shstk.h
+++ b/arch/x86/include/asm/shstk.h
@@ -16,7 +16,7 @@ struct thread_shstk {
long shstk_prctl(struct task_struct *task, int option, unsigned long arg2);
void reset_thread_features(void);
-unsigned long shstk_alloc_thread_stack(struct task_struct *p, unsigned long clone_flags,
+unsigned long shstk_alloc_thread_stack(struct task_struct *p, u64 clone_flags,
unsigned long stack_size);
void shstk_free(struct task_struct *p);
int setup_signal_shadow_stack(struct ksignal *ksig);
@@ -28,7 +28,7 @@ static inline long shstk_prctl(struct task_struct *task, int option,
unsigned long arg2) { return -EINVAL; }
static inline void reset_thread_features(void) {}
static inline unsigned long shstk_alloc_thread_stack(struct task_struct *p,
- unsigned long clone_flags,
+ u64 clone_flags,
unsigned long stack_size) { return 0; }
static inline void shstk_free(struct task_struct *p) {}
static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; }
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index aefd412a23dc..1f71cc135e9a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -631,7 +631,7 @@ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
}
/* Clone current's FPU state on fork */
-int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
+int fpu_clone(struct task_struct *dst, u64 clone_flags, bool minimal,
unsigned long ssp)
{
/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 1b7960cf6eb0..e3a3987b0c4f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -159,7 +159,7 @@ __visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
struct inactive_task_frame *frame;
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index 2ddf23387c7e..5eba6c5a6775 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -191,7 +191,7 @@ void reset_thread_features(void)
current->thread.features_locked = 0;
}
-unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
+unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, u64 clone_flags,
unsigned long stack_size)
{
struct thread_shstk *shstk = &tsk->thread.shstk;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33e166f6ab12..eb289abece23 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -479,14 +479,14 @@ static int x86_cluster_flags(void)
static bool x86_has_numa_in_package;
static struct sched_domain_topology_level x86_topology[] = {
- SDTL_INIT(cpu_smt_mask, cpu_smt_flags, SMT),
+ SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
#ifdef CONFIG_SCHED_CLUSTER
- SDTL_INIT(cpu_clustergroup_mask, x86_cluster_flags, CLS),
+ SDTL_INIT(tl_cls_mask, x86_cluster_flags, CLS),
#endif
#ifdef CONFIG_SCHED_MC
- SDTL_INIT(cpu_coregroup_mask, x86_core_flags, MC),
+ SDTL_INIT(tl_mc_mask, x86_core_flags, MC),
#endif
- SDTL_INIT(cpu_cpu_mask, x86_sched_itmt_flags, PKG),
+ SDTL_INIT(tl_pkg_mask, x86_sched_itmt_flags, PKG),
{ NULL },
};
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1bfebe40854f..c813d6cce69f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4180,13 +4180,21 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_control_area *control = &svm->vmcb->control;
+
+ /*
+ * Next RIP must be provided as IRQs are disabled, and accessing guest
+ * memory to decode the instruction might fault, i.e. might sleep.
+ */
+ if (!nrips || !control->next_rip)
+ return EXIT_FASTPATH_NONE;
if (is_guest_mode(vcpu))
return EXIT_FASTPATH_NONE;
- switch (svm->vmcb->control.exit_code) {
+ switch (control->exit_code) {
case SVM_EXIT_MSR:
- if (!svm->vmcb->control.exit_info_1)
+ if (!control->exit_info_1)
break;
return handle_fastpath_set_msr_irqoff(vcpu);
case SVM_EXIT_HLT:
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 7bd66677f7b6..94d43f44be13 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -267,7 +267,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long usp_thread_fn = args->stack;
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 50e51047e1fe..4a8d3d96bfe4 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7109,9 +7109,10 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
* See the comments on bfq_limit_depth for the purpose of
* the depths set in the function. Return minimum shallow depth we'll use.
*/
-static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+static void bfq_depth_updated(struct request_queue *q)
{
- unsigned int nr_requests = bfqd->queue->nr_requests;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ unsigned int nr_requests = q->nr_requests;
/*
* In-word depths if no bfq_queue is being weight-raised:
@@ -7143,21 +7144,8 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
bfqd->async_depths[1][0] = max((nr_requests * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
bfqd->async_depths[1][1] = max((nr_requests * 6) >> 4, 1U);
-}
-
-static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
-{
- struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
- struct blk_mq_tags *tags = hctx->sched_tags;
- bfq_update_depths(bfqd, &tags->bitmap_tags);
- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
-}
-
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
-{
- bfq_depth_updated(hctx);
- return 0;
+ blk_mq_set_min_shallow_depth(q, 1);
}
static void bfq_exit_queue(struct elevator_queue *e)
@@ -7369,6 +7357,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
goto out_free;
bfq_init_root_group(bfqd->root_group, bfqd);
bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
+ bfq_depth_updated(q);
/* We dispatch from request queue wide instead of hw queue */
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
@@ -7628,7 +7617,6 @@ static struct elevator_type iosched_bfq_mq = {
.request_merged = bfq_request_merged,
.has_work = bfq_has_work,
.depth_updated = bfq_depth_updated,
- .init_hctx = bfq_init_hctx,
.init_sched = bfq_init_queue,
.exit_sched = bfq_exit_queue,
},
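As a worked example of the async depth arithmetic in bfq_depth_updated() above (illustrative numbers only): with nr_requests = 64, the two depths evaluate to max((64 * 3) >> 4, 1) = 12 tags (about 19% of 64) and max((64 * 6) >> 4, 1) = 24 tags (37.5%), the latter matching the "~37% of tags for sync writes" noted in the comment.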
diff --git a/block/bio.c b/block/bio.c
index 3b371a5da159..1904683f7ab0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -261,7 +261,7 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
bio->bi_blkg = NULL;
- bio->bi_issue.value = 0;
+ bio->issue_time_ns = 0;
if (bdev)
bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fe9ebd6a2e14..7246fc256315 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -110,12 +110,6 @@ static struct cgroup_subsys_state *blkcg_css(void)
return task_css(current, io_cgrp_id);
}
-static bool blkcg_policy_enabled(struct request_queue *q,
- const struct blkcg_policy *pol)
-{
- return pol && test_bit(pol->plid, q->blkcg_pols);
-}
-
static void blkg_free_workfn(struct work_struct *work)
{
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 81868ad86330..1cce3294634d 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -370,11 +370,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q)))
-static inline void blkcg_bio_issue_init(struct bio *bio)
-{
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-}
-
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
@@ -459,6 +454,12 @@ static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}
+static inline bool blkcg_policy_enabled(struct request_queue *q,
+ const struct blkcg_policy *pol)
+{
+ return pol && test_bit(pol->plid, q->blkcg_pols);
+}
+
void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else /* CONFIG_BLK_CGROUP */
@@ -491,7 +492,6 @@ static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
-static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
diff --git a/block/blk-core.c b/block/blk-core.c
index a27185cd8ede..14ae73eebe0d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -539,7 +539,7 @@ static inline void bio_check_ro(struct bio *bio)
}
}
-static noinline int should_fail_bio(struct bio *bio)
+int should_fail_bio(struct bio *bio)
{
if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
return -EIO;
@@ -727,10 +727,9 @@ static void __submit_bio_noacct_mq(struct bio *bio)
current->bio_list = NULL;
}
-void submit_bio_noacct_nocheck(struct bio *bio)
+void submit_bio_noacct_nocheck(struct bio *bio, bool split)
{
blk_cgroup_bio_start(bio);
- blkcg_bio_issue_init(bio);
if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
trace_block_bio_queue(bio);
@@ -747,12 +746,16 @@ void submit_bio_noacct_nocheck(struct bio *bio)
* to collect a list of requests submitted by a ->submit_bio method while
* it is active, and then process them after it returned.
*/
- if (current->bio_list)
- bio_list_add(&current->bio_list[0], bio);
- else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
+ if (current->bio_list) {
+ if (split)
+ bio_list_add_head(&current->bio_list[0], bio);
+ else
+ bio_list_add(&current->bio_list[0], bio);
+ } else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
__submit_bio_noacct_mq(bio);
- else
+ } else {
__submit_bio_noacct(bio);
+ }
}
static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
@@ -873,7 +876,7 @@ void submit_bio_noacct(struct bio *bio)
if (blk_throtl_bio(bio))
return;
- submit_bio_noacct_nocheck(bio);
+ submit_bio_noacct_nocheck(bio, false);
return;
not_supported:
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 2f8fdecdd7a9..554b191a6892 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -485,19 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
mod_timer(&blkiolat->timer, jiffies + HZ);
}
-static void iolatency_record_time(struct iolatency_grp *iolat,
- struct bio_issue *issue, u64 now,
- bool issue_as_root)
+static void iolatency_record_time(struct iolatency_grp *iolat, u64 start,
+ u64 now, bool issue_as_root)
{
- u64 start = bio_issue_time(issue);
u64 req_time;
- /*
- * Have to do this so we are truncated to the correct time that our
- * issue is truncated to.
- */
- now = __bio_issue_time(now);
-
if (now <= start)
return;
@@ -625,7 +617,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
* submitted, so do not account for it.
*/
if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
- iolatency_record_time(iolat, &bio->bi_issue, now,
+ iolatency_record_time(iolat, bio->issue_time_ns, now,
issue_as_root);
window_start = atomic64_read(&iolat->window_start);
if (now > window_start &&
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 70d704615be5..77488f11a944 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -104,34 +104,58 @@ static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
+/*
+ * bio_submit_split_bioset - Submit a bio, splitting it at a designated sector
+ * @bio: the original bio to be submitted and split
+ * @split_sectors: the sector count at which to split
+ * @bs: the bio set used for allocating the new split bio
+ *
+ * The original bio is modified to contain the remaining sectors and submitted.
+ * The caller is responsible for submitting the returned bio.
+ *
+ * On success, the newly allocated bio representing the initial part is
+ * returned; on failure, NULL is returned and the original bio is failed.
+ */
+struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
+ struct bio_set *bs)
+{
+ struct bio *split = bio_split(bio, split_sectors, GFP_NOIO, bs);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return NULL;
+ }
+
+ bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
+ WARN_ON_ONCE(bio_zone_write_plugging(bio));
+
+ if (should_fail_bio(bio))
+ bio_io_error(bio);
+ else if (!blk_throtl_bio(bio))
+ submit_bio_noacct_nocheck(bio, true);
+
+ return split;
+}
+EXPORT_SYMBOL_GPL(bio_submit_split_bioset);
+
static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
{
- if (unlikely(split_sectors < 0))
- goto error;
+ if (unlikely(split_sectors < 0)) {
+ bio->bi_status = errno_to_blk_status(split_sectors);
+ bio_endio(bio);
+ return NULL;
+ }
if (split_sectors) {
- struct bio *split;
-
- split = bio_split(bio, split_sectors, GFP_NOIO,
+ bio = bio_submit_split_bioset(bio, split_sectors,
&bio->bi_bdev->bd_disk->bio_split);
- if (IS_ERR(split)) {
- split_sectors = PTR_ERR(split);
- goto error;
- }
- split->bi_opf |= REQ_NOMERGE;
- blkcg_bio_issue_init(split);
- bio_chain(split, bio);
- trace_block_split(split, bio->bi_iter.bi_sector);
- WARN_ON_ONCE(bio_zone_write_plugging(bio));
- submit_bio_noacct(bio);
- return split;
+ if (bio)
+ bio->bi_opf |= REQ_NOMERGE;
}
return bio;
-error:
- bio->bi_status = errno_to_blk_status(split_sectors);
- bio_endio(bio);
- return NULL;
}
struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
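A hypothetical caller of the newly exported bio_submit_split_bioset(), following the kernel-doc above; the helper's behaviour is taken from this hunk, while the surrounding function, the split size and the include are purely illustrative:

#include <linux/bio.h>		/* assumed to declare bio_submit_split_bioset() */

/* Sketch: peel the first 8 sectors off @bio; the remainder is chained and
 * submitted by the helper, the returned front part stays with the caller. */
static void example_split_front(struct bio *bio, struct bio_set *bs)
{
	struct bio *front = bio_submit_split_bioset(bio, 8, bs);

	if (!front)
		return;		/* original bio already completed with an error */

	/* The caller is responsible for submitting the returned bio. */
	submit_bio_noacct(front);
}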
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index e2ce4a28e6c9..d06bb137a743 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -454,7 +454,7 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
}
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
- unsigned int nr_hw_queues)
+ unsigned int nr_hw_queues, unsigned int nr_requests)
{
unsigned int nr_tags;
int i;
@@ -470,13 +470,8 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
nr_tags * sizeof(struct blk_mq_tags *), gfp);
if (!et)
return NULL;
- /*
- * Default to double of smaller one between hw queue_depth and
- * 128, since we don't split into sync/async like the old code
- * did. Additionally, this is a per-hw queue depth.
- */
- et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
- BLKDEV_DEFAULT_RQ);
+
+ et->nr_requests = nr_requests;
et->nr_hw_queues = nr_hw_queues;
if (blk_mq_is_shared_tags(set->flags)) {
@@ -521,7 +516,8 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
* concurrently.
*/
if (q->elevator) {
- et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+ et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+ blk_mq_default_nr_requests(set));
if (!et)
goto out_unwind;
if (xa_insert(et_table, q->id, et, gfp))
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index b554e1d55950..8e21a6b1415d 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -24,7 +24,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
- unsigned int nr_hw_queues);
+ unsigned int nr_hw_queues, unsigned int nr_requests);
int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
void blk_mq_free_sched_tags(struct elevator_tags *et,
@@ -92,4 +92,15 @@ static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
+static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
+ unsigned int depth)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
+ depth);
+}
+
#endif
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 24656980f443..5c399ac562ea 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -150,9 +150,11 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
return;
hctx_for_each_ctx(hctx, ctx, i)
- kobject_del(&ctx->kobj);
+ if (ctx->kobj.state_in_sysfs)
+ kobject_del(&ctx->kobj);
- kobject_del(&hctx->kobj);
+ if (hctx->kobj.state_in_sysfs)
+ kobject_del(&hctx->kobj);
}
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5cffa5668d0c..aed84c5d5c2b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -584,14 +584,10 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
}
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_tags **tagsptr, unsigned int tdepth,
- bool can_grow)
+ struct blk_mq_tags **tagsptr, unsigned int tdepth)
{
struct blk_mq_tags *tags = *tagsptr;
- if (tdepth <= tags->nr_reserved_tags)
- return -EINVAL;
-
/*
* If we are allowed to grow beyond the original size, allocate
* a new set of tags before freeing the old one.
@@ -600,23 +596,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
struct blk_mq_tag_set *set = hctx->queue->tag_set;
struct blk_mq_tags *new;
- if (!can_grow)
- return -EINVAL;
-
- /*
- * We need some sort of upper limit, set it high enough that
- * no valid use cases should require more.
- */
- if (tdepth > MAX_SCHED_RQ)
- return -EINVAL;
-
- /*
- * Only the sbitmap needs resizing since we allocated the max
- * initially.
- */
- if (blk_mq_is_shared_tags(set->flags))
- return 0;
-
new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
if (!new)
return -ENOMEM;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ba3a4b77f578..f8a8a23b9040 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -396,6 +396,13 @@ static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
#endif
}
+static inline void blk_mq_bio_issue_init(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+ bio->issue_time_ns = blk_time_get_ns();
+#endif
+}
+
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
struct blk_mq_tags *tags, unsigned int tag)
{
@@ -3168,6 +3175,7 @@ void blk_mq_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
goto queue_exit;
+ blk_mq_bio_issue_init(bio);
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
goto queue_exit;
@@ -4917,57 +4925,59 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
}
EXPORT_SYMBOL(blk_mq_free_tag_set);
-int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
+ struct elevator_tags *et,
+ unsigned int nr)
{
struct blk_mq_tag_set *set = q->tag_set;
+ struct elevator_tags *old_et = NULL;
struct blk_mq_hw_ctx *hctx;
- int ret;
unsigned long i;
- if (WARN_ON_ONCE(!q->mq_freeze_depth))
- return -EINVAL;
-
- if (!set)
- return -EINVAL;
-
- if (q->nr_requests == nr)
- return 0;
-
blk_mq_quiesce_queue(q);
- ret = 0;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx->tags)
- continue;
+ if (blk_mq_is_shared_tags(set->flags)) {
/*
- * If we're using an MQ scheduler, just update the scheduler
- * queue depth. This is similar to what the old code would do.
+ * Shared tags: sched tags are allocated at the maximum size initially,
+ * hence they can't grow, see blk_mq_alloc_sched_tags().
*/
- if (hctx->sched_tags) {
- ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
- nr, true);
- } else {
- ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
- false);
+ if (q->elevator)
+ blk_mq_tag_update_sched_shared_tags(q);
+ else
+ blk_mq_tag_resize_shared_tags(set, nr);
+ } else if (!q->elevator) {
+ /*
+ * Non-shared hardware tags, nr is already checked from
+ * queue_requests_store() and tags can't grow.
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!hctx->tags)
+ continue;
+ sbitmap_queue_resize(&hctx->tags->bitmap_tags,
+ nr - hctx->tags->nr_reserved_tags);
}
- if (ret)
- break;
- if (q->elevator && q->elevator->type->ops.depth_updated)
- q->elevator->type->ops.depth_updated(hctx);
- }
- if (!ret) {
- q->nr_requests = nr;
- if (blk_mq_is_shared_tags(set->flags)) {
- if (q->elevator)
- blk_mq_tag_update_sched_shared_tags(q);
- else
- blk_mq_tag_resize_shared_tags(set, nr);
+ } else if (nr <= q->elevator->et->nr_requests) {
+ /* Non-shared sched tags, and tags don't grow. */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!hctx->sched_tags)
+ continue;
+ sbitmap_queue_resize(&hctx->sched_tags->bitmap_tags,
+ nr - hctx->sched_tags->nr_reserved_tags);
}
+ } else {
+ /* Non-shared sched tags, and tags grow */
+ queue_for_each_hw_ctx(q, hctx, i)
+ hctx->sched_tags = et->tags[i];
+ old_et = q->elevator->et;
+ q->elevator->et = et;
}
- blk_mq_unquiesce_queue(q);
+ q->nr_requests = nr;
+ if (q->elevator && q->elevator->type->ops.depth_updated)
+ q->elevator->type->ops.depth_updated(q);
- return ret;
+ blk_mq_unquiesce_queue(q);
+ return old_et;
}
/*
diff --git a/block/blk-mq.h b/block/blk-mq.h
index affb2e14b56e..6c9d03625ba1 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -6,6 +6,7 @@
#include "blk-stat.h"
struct blk_mq_tag_set;
+struct elevator_tags;
struct blk_mq_ctxs {
struct kobject kobj;
@@ -45,7 +46,9 @@ void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
-int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
+ struct elevator_tags *tags,
+ unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
bool);
@@ -109,6 +112,17 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
+/*
+ * Default to double of smaller one between hw queue_depth and
+ * 128, since we don't split into sync/async like the old code
+ * did. Additionally, this is a per-hw queue depth.
+ */
+static inline unsigned int blk_mq_default_nr_requests(
+ struct blk_mq_tag_set *set)
+{
+ return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ);
+}
+
/*
* sysfs helpers
*/
@@ -171,7 +185,7 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
+ struct blk_mq_tags **tags, unsigned int depth);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
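For the helper added above, BLKDEV_DEFAULT_RQ is 128, so a device with a hardware queue depth of 64 gets 2 * min(64, 128) = 128 scheduler requests per hardware queue, while a 1024-deep queue is capped at 2 * min(1024, 128) = 256, the same default the removed comment in blk-mq-sched.c described.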
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d6438e6c276d..8fa52914e16b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -56,6 +56,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
lim->max_hw_zone_append_sectors = UINT_MAX;
lim->max_user_discard_sectors = UINT_MAX;
+ lim->atomic_write_hw_max = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -232,6 +233,10 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
goto unsupported;
+ /* UINT_MAX indicates stacked limits in initial state */
+ if (lim->atomic_write_hw_max == UINT_MAX)
+ goto unsupported;
+
if (!lim->atomic_write_hw_max)
goto unsupported;
@@ -643,18 +648,24 @@ static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
struct queue_limits *b)
{
+ unsigned int boundary_sectors;
+
+ if (!b->atomic_write_hw_boundary || !t->chunk_sectors)
+ return true;
+
+ boundary_sectors = b->atomic_write_hw_boundary >> SECTOR_SHIFT;
+
/*
* Ensure atomic write boundary is aligned with chunk sectors. Stacked
- * devices store chunk sectors in t->io_min.
+ * devices store any stripe size in t->chunk_sectors.
*/
- if (b->atomic_write_hw_boundary > t->io_min &&
- b->atomic_write_hw_boundary % t->io_min)
+ if (boundary_sectors > t->chunk_sectors &&
+ boundary_sectors % t->chunk_sectors)
return false;
- if (t->io_min > b->atomic_write_hw_boundary &&
- t->io_min % b->atomic_write_hw_boundary)
+ if (t->chunk_sectors > boundary_sectors &&
+ t->chunk_sectors % boundary_sectors)
return false;
- t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
return true;
}
@@ -695,13 +706,13 @@ static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
struct queue_limits *b)
{
- if (b->atomic_write_hw_boundary &&
- !blk_stack_atomic_writes_boundary_head(t, b))
+ if (!blk_stack_atomic_writes_boundary_head(t, b))
return false;
t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
t->atomic_write_hw_max = b->atomic_write_hw_max;
+ t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
return true;
}
@@ -717,18 +728,14 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
if (!blk_atomic_write_start_sect_aligned(start, b))
goto unsupported;
- /*
- * If atomic_write_hw_max is set, we have already stacked 1x bottom
- * device, so check for compliance.
- */
- if (t->atomic_write_hw_max) {
+ /* UINT_MAX indicates no stacking of bottom devices yet */
+ if (t->atomic_write_hw_max == UINT_MAX) {
+ if (!blk_stack_atomic_writes_head(t, b))
+ goto unsupported;
+ } else {
if (!blk_stack_atomic_writes_tail(t, b))
goto unsupported;
- return;
}
-
- if (!blk_stack_atomic_writes_head(t, b))
- goto unsupported;
blk_stack_atomic_writes_chunk_sectors(t);
return;
@@ -763,7 +770,8 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t start)
{
- unsigned int top, bottom, alignment, ret = 0;
+ unsigned int top, bottom, alignment;
+ int ret = 0;
t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4a7f1a349998..9b03261b3e04 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -64,10 +64,12 @@ static ssize_t queue_requests_show(struct gendisk *disk, char *page)
static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
- unsigned long nr;
- int ret, err;
- unsigned int memflags;
struct request_queue *q = disk->queue;
+ struct blk_mq_tag_set *set = q->tag_set;
+ struct elevator_tags *et = NULL;
+ unsigned int memflags;
+ unsigned long nr;
+ int ret;
if (!queue_is_mq(q))
return -EINVAL;
@@ -76,16 +78,55 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
if (ret < 0)
return ret;
- memflags = blk_mq_freeze_queue(q);
- mutex_lock(&q->elevator_lock);
+ /*
+ * Serialize updating nr_requests with concurrent queue_requests_store()
+	 * and elevator switching.
+ */
+ down_write(&set->update_nr_hwq_lock);
+
+ if (nr == q->nr_requests)
+ goto unlock;
+
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
- err = blk_mq_update_nr_requests(disk->queue, nr);
- if (err)
- ret = err;
+ /*
+ * Switching elevator is protected by update_nr_hwq_lock:
+ * - read lock is held from elevator sysfs attribute;
+ * - write lock is held from updating nr_hw_queues;
+ * Hence it's safe to access q->elevator here with write lock held.
+ */
+ if (nr <= set->reserved_tags ||
+ (q->elevator && nr > MAX_SCHED_RQ) ||
+ (!q->elevator && nr > set->queue_depth)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!blk_mq_is_shared_tags(set->flags) && q->elevator &&
+ nr > q->elevator->et->nr_requests) {
+ /*
+		 * Tags will grow; allocate memory before freezing the queue to
+		 * prevent a deadlock.
+ */
+ et = blk_mq_alloc_sched_tags(set, q->nr_hw_queues, nr);
+ if (!et) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ }
+
+ memflags = blk_mq_freeze_queue(q);
+ mutex_lock(&q->elevator_lock);
+ et = blk_mq_update_nr_requests(q, et, nr);
mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
+
+ if (et)
+ blk_mq_free_sched_tags(et, set);
+
+unlock:
+ up_write(&set->update_nr_hwq_lock);
return ret;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 397b6a410f9e..2c5b64b1a724 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1224,7 +1224,7 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bio_list_on_stack)))
- submit_bio_noacct_nocheck(bio);
+ submit_bio_noacct_nocheck(bio, false);
blk_finish_plug(&plug);
}
}
@@ -1327,17 +1327,13 @@ static int blk_throtl_init(struct gendisk *disk)
INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
throtl_service_queue_init(&td->service_queue);
- /*
- * Freeze queue before activating policy, to synchronize with IO path,
- * which is protected by 'q_usage_counter'.
- */
memflags = blk_mq_freeze_queue(disk->queue);
blk_mq_quiesce_queue(disk->queue);
q->td = td;
td->queue = q;
- /* activate policy */
+	/* activate policy; blk_throtl_activated() will return true */
ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
if (ret) {
q->td = NULL;
@@ -1846,12 +1842,15 @@ void blk_throtl_exit(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
- if (!blk_throtl_activated(q))
+ /*
+	 * blkg_destroy_all() has already deactivated the throtl policy; just
+	 * check and free the throtl data.
+ */
+ if (!q->td)
return;
timer_delete_sync(&q->td->service_queue.pending_timer);
throtl_shutdown_wq(q);
- blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
kfree(q->td);
}
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 3b27755bfbff..9d7a42c039a1 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -156,7 +156,13 @@ void blk_throtl_cancel_bios(struct gendisk *disk);
static inline bool blk_throtl_activated(struct request_queue *q)
{
- return q->td != NULL;
+ /*
+ * q->td guarantees that the blk-throttle module is already loaded,
+ * and the plid of blk-throttle is assigned.
+ * blkcg_policy_enabled() guarantees that the policy is activated
+ * in the request_queue.
+ */
+ return q->td != NULL && blkcg_policy_enabled(q, &blkcg_policy_throtl);
}
static inline bool blk_should_throtl(struct bio *bio)
@@ -164,11 +170,6 @@ static inline bool blk_should_throtl(struct bio *bio)
struct throtl_grp *tg;
int rw = bio_data_dir(bio);
- /*
- * This is called under bio_queue_enter(), and it's synchronized with
- * the activation of blk-throtl, which is protected by
- * blk_mq_freeze_queue().
- */
if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
return false;
@@ -194,7 +195,10 @@ static inline bool blk_should_throtl(struct bio *bio)
static inline bool blk_throtl_bio(struct bio *bio)
{
-
+ /*
+ * block throttling takes effect if the policy is activated
+ * in the bio's request_queue.
+ */
if (!blk_should_throtl(bio))
return false;
diff --git a/block/blk.h b/block/blk.h
index 46f566f9b126..d9efc8693aa4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -54,7 +54,7 @@ bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
-void submit_bio_noacct_nocheck(struct bio *bio);
+void submit_bio_noacct_nocheck(struct bio *bio, bool split);
void bio_await_chain(struct bio *bio);
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
@@ -615,6 +615,7 @@ extern const struct address_space_operations def_blk_aops;
int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);
+int should_fail_bio(struct bio *bio);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
@@ -680,48 +681,6 @@ static inline ktime_t blk_time_get(void)
return ns_to_ktime(blk_time_get_ns());
}
-/*
- * From most significant bit:
- * 1 bit: reserved for other usage, see below
- * 12 bits: original size of bio
- * 51 bits: issue time of bio
- */
-#define BIO_ISSUE_RES_BITS 1
-#define BIO_ISSUE_SIZE_BITS 12
-#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS)
-#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
-#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
-#define BIO_ISSUE_SIZE_MASK \
- (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
-#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))
-
-/* Reserved bit for blk-throtl */
-#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
-
-static inline u64 __bio_issue_time(u64 time)
-{
- return time & BIO_ISSUE_TIME_MASK;
-}
-
-static inline u64 bio_issue_time(struct bio_issue *issue)
-{
- return __bio_issue_time(issue->value);
-}
-
-static inline sector_t bio_issue_size(struct bio_issue *issue)
-{
- return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
-}
-
-static inline void bio_issue_init(struct bio_issue *issue,
- sector_t size)
-{
- size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
- issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
- (blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
- ((u64)size << BIO_ISSUE_SIZE_SHIFT));
-}
-
void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops, struct file *bdev_file);
diff --git a/block/elevator.c b/block/elevator.c
index fe96c6f4753c..e2ebfbf107b3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -669,7 +669,8 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
lockdep_assert_held(&set->update_nr_hwq_lock);
if (strncmp(ctx->name, "none", 4)) {
- ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+ ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
+ blk_mq_default_nr_requests(set));
if (!ctx->et)
return -ENOMEM;
}
diff --git a/block/elevator.h b/block/elevator.h
index adc5c157e17e..c4d20155065e 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -37,7 +37,7 @@ struct elevator_mq_ops {
void (*exit_sched)(struct elevator_queue *);
int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*depth_updated)(struct blk_mq_hw_ctx *);
+ void (*depth_updated)(struct request_queue *);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 70cbc7b2deb4..18efd6ef2a2b 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -399,6 +399,14 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
return ERR_PTR(ret);
}
+static void kyber_depth_updated(struct request_queue *q)
+{
+ struct kyber_queue_data *kqd = q->elevator->elevator_data;
+
+ kqd->async_depth = q->nr_requests * KYBER_ASYNC_PERCENT / 100U;
+ blk_mq_set_min_shallow_depth(q, kqd->async_depth);
+}
+
static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
{
struct kyber_queue_data *kqd;
@@ -413,6 +421,7 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
eq->elevator_data = kqd;
q->elevator = eq;
+ kyber_depth_updated(q);
return 0;
}
@@ -440,15 +449,6 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
INIT_LIST_HEAD(&kcq->rq_list[i]);
}
-static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
-{
- struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
- struct blk_mq_tags *tags = hctx->sched_tags;
-
- kqd->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U;
- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
-}
-
static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
struct kyber_hctx_data *khd;
@@ -493,7 +493,6 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
khd->batching = 0;
hctx->sched_data = khd;
- kyber_depth_updated(hctx);
return 0;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index b9b7cdf1d3c9..2e689b2c4021 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -507,22 +507,12 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
}
/* Called by blk_mq_update_nr_requests(). */
-static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+static void dd_depth_updated(struct request_queue *q)
{
- struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
- struct blk_mq_tags *tags = hctx->sched_tags;
dd->async_depth = q->nr_requests;
-
- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
-}
-
-/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
-static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
- dd_depth_updated(hctx);
- return 0;
+ blk_mq_set_min_shallow_depth(q, 1);
}
static void dd_exit_sched(struct elevator_queue *e)
@@ -587,6 +577,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
q->elevator = eq;
+ dd_depth_updated(q);
return 0;
}
@@ -1048,7 +1039,6 @@ static struct elevator_type mq_deadline = {
.has_work = dd_has_work,
.init_sched = dd_init_sched,
.exit_sched = dd_exit_sched,
- .init_hctx = dd_init_hctx,
},
#ifdef CONFIG_BLK_DEBUG_FS
diff --git a/crypto/842.c b/crypto/842.c
index 8c257c40e2b9..4007e87bed80 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -54,8 +54,10 @@ static int crypto842_sdecompress(struct crypto_scomp *tfm,
}
static struct scomp_alg scomp = {
- .alloc_ctx = crypto842_alloc_ctx,
- .free_ctx = crypto842_free_ctx,
+ .streams = {
+ .alloc_ctx = crypto842_alloc_ctx,
+ .free_ctx = crypto842_free_ctx,
+ },
.compress = crypto842_scompress,
.decompress = crypto842_sdecompress,
.base = {
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 2ffe4ae90bea..8df3fa60a44f 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -610,11 +610,14 @@ int x509_process_extension(void *context, size_t hdrlen,
/*
* Get hold of the basicConstraints
* v[1] is the encoding size
- * (Expect 0x2 or greater, making it 1 or more bytes)
+ * (Expect 0x00 for empty SEQUENCE with CA:FALSE, or
+ * 0x03 or greater for non-empty SEQUENCE)
* v[2] is the encoding type
* (Expect an ASN1_BOOL for the CA)
- * v[3] is the contents of the ASN1_BOOL
- * (Expect 1 if the CA is TRUE)
+ * v[3] is the length of the ASN1_BOOL
+ * (Expect 1 for a single byte boolean)
+ * v[4] is the contents of the ASN1_BOOL
+ * (Expect 0xFF if the CA is TRUE)
* vlen should match the entire extension size
*/
if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ))
@@ -623,8 +626,13 @@ int x509_process_extension(void *context, size_t hdrlen,
return -EBADMSG;
if (v[1] != vlen - 2)
return -EBADMSG;
- if (vlen >= 4 && v[1] != 0 && v[2] == ASN1_BOOL && v[3] == 1)
+ /* Empty SEQUENCE means CA:FALSE (default value omitted per DER) */
+ if (v[1] == 0)
+ return 0;
+ if (vlen >= 5 && v[2] == ASN1_BOOL && v[3] == 1 && v[4] == 0xFF)
ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA;
+ else
+ return -EBADMSG;
return 0;
}
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 7a984ae5ae52..57b713516aef 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -68,8 +68,10 @@ static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
}
static struct scomp_alg scomp = {
- .alloc_ctx = lz4_alloc_ctx,
- .free_ctx = lz4_free_ctx,
+ .streams = {
+ .alloc_ctx = lz4_alloc_ctx,
+ .free_ctx = lz4_free_ctx,
+ },
.compress = lz4_scompress,
.decompress = lz4_sdecompress,
.base = {
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 9c61d05b6214..bb84f8a68cb5 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -66,8 +66,10 @@ static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
}
static struct scomp_alg scomp = {
- .alloc_ctx = lz4hc_alloc_ctx,
- .free_ctx = lz4hc_free_ctx,
+ .streams = {
+ .alloc_ctx = lz4hc_alloc_ctx,
+ .free_ctx = lz4hc_free_ctx,
+ },
.compress = lz4hc_scompress,
.decompress = lz4hc_sdecompress,
.base = {
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index ba013f2d5090..794e7ec49536 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -70,8 +70,10 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
}
static struct scomp_alg scomp = {
- .alloc_ctx = lzorle_alloc_ctx,
- .free_ctx = lzorle_free_ctx,
+ .streams = {
+ .alloc_ctx = lzorle_alloc_ctx,
+ .free_ctx = lzorle_free_ctx,
+ },
.compress = lzorle_scompress,
.decompress = lzorle_sdecompress,
.base = {
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 7867e2c67c4e..d43242b24b4e 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -70,8 +70,10 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
}
static struct scomp_alg scomp = {
- .alloc_ctx = lzo_alloc_ctx,
- .free_ctx = lzo_free_ctx,
+ .streams = {
+ .alloc_ctx = lzo_alloc_ctx,
+ .free_ctx = lzo_free_ctx,
+ },
.compress = lzo_scompress,
.decompress = lzo_sdecompress,
.base = {
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 2cff5419bd2f..cda964ba33cd 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -192,7 +192,7 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
- u32 ret = 0;
+ int ret = 0;
u32 status;
cmd_abo = job->cmd_bo;
@@ -222,7 +222,7 @@ static int
aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
- u32 ret = 0;
+ int ret = 0;
u32 status;
if (unlikely(!data))
@@ -250,7 +250,7 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
u32 fail_cmd_status;
u32 fail_cmd_idx;
u32 cmd_status;
- u32 ret = 0;
+ int ret = 0;
cmd_abo = job->cmd_bo;
if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0c41f0097e8d..f98640086f4e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -1141,7 +1141,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93
/*****************************************************************************
*
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ae035b93da08..3eb56b77cb6d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
if (ndr_desc->target_node == NUMA_NO_NODE) {
ndr_desc->target_node = phys_to_target_node(spa->address);
dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
- NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
}
/*
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2c2dc559e0f8..d0fc045a8d31 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1405,6 +1405,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
if (retval) {
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
+
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
return retval;
}
acpi_processor_registered++;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3399594136b2..67b01d579737 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -885,6 +885,10 @@ int register_one_node(int nid)
node_devices[nid] = node;
error = register_node(node_devices[nid], nid);
+ if (error) {
+ node_devices[nid] = NULL;
+ return error;
+ }
/* link cpu under this node */
for_each_present_cpu(cpu) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2ea6e05e6ec9..c883b01ffbdd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -724,8 +724,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend either in
+		 * device_resume_early() should also leave it in suspend, in
+		 * case its early resume depends on the noirq resume that
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 1f3f782a04ba..6883e1a43fe5 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -827,7 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (config && config->read && config->write) {
+ if (config->read && config->write) {
map->reg_read = _regmap_bus_read;
if (config->reg_update_bits)
map->reg_update_bits = config->reg_update_bits;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6463d0e8d0ce..87b0b78249da 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1217,6 +1217,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (!sock)
return NULL;
+ if (!sk_is_tcp(sock->sk) &&
+ !sk_is_stream_unix(sock->sk)) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+ *err = -EINVAL;
+ sockfd_put(sock);
+ return NULL;
+ }
+
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 91642c9a3b29..f982027e8c85 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 6e7bbbd35279..585de143ab25 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/unaligned.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -554,25 +555,6 @@ static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
-{
- struct sk_buff *skb;
- int err;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, data, size);
- err = hci_devcd_append(hdev, skb);
- if (err) {
- bt_dev_err(hdev, "Failed to append data in the coredump");
- return err;
- }
-
- return 0;
-}
-
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
{
u32 reg;
@@ -617,30 +599,35 @@ static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
- void *data, int size)
+static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
+ void *data, size_t size)
{
struct intel_tlv *tlv;
- tlv = skb_put(skb, sizeof(*tlv) + size);
+ tlv = dest;
tlv->type = type;
tlv->len = size;
memcpy(tlv->val, data, tlv->len);
+ return dest + sizeof(*tlv) + size;
}
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
{
- u32 offset, prev_size, wr_ptr_status, dump_size, i;
+ u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
struct btintel_pcie_dbgc *dbgc = &data->dbgc;
- u8 buf_idx, dump_time_len, fw_build;
struct hci_dev *hdev = data->hdev;
+ u8 *pdata, *p, buf_idx;
struct intel_tlv *tlv;
struct timespec64 now;
- struct sk_buff *skb;
struct tm tm_now;
- char buf[256];
- u16 hdr_len;
- int ret;
+ char fw_build[128];
+ char ts[128];
+ char vendor[64];
+ char driver[64];
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
@@ -657,88 +644,84 @@ static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
else
return -EINVAL;
+ snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
+ snprintf(driver, sizeof(driver), "Driver: %s\n",
+ data->dmp_hdr.driver_name);
+
ktime_get_real_ts64(&now);
time64_to_tm(now.tv_sec, 0, &tm_now);
- dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
- fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+ snprintf(fw_build, sizeof(fw_build),
"Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
2000 + (data->dmp_hdr.fw_timestamp >> 8),
data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
data->dmp_hdr.fw_build_num);
- hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
- sizeof(*tlv) + dump_time_len +
- sizeof(*tlv) + fw_build;
+ data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + strlen(ts) +
+ sizeof(*tlv) + strlen(fw_build) +
+ sizeof(*tlv) + strlen(vendor) +
+ sizeof(*tlv) + strlen(driver);
- dump_size = hdr_len + sizeof(hdr_len);
+ /*
+ * sizeof(u32) - signature
+ * sizeof(data_len) - to store tlv data size
+ * data_len - TLV data
+ */
+ dump_size = sizeof(u32) + sizeof(data_len) + data_len;
- skb = alloc_skb(dump_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
/* Add debug buffers data length to dump size */
dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
- ret = hci_devcd_init(hdev, dump_size);
- if (ret) {
- bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
- kfree_skb(skb);
- return ret;
- }
+ pdata = vmalloc(dump_size);
+ if (!pdata)
+ return -ENOMEM;
+ p = pdata;
+
+ *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
+ p += sizeof(u32);
- skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+ *(u32 *)p = data_len;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
- sizeof(data->dmp_hdr.cnvi_bt));
- btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
- sizeof(data->dmp_hdr.write_ptr));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
+ strlen(fw_build));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+ sizeof(data->dmp_hdr.wrap_ctr));
data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
- btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
- sizeof(data->dmp_hdr.wrap_ctr));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
- sizeof(data->dmp_hdr.trigger_reason));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
- sizeof(data->dmp_hdr.fw_git_sha1));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
- sizeof(data->dmp_hdr.cnvr_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
- sizeof(data->dmp_hdr.cnvi_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
-
- ret = hci_devcd_append(hdev, skb);
- if (ret)
- goto exit_err;
-
- for (i = 0; i < dbgc->count; i++) {
- ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
- BTINTEL_PCIE_DBGC_BUFFER_SIZE);
- if (ret)
- break;
- }
-
-exit_err:
- hci_devcd_complete(hdev);
- return ret;
+ p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
+ return 0;
}
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
@@ -760,51 +743,6 @@ static void btintel_pcie_dump_traces(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
-static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
- u16 len = skb->len;
- u16 *hdrlen_ptr;
- char buf[80];
-
- hdrlen_ptr = skb_put_zero(skb, sizeof(len));
-
- snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
- INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
- data->dmp_hdr.fw_build_num);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Vendor: Intel\n");
- skb_put_data(skb, buf, strlen(buf));
-
- *hdrlen_ptr = skb->len - len;
-}
-
-static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
-
- switch (state) {
- case HCI_DEVCOREDUMP_IDLE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- case HCI_DEVCOREDUMP_ACTIVE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
- break;
- case HCI_DEVCOREDUMP_TIMEOUT:
- case HCI_DEVCOREDUMP_ABORT:
- case HCI_DEVCOREDUMP_DONE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- }
-}
-
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -1378,6 +1316,11 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
* exception event on exception. Instead controller writes the
@@ -1390,11 +1333,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
}
- if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
- btintel_pcie_dump_traces(data->hdev);
- clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
- }
-
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
btintel_pcie_recv_frame(data, skb);
@@ -2184,13 +2122,6 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
- err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
- btintel_pcie_dump_notify);
- if (err) {
- bt_dev_err(hdev, "Failed to register coredump (%d)", err);
- goto exit_error;
- }
-
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -2319,7 +2250,6 @@ static void btintel_pcie_removal_work(struct work_struct *wk)
btintel_pcie_synchronize_irqs(data);
flush_work(&data->rx_work);
- flush_work(&data->hdev->dump.dump_rx);
bt_dev_dbg(data->hdev, "Release bluetooth interface");
btintel_pcie_release_hdev(data);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 0fa876c5b954..04b21f968ad3 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -132,6 +132,8 @@ enum btintel_pcie_tlv_type {
BTINTEL_CNVI_TOP,
BTINTEL_DUMP_TIME,
BTINTEL_FW_BUILD,
+ BTINTEL_VENDOR,
+ BTINTEL_DRIVER
};
/* causes for the MBOX interrupts */
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index c1c0a4759c7e..5027da143728 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -1104,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!plat_res)
+ return -EINVAL;
+
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
index 3af41f51cf38..1f1e360507d7 100644
--- a/drivers/cdx/Kconfig
+++ b/drivers/cdx/Kconfig
@@ -8,7 +8,6 @@
config CDX_BUS
bool "CDX Bus driver"
depends on OF && ARM64 || COMPILE_TEST
- select GENERIC_MSI_IRQ
help
Driver to enable Composable DMA Transfer(CDX) Bus. CDX bus
exposes Fabric devices which uses composable DMA IP to the
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 092306ca2541..3d50f8cd9c0b 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -310,7 +310,7 @@ static int cdx_probe(struct device *dev)
* Setup MSI device data so that generic MSI alloc/free can
* be used by the device driver.
*/
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
error = msi_setup_device_data(&cdx_dev->dev);
if (error)
return error;
@@ -833,7 +833,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
cdx_dev->num_msi = dev_params->num_msi;
dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
}
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index 0641a4c21e66..a480b62cbd1f 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -10,7 +10,6 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
depends on HAS_DMA
- select GENERIC_MSI_IRQ
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index fca83141e3e6..5e3fd89b6b56 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -193,7 +193,8 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->ops = &cdx_ops;
/* Create MSI domain */
- cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ))
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
if (!cdx->msi_domain) {
ret = dev_err_probe(&pdev->dev, -ENODEV, "cdx_msi_domain_init() failed");
goto cdx_msi_fail;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c85827843447..7826fd7c4603 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -312,6 +312,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK || COMPILE_TEST
+ depends on ARM_AMBA
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index d8fd8a354482..9e408144a10c 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
if (IS_ERR(ks_sa_rng->regmap_cfg))
return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
+ ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ks_sa_rng->clk))
+ return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
+
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index dddd702b2454..3e4684f6b4af 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,7 +29,7 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default X86_64
+ default n
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index e5394f98a02e..47bdb1e320af 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -159,7 +159,7 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
/* configure timer (system reset happens on the fifth expiration) */
- value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
+ value = TMRCR_PTV(wdt->base.timeout * (USEC_PER_SEC / 5)) |
TMRCR_PERIODIC | TMRCR_ENABLE;
tmr_writel(wdt->tmr, value, TMRCR);
@@ -267,7 +267,7 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
* counter value to the time of the counter expirations that
* remain.
*/
- timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+ timeleft += ((u64)wdt->base.timeout * (USEC_PER_SEC / 5)) * (4 - expiration);
/*
* Convert the current counter value to seconds,
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index ef078426bfd5..38c165d526d1 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,6 +15,7 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
return true;
}
+ /*
+ * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+	 * that did not match the SCMI performance protocol node. If we get
+	 * here, it means we have such an older Device Tree, so return
+	 * true to preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
return false;
}
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 5f386761b156..f60a4cf53642 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
return -ENODEV;
saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ of_node_put(cpu_node);
if (!saw_node)
return -ENODEV;
pdev = of_find_device_by_node(saw_node);
of_node_put(saw_node);
- of_node_put(cpu_node);
if (!pdev)
return -ENODEV;
data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
data->spm = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
if (!data->spm)
return -EINVAL;
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 45e130b901eb..17eb236e9ee4 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm,
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
ret = PTR_ERR(qm->debug.acc_diff_regs);
qm->debug.acc_diff_regs = NULL;
+ qm->debug.qm_diff_regs = NULL;
return ret;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index f5b47e5ff48a..7b60e89015bd 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -78,6 +78,11 @@
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
+#define HPRE_SVA_PREFTCH_DFX4 0x301144
+#define HPRE_WAIT_SVA_READY 500000
+#define HPRE_READ_SVA_STATUS_TIMES 3
+#define HPRE_WAIT_US_MIN 10
+#define HPRE_WAIT_US_MAX 20
/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
@@ -466,6 +471,33 @@ struct hisi_qp *hpre_create_qp(u8 type)
return NULL;
}
+static int hpre_wait_sva_ready(struct hisi_qm *qm)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + HPRE_SVA_PREFTCH_DFX4);
+ if (val)
+ count = 0;
+ else if (++count == HPRE_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HPRE_WAIT_US_MIN, HPRE_WAIT_US_MAX);
+ } while (++try_times < HPRE_WAIT_SVA_READY);
+
+ if (try_times == HPRE_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static void hpre_config_pasid(struct hisi_qm *qm)
{
u32 val1, val2;
@@ -563,7 +595,7 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}
-static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -571,20 +603,21 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val &= HPRE_PREFETCH_ENABLE;
+ val |= HPRE_PREFETCH_DISABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
- val, !(val & HPRE_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+ val, !(val & HPRE_SVA_DISABLE_READY),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hpre_wait_sva_ready(qm);
}
-static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -592,16 +625,24 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val |= HPRE_PREFETCH_DISABLE;
+ val &= HPRE_PREFETCH_ENABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
- val, !(val & HPRE_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+ val, !(val & HPRE_PREFETCH_DISABLE),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hpre_wait_sva_ready(qm);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
}
static void hpre_enable_clock_gate(struct hisi_qm *qm)
@@ -721,6 +762,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
/* Config data buffer pasid needed by Kunpeng 920 */
hpre_config_pasid(qm);
+ hpre_open_sva_prefetch(qm);
hpre_enable_clock_gate(qm);
@@ -1450,8 +1492,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
- hpre_open_sva_prefetch(qm);
-
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 2e4ee7ecfdfb..102aff9ea19a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -3826,6 +3826,10 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
}
pdev = container_of(dev, struct pci_dev, dev);
+ if (pci_physfn(pdev) != qm->pdev) {
+ pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+ return -EINVAL;
+ }
*fun_index = pdev->devfn;
@@ -4447,9 +4451,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
{
u32 value;
- if (qm->err_ini->open_sva_prefetch)
- qm->err_ini->open_sva_prefetch(qm);
-
if (qm->ver >= QM_HW_V3)
return;
@@ -4731,6 +4732,15 @@ void hisi_qm_reset_done(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
+static irqreturn_t qm_rsvd_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+
+ dev_info(&qm->pdev->dev, "Reserved interrupt, ignore!\n");
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -5014,7 +5024,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- if (qm->fun_type == QM_HW_VF)
+ if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3)
return;
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
@@ -5031,17 +5041,28 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
-
irq_vector = val & QM_IRQ_VECTOR_MASK;
+
+	/* For VF, this is a reserved interrupt on V3 and later versions. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver < QM_HW_V3)
+ return 0;
+
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq,
+ IRQF_NO_AUTOEN, qm->dev_name, qm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret);
+ return ret;
+ }
+ return 0;
+ }
+
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
- dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret);
return ret;
}
@@ -5407,6 +5428,12 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
pci_set_master(pdev);
num_vec = qm_get_irq_num(qm);
+ if (!num_vec) {
+ dev_err(dev, "Device irq num is zero!\n");
+ ret = -EINVAL;
+ goto err_get_pci_res;
+ }
+ num_vec = roundup_pow_of_two(num_vec);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 72cf48d1f3ab..348f1f52956d 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -93,6 +93,16 @@
#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE BIT(1)
#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11))
+#define SEC_SVA_PREFETCH_INFO 0x301ED4
+#define SEC_SVA_STALL_NUM GENMASK(23, 8)
+#define SEC_SVA_PREFETCH_NUM GENMASK(2, 0)
+#define SEC_WAIT_SVA_READY 500000
+#define SEC_READ_SVA_STATUS_TIMES 3
+#define SEC_WAIT_US_MIN 10
+#define SEC_WAIT_US_MAX 20
+#define SEC_WAIT_QP_US_MIN 1000
+#define SEC_WAIT_QP_US_MAX 2000
+#define SEC_MAX_WAIT_TIMES 2000
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
@@ -464,6 +474,81 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == SEC_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX);
+ } while (++try_times < SEC_WAIT_SVA_READY);
+
+ if (try_times == SEC_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val |= SEC_PREFETCH_DISABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+ val, !(val & SEC_SVA_DISABLE_READY),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM);
+}
+
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val &= SEC_PREFETCH_ENABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+ val, !(val & SEC_PREFETCH_DISABLE),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ sec_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM);
+ if (ret)
+ sec_close_sva_prefetch(qm);
+}
+
static void sec_engine_sva_config(struct hisi_qm *qm)
{
u32 reg;
@@ -497,45 +582,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base +
SEC_INTERFACE_USER_CTRL1_REG);
}
-}
-
-static void sec_open_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- /* Enable prefetch */
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val &= SEC_PREFETCH_ENABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
- val, !(val & SEC_PREFETCH_DISABLE),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
-}
-
-static void sec_close_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val |= SEC_PREFETCH_DISABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
- val, !(val & SEC_SVA_DISABLE_READY),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ sec_open_sva_prefetch(qm);
}
static void sec_enable_clock_gate(struct hisi_qm *qm)
@@ -1152,7 +1199,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
if (ret)
return ret;
- sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
ret = sec_show_last_regs_init(qm);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index d8ba23b7cc7d..341c4564e21a 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -95,10 +95,16 @@
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
+#define HZIP_SVA_PREFETCH_NUM GENMASK(18, 16)
+#define HZIP_SVA_STALL_NUM GENMASK(15, 0)
#define HZIP_SHAPER_RATE_COMPRESS 750
#define HZIP_SHAPER_RATE_DECOMPRESS 140
-#define HZIP_DELAY_1_US 1
-#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_DELAY_1_US 1
+#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_WAIT_SVA_READY 500000
+#define HZIP_READ_SVA_STATUS_TIMES 3
+#define HZIP_WAIT_US_MIN 10
+#define HZIP_WAIT_US_MAX 20
/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
@@ -448,10 +454,9 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
return false;
}
-static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+static void hisi_zip_set_high_perf(struct hisi_qm *qm)
{
u32 val;
- int ret;
val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
if (perf_mode == HZIP_HIGH_COMP_PERF)
@@ -461,16 +466,36 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm)
/* Set perf mode */
writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
- val, val == perf_mode, HZIP_DELAY_1_US,
- HZIP_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to set perf mode\n");
+}
- return ret;
+static int hisi_zip_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == HZIP_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HZIP_WAIT_US_MIN, HZIP_WAIT_US_MAX);
+ } while (++try_times < HZIP_WAIT_SVA_READY);
+
+ if (try_times == HZIP_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -478,19 +503,20 @@ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val &= HZIP_PREFETCH_ENABLE;
+ val |= HZIP_SVA_PREFETCH_DISABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
- val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+ val, !(val & HZIP_SVA_DISABLE_READY),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_STALL_NUM);
}
-static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -498,15 +524,23 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val |= HZIP_SVA_PREFETCH_DISABLE;
+ val &= HZIP_PREFETCH_ENABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
- val, !(val & HZIP_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+ val, !(val & HZIP_SVA_PREFETCH_DISABLE),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_PREFETCH_NUM);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
}
static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
@@ -530,6 +564,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
u32 zip_core_en;
+ int ret;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -565,6 +600,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
}
+ hisi_zip_open_sva_prefetch(qm);
/* let's open all compression/decompression cores */
@@ -580,9 +616,18 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+ hisi_zip_set_high_perf(qm);
hisi_zip_enable_clock_gate(qm);
- return hisi_dae_set_user_domain(qm);
+ ret = hisi_dae_set_user_domain(qm);
+ if (ret)
+ goto close_sva_prefetch;
+
+ return 0;
+
+close_sva_prefetch:
+ hisi_zip_close_sva_prefetch(qm);
+ return ret;
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -1251,11 +1296,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
if (ret)
return ret;
- ret = hisi_zip_set_high_perf(qm);
- if (ret)
- return ret;
-
- hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index 8f9e21ced0fe..48281d882260 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -232,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
struct device *dev = rctx->hcu_dev->dev;
unsigned int remainder = 0;
unsigned int total;
- size_t nents;
+ int nents;
size_t count;
int rc;
int i;
@@ -253,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
/* Determine the number of scatter gather list entries to process. */
nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+ if (nents < 0)
+ return nents;
+
/* If there are entries to process, map them. */
if (nents) {
rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index cc47e361089a..ebdf4efa09d4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1615,7 +1615,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
return -EINVAL;
}
err_msg = "Invalid engine group format";
- strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
+ strscpy(tmp_buf, ctx->val.vstr);
start = tmp_buf;
has_se = has_ie = has_ae = false;
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index fd0a98b2fb1b..0493041ea088 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -1043,8 +1043,10 @@ static struct scomp_alg nx842_powernv_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index f528e072494a..fc0222ebe807 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -1020,8 +1020,10 @@ static struct scomp_alg nx842_pseries_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 0470d7c175f4..54effb635196 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -116,6 +116,7 @@ struct rockchip_dfi {
int buswidth[DMC_MAX_CHANNELS];
int ddrmon_stride;
bool ddrmon_ctrl_single;
+ unsigned int count_multiplier; /* number of data clocks per count */
};
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
@@ -435,7 +436,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
switch (event->attr.config) {
case PERF_EVENT_CYCLES:
- count = total.c[0].clock_cycles;
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
break;
case PERF_EVENT_READ_BYTES:
for (i = 0; i < dfi->max_channels; i++)
@@ -655,6 +656,9 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
break;
}
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
if (ret)
return ret;
@@ -751,6 +755,7 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
dfi->max_channels = 4;
dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
return 0;
};
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 22fe9e631f8a..5730076846e1 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -386,7 +386,8 @@ static int mtk_ccifreq_probe(struct platform_device *pdev)
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
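The mtk-cci error path is fixed because an optional regulator handle can hold an ERR_PTR-encoded errno rather than NULL, and a plain truthiness test happily passes that bogus pointer on to regulator_is_enabled(). Below is a rough user-space model of the ERR_PTR convention; the encoding is a simplification kept only to show why both the NULL case and the error case have to be screened out.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified model of the kernel's ERR_PTR()/IS_ERR_OR_NULL() helpers. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error)
{
        return (void *)error;
}

static bool IS_ERR_OR_NULL(const void *p)
{
        return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
        void *missing = ERR_PTR(-19);   /* -ENODEV: optional supply not present */

        if (missing)
                printf("truthiness check: looks valid, would be used by mistake\n");
        if (IS_ERR_OR_NULL(missing))
                printf("IS_ERR_OR_NULL: correctly skipped\n");
        return 0;
}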
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index bf4171ac191d..9d00f247f4e0 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -1057,6 +1057,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
return !!GET_BITFIELD(mcmtr, 2, 2);
}
+static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
+{
+ u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
+
+ edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
+
+ return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
+}
+
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct res_config *cfg)
{
@@ -1070,6 +1079,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
if (!imc->mbase)
continue;
+ if (i10nm_channel_disabled(imc, i)) {
+ edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
+ continue;
+ }
+
ndimms = 0;
if (res_cfg->type != GNR)
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index cb934db9b2b4..326c4a93e44b 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -871,6 +871,9 @@ static int scmi_vio_probe(struct virtio_device *vdev)
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
+ /* Set device ready */
+ virtio_device_ready(vdev);
+
ret = platform_driver_register(&scmi_virtio_driver);
if (ret) {
vdev->priv = NULL;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d528c94c5859..29e0729299f5 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -267,9 +267,10 @@ config OVMF_DEBUG_LOG
bool "Expose OVMF firmware debug log via sysfs"
depends on EFI
help
- Recent OVMF versions (edk2-stable202508 + newer) can write
- their debug log to a memory buffer. This driver exposes the
- log content via sysfs (/sys/firmware/efi/ovmf_debug_log).
+ Recent versions of the Open Virtual Machine Firmware
+ (edk2-stable202508 + newer) can write their debug log to a memory
+ buffer. This driver exposes the log content via sysfs
+ (/sys/firmware/efi/ovmf_debug_log).
config UNACCEPTED_MEMORY
bool
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index f2fdd3756648..179f5d46d8dd 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -5,7 +5,7 @@
config MESON_SM
tristate "Amlogic Secure Monitor driver"
depends on ARCH_MESON || COMPILE_TEST
- default y
+ default ARCH_MESON
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
index f93aa0cecdb9..4b379f695eb7 100644
--- a/drivers/fwctl/mlx5/main.c
+++ b/drivers/fwctl/mlx5/main.c
@@ -345,7 +345,7 @@ static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
*/
if (ret && ret != -EREMOTEIO) {
if (rpc_out != rpc_in)
- kfree(rpc_out);
+ kvfree(rpc_out);
return ERR_PTR(ret);
}
return rpc_out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..dbbb3407fa13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2964,15 +2964,15 @@ long amdgpu_drm_ioctl(struct file *filp,
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -3117,7 +3117,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
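Wrapping the amdgpu callbacks in pm_sleep_ptr() and the ops pointer in pm_ptr() lets them compile away on kernels built without sleep support instead of needing #ifdef blocks. The rough shape of that trick, in stand-alone form with a made-up CONFIG switch (the real kernel macros are built on PTR_IF() and IS_ENABLED(), so this is only an approximation):

#include <stdio.h>
#include <stddef.h>

#define CONFIG_PM_SLEEP 1       /* flip to 0 to model a build without sleep support */

/* Simplified stand-in for the kernel's pm_sleep_ptr() trick. */
#define pm_sleep_ptr(fn) (CONFIG_PM_SLEEP ? (fn) : NULL)

struct pm_ops {
        int (*suspend)(void);
};

static int my_suspend(void)
{
        printf("suspending\n");
        return 0;
}

static const struct pm_ops ops = {
        .suspend = pm_sleep_ptr(my_suspend),
};

int main(void)
{
        if (ops.suspend)
                ops.suspend();
        else
                printf("sleep support compiled out, callback is NULL\n");
        return 0;
}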
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 486c3646710c..8f6ce948c684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -364,7 +364,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7670f5d82b9e..12783ea3ba0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -211,7 +211,18 @@ struct amdgpu_ring_funcs {
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned extra_dw;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f1f67521c29c..affb68eabc4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -92,6 +92,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
@@ -285,6 +286,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
amdgpu_ucode_release(&adev->vcn.inst[0].fw);
adev->vcn.inst[i].fw = NULL;
}
+
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
+
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
@@ -405,6 +410,54 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_vcn_inst *vcn_inst =
@@ -412,7 +465,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i = vcn_inst->inst, j;
- int r = 0;
if (adev->vcn.harvest_config & (1 << i))
return;
@@ -438,16 +490,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[i];
if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
+
} else {
schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
@@ -457,30 +504,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&vcn_inst->total_submission_cnt);
cancel_delayed_work_sync(&vcn_inst->idle_work);
- /* We can safely return early here because we've cancelled the
- * the delayed work so there is no one else to set it to false
- * and we don't care if someone else sets it to true.
- */
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
@@ -508,6 +536,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -1527,3 +1556,86 @@ int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
return amdgpu_vcn_reset_engine(adev, ring->me);
}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
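The new amdgpu_vcn register-dump helpers above keep a single flat buffer of num_vcn_inst * reg_count 32-bit words and address each instance's slice with inst_off = instance * reg_count. A compact user-space sketch of that indexing scheme; the instance count, register count and register values are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NUM_INST  2
#define REG_COUNT 3

/* Pretend register read: instance and register index folded into one value. */
static uint32_t read_reg(unsigned int inst, unsigned int reg)
{
        return (inst << 8) | reg;
}

int main(void)
{
        /* One flat buffer, like kcalloc(num_inst * reg_count, sizeof(u32), ...). */
        uint32_t *dump = calloc(NUM_INST * REG_COUNT, sizeof(*dump));
        unsigned int i, j;

        if (!dump)
                return 1;

        for (i = 0; i < NUM_INST; i++) {
                unsigned int inst_off = i * REG_COUNT;  /* start of this instance's slice */

                for (j = 0; j < REG_COUNT; j++)
                        dump[inst_off + j] = read_reg(i, j);
        }

        for (i = 0; i < NUM_INST; i++)
                for (j = 0; j < REG_COUNT; j++)
                        printf("inst %u reg %u = 0x%08x\n", i, j, dump[i * REG_COUNT + j]);

        free(dump);
        return 0;
}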
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 0bc0a94d7cf0..6d9acd36041d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -237,6 +237,8 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
enum amdgpu_vcn_caps {
AMDGPU_VCN_RRMT_ENABLED,
};
@@ -362,6 +364,8 @@ struct amdgpu_vcn {
bool workload_profile_active;
struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -557,4 +561,11 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
unsigned int vmid,
struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 9e428e669ada..b5bb7f4d607c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
- .extra_dw = 64,
+ .extra_bytes = 256,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
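The jpeg_v1_0 value tracks the extra_dw to extra_bytes rename above: a dword is four bytes, so 64 dwords becomes 256 bytes and the ring BO allocation stays the same size. The arithmetic, spelled out trivially:

#include <stdio.h>

int main(void)
{
        unsigned int extra_dw = 64;                 /* old field: count of 32-bit dwords */
        unsigned int extra_bytes = extra_dw * 4;    /* new field: same space in bytes */

        printf("%u dwords == %u bytes\n", extra_dw, extra_bytes);
        return 0;
}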
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index b86288a69e7b..a78144773fab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -444,7 +444,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5dbaebb592b3..2e79a3afc774 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
*
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
- * Initialize the hardware, boot up the VCPU and do some testing
+ * Initialize the hardware, boot up the VCPU and do some testing.
+ *
+ * On SI, the UVD is meant to be used in a specific power state,
+ * or alternatively the driver can manually enable its clock.
+ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
+ * for the SMU and afterwards enables the UVD clock.
+ * This is automatically done by amdgpu_uvd_ring_begin_use when work
+ * is submitted to the UVD ring. Here, we have to call it manually
+ * in order to power up UVD before firmware validation.
+ *
+ * Note that we must not disable the UVD clock here, as that would
+ * cause the ring test to fail. However, UVD is powered off
+ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
+ * the UVD idle work handler which will disable the UVD clock when
+ * all fences are signalled.
*/
static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
@@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
int r;
uvd_v3_1_mc_resume(adev);
+ uvd_v3_1_enable_mgcg(adev, true);
+
+ /* Make sure UVD is powered during FW validation.
+ * It's going to be automatically powered off after the ring test.
+ */
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
r = uvd_v3_1_fw_validate(adev);
if (r) {
@@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- uvd_v3_1_enable_mgcg(adev, true);
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
uvd_v3_1_start(adev);
r = amdgpu_ring_test_helper(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index bc30a5326866..f13ed3c1e29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -116,7 +116,6 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i, j;
- int r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
@@ -149,15 +148,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
@@ -167,7 +158,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
@@ -177,20 +167,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
* the delayed work so there is no one else to set it to false
* and we don't care if someone else sets it to true.
*/
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -218,6 +194,7 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
v->pause_dpg_mode(v, &new_state);
}
mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 2811226b0ea5..866222fc10a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -361,7 +361,6 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 706f3b2f484f..ac55549e20be 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1984,7 +1984,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
+ .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 2a3663b551af..ba944a96c070 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -287,8 +287,6 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -391,7 +389,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index caf2d95a85d4..11fec716e846 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -284,8 +284,6 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a0f22ea6d15a..3d8b20828c06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -4239,7 +4239,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
break;
default:
- r = EINVAL;
+ r = -EINVAL;
break;
}
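The kfd_svm one-liner matters because kernel callers interpret a positive return as success, so `r = EINVAL` would silently report the unknown SVM op as having worked. A minimal illustration of the negative-errno convention:

#include <stdio.h>
#include <errno.h>

/* Returns 0 on success, a negative errno on failure (the kernel convention). */
static int do_op(int op)
{
        if (op != 0)
                return -EINVAL; /* "return EINVAL;" would look like success below */
        return 0;
}

int main(void)
{
        int r = do_op(42);

        if (r < 0)
                printf("failed: %d\n", r);
        else
                printf("ok\n");
        return 0;
}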
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4d6bc9fd4faa..9ac2d41f8fca 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -316,6 +316,9 @@ bool dc_stream_set_cursor_attributes(
{
bool result = false;
+ if (!stream)
+ return false;
+
if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
stream->cursor_attributes = *attributes;
result = true;
@@ -331,7 +334,10 @@ bool dc_stream_program_cursor_attributes(
struct dc *dc;
bool reset_idle_optimizations = false;
- dc = stream ? stream->ctx->dc : NULL;
+ if (!stream)
+ return false;
+
+ dc = stream->ctx->dc;
if (dc_stream_set_cursor_attributes(stream, attributes)) {
dc_z10_restore(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 9ba6cb67655f..6c75aa82327a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
- ;
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4ea13d0bf815..c69194e04ff9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1600,19 +1600,17 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
}
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output = {0};
+ build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output);
- build_audio_output(context, pipe_ctx, &audio_output);
-
- link_hwss->setup_audio_output(pipe_ctx, &audio_output,
+ link_hwss->setup_audio_output(pipe_ctx, &pipe_ctx->stream_res.audio_output,
pipe_ctx->stream_res.audio->inst);
pipe_ctx->stream_res.audio->funcs->az_configure(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.crtc_info,
&pipe_ctx->stream->audio_info,
- &audio_output.dp_link_info);
+ &pipe_ctx->stream_res.audio_output.dp_link_info);
if (dc->config.disable_hbr_audio_dp2)
if (pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio &&
@@ -2386,9 +2384,7 @@ static void dce110_setup_audio_dto(
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output;
-
- build_audio_output(context, pipe_ctx, &audio_output);
+ build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output);
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) {
struct dtbclk_dto_params dto_params = {0};
@@ -2399,14 +2395,14 @@ static void dce110_setup_audio_dto(
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &audio_output.pll_info);
+ &pipe_ctx->stream_res.audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.pll_info);
} else
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &audio_output.pll_info);
+ &pipe_ctx->stream_res.audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.pll_info);
break;
}
}
@@ -2426,15 +2422,15 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream_res.audio != NULL) {
- struct audio_output audio_output = {0};
-
- build_audio_output(context, pipe_ctx, &audio_output);
+ build_audio_output(context,
+ pipe_ctx,
+ &pipe_ctx->stream_res.audio_output);
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
pipe_ctx->stream_res.audio,
pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &audio_output.pll_info);
+ &pipe_ctx->stream_res.audio_output.crtc_info,
+ &pipe_ctx->stream_res.audio_output.pll_info);
break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f0d7185153b2..f896cce87b8d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -228,8 +228,7 @@ struct resource_funcs {
enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link,
struct dc_link_settings *link_setting,
uint8_t pipe_count,
- struct pipe_ctx *pipes,
- struct audio_output *audio_output);
+ struct pipe_ctx *pipes);
};
struct audio_support{
@@ -361,6 +360,8 @@ struct stream_resource {
uint8_t gsl_group;
struct test_pattern_params test_pattern_params;
+
+ struct audio_output audio_output;
};
struct plane_resource {
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2956c2b3ad1a..b12d61701d4d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -75,7 +75,6 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
bool is_hpo_acquired;
uint8_t count;
int i;
- struct audio_output audio_output[MAX_PIPES];
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -99,7 +98,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) {
link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
link_setting, count,
- *pipes, &audio_output[0]);
+ *pipes);
for (i = 0; i < count; i++) {
pipes[i]->clock_source->funcs->program_pix_clk(
pipes[i]->clock_source,
@@ -111,15 +110,16 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
const struct link_hwss *link_hwss = get_link_hwss(
link, &pipes[i]->link_res);
- link_hwss->setup_audio_output(pipes[i], &audio_output[i],
- pipes[i]->stream_res.audio->inst);
+ link_hwss->setup_audio_output(pipes[i],
+ &pipes[i]->stream_res.audio_output,
+ pipes[i]->stream_res.audio->inst);
pipes[i]->stream_res.audio->funcs->az_configure(
pipes[i]->stream_res.audio,
pipes[i]->stream->signal,
- &audio_output[i].crtc_info,
+ &pipes[i]->stream_res.audio_output.crtc_info,
&pipes[i]->stream->audio_info,
- &audio_output[i].dp_link_info);
+ &pipes[i]->stream_res.audio_output.dp_link_info);
if (link->dc->config.disable_hbr_audio_dp2 &&
pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio &&
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 3ed7f50554e2..ca17e5d8fdc2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -2239,8 +2239,7 @@ struct resource_pool *dcn31_create_resource_pool(
enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
struct dc_link_settings *link_setting,
uint8_t pipe_count,
- struct pipe_ctx *pipes,
- struct audio_output *audio_output)
+ struct pipe_ctx *pipes)
{
struct dc_state *state = link->dc->current_state;
int i;
@@ -2255,7 +2254,7 @@ enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
// Setup audio
if (pipes[i].stream_res.audio != NULL)
- build_audio_output(state, &pipes[i], &audio_output[i]);
+ build_audio_output(state, &pipes[i], &pipes[i].stream_res.audio_output);
}
#else
/* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index c32c85ef0ba4..7e8fde65528f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -69,8 +69,7 @@ unsigned int dcn31_get_det_buffer_size(
enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link,
struct dc_link_settings *link_setting,
uint8_t pipe_count,
- struct pipe_ctx *pipes,
- struct audio_output *audio_output);
+ struct pipe_ctx *pipes);
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
index 42efe838fa85..2d2d2d5e6763 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -66,6 +66,13 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
(amdgpu_crtc->v_border * 2));
vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
+
+ /* we have issues with mclk switching with
+ * refresh rates over 120 hz on the non-DC code.
+ */
+ if (drm_mode_vrefresh(&amdgpu_crtc->hw_mode) > 120)
+ vblank_time_us = 0;
+
break;
}
}
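With the hunk above, any mode faster than 120 Hz reports a vblank time of zero, which the SI DPM code below then treats as too short and uses to block MCLK switching. A back-of-the-envelope check with hypothetical 1080p 144 Hz timings shows such modes would already sit well under the 450 us GDDR5 switch limit; all numbers here are illustrative, not taken from the driver.

#include <stdio.h>

int main(void)
{
        /* Hypothetical 1920x1080 @ 144 Hz timings. */
        unsigned int htotal = 2200;         /* pixels per line, incl. blanking */
        unsigned int vblank_lines = 45;     /* blank end minus active lines */
        unsigned int clock_khz = 356400;    /* roughly 2200 * 1125 * 144 / 1000 */

        unsigned int vblank_in_pixels = htotal * vblank_lines;
        unsigned int vblank_time_us = vblank_in_pixels * 1000u / clock_khz;

        /* About 277 us, below the 450 us GDDR5 switch limit used by si_dpm,
         * so MCLK switching would have to be disabled for this mode anyway. */
        printf("vblank time: %u us\n", vblank_time_us);
        return 0;
}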
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 52e732be59e3..4236700fc1ad 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3085,7 +3085,13 @@ static bool si_dpm_vblank_too_short(void *handle)
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
- if (vblank_time < switch_limit)
+ /* Consider zero vblank time too short and disable MCLK switching.
+ * Note that the vblank time is set to maximum when no displays are attached,
+ * so we'll still enable MCLK switching in that case.
+ */
+ if (vblank_time == 0)
+ return true;
+ else if (vblank_time < switch_limit)
return true;
else
return false;
@@ -3443,12 +3449,14 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
{
struct si_ps *ps = si_get_ps(rps);
struct amdgpu_clock_and_voltage_limits *max_limits;
+ struct amdgpu_connector *conn;
bool disable_mclk_switching = false;
bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci, min_vce_voltage = 0;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
+ u32 high_pixelclock_count = 0;
int i;
if (adev->asic_type == CHIP_HAINAN) {
@@ -3476,6 +3484,35 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
}
+ /* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
+ * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+ * Find number of such displays connected.
+ */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) ||
+ !adev->mode_info.crtcs[i]->enabled)
+ continue;
+
+ conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector);
+
+ if (conn->pixelclock_for_modeset > 297000)
+ high_pixelclock_count++;
+ }
+
+ /* These are some ad-hoc fixes to some issues observed with SI GPUs.
+ * They are necessary because we don't have something like dce_calcs
+ * for these GPUs to calculate bandwidth requirements.
+ */
+ if (high_pixelclock_count) {
+ /* On Oland, we observe some flickering when two 4K 60Hz
+ * displays are connected, possibly because voltage is too low.
+ * Raise the voltage by requiring a higher SCLK.
+ * (Voltage cannot be adjusted independently without also SCLK.)
+ */
+ if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
+ disable_sclk_switching = true;
+ }
+
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
@@ -5637,14 +5674,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
static int si_disable_ulv(struct amdgpu_device *adev)
{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
-
- if (ulv->supported)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
+ PPSMC_Result r;
- return 0;
+ r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
+ return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
}
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
@@ -5817,9 +5850,9 @@ static int si_upload_smc_data(struct amdgpu_device *adev)
{
struct amdgpu_crtc *amdgpu_crtc = NULL;
int i;
-
- if (adev->pm.dpm.new_active_crtc_count == 0)
- return 0;
+ u32 crtc_index = 0;
+ u32 mclk_change_block_cp_min = 0;
+ u32 mclk_change_block_cp_max = 0;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
@@ -5828,26 +5861,31 @@ static int si_upload_smc_data(struct amdgpu_device *adev)
}
}
- if (amdgpu_crtc == NULL)
- return 0;
+ /* When a display is plugged in, program these so that the SMC
+ * performs MCLK switching when it doesn't cause flickering.
+ * When no display is plugged in, there is no need to restrict
+ * MCLK switching, so program them to zero.
+ */
+ if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) {
+ crtc_index = amdgpu_crtc->crtc_id;
- if (amdgpu_crtc->line_time <= 0)
- return 0;
+ if (amdgpu_crtc->line_time) {
+ mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time;
+ mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time;
+ }
+ }
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_crtc_index,
- amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_crtc_index,
+ crtc_index);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
- amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+ mclk_change_block_cp_min);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
- amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+ mclk_change_block_cp_max);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index b9e0ca85226a..a6d6e62071a0 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -122,6 +122,7 @@ config DRM_ITE_IT6505
select EXTCON
select CRYPTO
select CRYPTO_HASH
+ select REGMAP_I2C
help
ITE IT6505 DisplayPort bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index a57ca8c3bdae..695b6246b280 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -997,10 +997,10 @@ static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
- const struct drm_display_mode *mode = &crtc_state->mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
- return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+ return cdns_dsi_check_conf(dsi, adjusted_mode, dsi_cfg, true);
}
static struct drm_bridge_state *
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 5eb7e9bfe361..8c915427d053 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -816,6 +816,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_notifier_register(connector,
NULL,
bridge->hdmi_cec_dev);
@@ -825,6 +827,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_register(connector,
&drm_bridge_connector_hdmi_cec_funcs,
bridge->hdmi_cec_adapter_name,
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 1ecc3df7e316..4aaeae4fa03c 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -3962,6 +3962,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
int ret;
unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
u8 buf[3] = { 0 };
+ size_t len = 2;
/* The panel uses the PWM for controlling brightness levels */
if (!(bl->aux_set || bl->luminance_set))
@@ -3974,6 +3975,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[1] = (level & 0x00ff00) >> 8;
buf[2] = (level & 0xff0000) >> 16;
offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ len = 3;
} else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
@@ -3981,7 +3983,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, len);
if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index ecc73d52bfae..85dbdaa4a2e2 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1078,19 +1078,20 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
if (async_flip) {
- /* check if the prop does a nop change */
- if ((prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips)) {
- ret = drm_atomic_plane_get_property(plane, plane_state,
- prop, &old_val);
- ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- }
+ /* no-op changes are always allowed */
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- /* ask the driver if this non-primary plane is supported */
- if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
- ret = -EINVAL;
+ /* fail everything that isn't no-op or a pure flip */
+ if (ret && prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips) {
+ break;
+ }
+ if (ret && plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ /* ask the driver if this non-primary plane is supported */
if (plane_funcs && plane_funcs->atomic_async_check)
ret = plane_funcs->atomic_async_check(plane, state, true);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c8bb28dccdc1..d1e6598ea3bc 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -134,6 +134,9 @@ void drm_panel_prepare(struct drm_panel *panel)
panel->prepared = true;
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_prepared)
+ continue;
+
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -179,6 +182,9 @@ void drm_panel_unprepare(struct drm_panel *panel)
mutex_lock(&panel->follower_lock);
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_unpreparing)
+ continue;
+
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -209,6 +215,7 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*/
void drm_panel_enable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -219,10 +226,12 @@ void drm_panel_enable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = true;
@@ -230,6 +239,19 @@ void drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_enabled)
+ continue;
+
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -243,6 +265,7 @@ EXPORT_SYMBOL(drm_panel_enable);
*/
void drm_panel_disable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -262,6 +285,18 @@ void drm_panel_disable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_disabling)
+ continue;
+
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+
ret = backlight_disable(panel->backlight);
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
@@ -270,9 +305,12 @@ void drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = false;
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -539,13 +577,13 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* @follower_dev: The 'struct device' for the follower.
* @follower: The panel follower descriptor for the follower.
*
- * A panel follower is called right after preparing the panel and right before
- * unpreparing the panel. It's primary intention is to power on an associated
- * touchscreen, though it could be used for any similar devices. Multiple
- * devices are allowed the follow the same panel.
+ * A panel follower is called right after preparing/enabling the panel and right
+ * before unpreparing/disabling the panel. Its primary intention is to power on
+ * an associated touchscreen, though it could be used for any similar devices.
+ * Multiple devices are allowed to follow the same panel.
*
- * If a follower is added to a panel that's already been turned on, the
- * follower's prepare callback is called right away.
+ * If a follower is added to a panel that's already been prepared/enabled, the
+ * follower's prepared/enabled callback is called right away.
*
* The "panel" property of the follower points to the panel to be followed.
*
@@ -569,12 +607,18 @@ int drm_panel_add_follower(struct device *follower_dev,
mutex_lock(&panel->follower_lock);
list_add_tail(&follower->list, &panel->followers);
- if (panel->prepared) {
+ if (panel->prepared && follower->funcs->panel_prepared) {
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
+ if (panel->enabled && follower->funcs->panel_enabled) {
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
mutex_unlock(&panel->follower_lock);
@@ -587,7 +631,8 @@ EXPORT_SYMBOL(drm_panel_add_follower);
* @follower: The panel follower descriptor for the follower.
*
* Undo drm_panel_add_follower(). This includes calling the follower's
- * unprepare function if we're removed from a panel that's currently prepared.
+ * unpreparing/disabling function if we're removed from a panel that's currently
+ * prepared/enabled.
*
* Return: 0 or an error code.
*/
@@ -598,7 +643,13 @@ void drm_panel_remove_follower(struct drm_panel_follower *follower)
mutex_lock(&panel->follower_lock);
- if (panel->prepared) {
+ if (panel->enabled && follower->funcs->panel_disabling) {
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+ if (panel->prepared && follower->funcs->panel_unpreparing) {
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 56a5b596554d..46f348972a97 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -446,7 +446,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
static int dpu_encoder_phys_wb_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- unsigned long ret;
+ int ret;
struct dpu_encoder_wait_info wait_info;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 6859e8ef6b05..f54cf0faa1c7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -922,6 +922,9 @@ static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
+ if (!sspp)
+ return true;
+
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -1028,6 +1031,7 @@ static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
return false;
+ /* Do not validate SSPP of current plane when it is not ready */
if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
!dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
return false;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 0952c7f18abd..4d1ea9b26191 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -463,9 +463,9 @@ static int mdp4_kms_init(struct drm_device *dev)
ret = PTR_ERR(mmu);
goto fail;
} else if (!mmu) {
- DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- vm = NULL;
+ DRM_DEV_INFO(dev->dev, "no IOMMU, bailing out\n");
+ ret = -ENODEV;
+ goto fail;
} else {
vm = msm_gem_vm_create(dev, mmu, "mdp4",
0x1000, 0x100000000 - 0x1000,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9dcc7a596a11..7e977fec4100 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -826,6 +826,7 @@ static const struct file_operations fops = {
#define DRIVER_FEATURES_KMS ( \
DRIVER_GEM | \
+ DRIVER_GEM_GPUVA | \
DRIVER_ATOMIC | \
DRIVER_MODESET | \
0 )
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 00d0f3b7ba32..381a0853c05b 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -1023,6 +1023,7 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
struct drm_device *dev = job->vm->drm;
int ret = 0;
int cnt = 0;
+ int i = -1;
if (args->nr_ops == 1) {
/* Single op case, the op is inlined: */
@@ -1056,11 +1057,12 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
spin_lock(&file->table_lock);
- for (unsigned i = 0; i < args->nr_ops; i++) {
+ for (i = 0; i < args->nr_ops; i++) {
+ struct msm_vm_bind_op *op = &job->ops[i];
struct drm_gem_object *obj;
- if (!job->ops[i].handle) {
- job->ops[i].obj = NULL;
+ if (!op->handle) {
+ op->obj = NULL;
continue;
}
@@ -1068,16 +1070,22 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, job->ops[i].handle);
+ obj = idr_find(&file->object_idr, op->handle);
if (!obj) {
- ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+ ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
goto out_unlock;
}
drm_gem_object_get(obj);
- job->ops[i].obj = obj;
+ op->obj = obj;
cnt++;
+
+ if ((op->range + op->obj_offset) > obj->size) {
+ ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
+ op->range, op->obj_offset, obj->size);
+ goto out_unlock;
+ }
}
*nr_bos = cnt;
@@ -1085,6 +1093,17 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
out_unlock:
spin_unlock(&file->table_lock);
+ if (ret) {
+ for (; i >= 0; i--) {
+ struct msm_vm_bind_op *op = &job->ops[i];
+
+ if (!op->obj)
+ continue;
+
+ drm_gem_object_put(op->obj);
+ op->obj = NULL;
+ }
+ }
out:
return ret;
}
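The msm_gem_vma fix adds a range check inside the lookup loop, so the error path now has to drop the object references taken for the entries processed so far; that is why the index is hoisted out of the loop and starts at -1. The general acquire-then-unwind pattern, in stand-alone form:

#include <stdio.h>

struct item {
        int acquired;
};

static int acquire(struct item *it, int idx)
{
        if (idx == 3)
                return -1;      /* simulate a validation failure mid-loop */
        it->acquired = 1;
        return 0;
}

static void release(struct item *it)
{
        it->acquired = 0;
}

int main(void)
{
        struct item items[5] = { { 0 } };
        int i = -1;     /* -1 so the unwind loop is a no-op if we fail before the loop */
        int ret = 0;

        for (i = 0; i < 5; i++) {
                ret = acquire(&items[i], i);
                if (ret)
                        break;
        }

        if (ret) {
                /* Walk back over everything acquired so far, newest first. */
                for (; i >= 0; i--)
                        if (items[i].acquired)
                                release(&items[i]);
                printf("failed at index 3, unwound cleanly\n");
        }
        return ret ? 1 : 0;
}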
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 56828d218e88..4c4dcb095c4d 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -195,14 +195,13 @@ struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
iommu_dev = mdp_dev;
else
iommu_dev = mdss_dev;
-
mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
if (!mmu) {
- drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
- return NULL;
+ drm_info(dev, "no IOMMU, bailing out\n");
+ return ERR_PTR(-ENODEV);
}
vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 9a56e208cbdd..d0aa602ecc9d 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1736,10 +1736,11 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
-static const struct panel_delay delay_200_500_e50_p2e200 = {
+static const struct panel_delay delay_200_500_e50_d50_p2e200 = {
.hpd_absent = 200,
.unprepare = 500,
.enable = 50,
+ .disable = 50,
.prepare_to_enable = 200,
};
@@ -1828,6 +1829,13 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
.powered_on_to_enable = 335,
};
+static const struct panel_delay delay_200_500_e50_d100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .disable = 100,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1934,13 +1942,13 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80_d50, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
@@ -1979,12 +1987,12 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_d50_p2e200, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_d50_p2e200, "MNE007JA1-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
- EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 98f0782c8411..17898a29efe8 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -161,7 +161,7 @@ static int nt35560_set_brightness(struct backlight_device *bl)
par = 0x00;
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
&par, 1);
- if (ret) {
+ if (ret < 0) {
dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ac77d1246b94..811265648a58 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1408,7 +1408,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level;
+ unsigned offset, i;
unsigned width, height, depth, size;
unsigned blocksize;
unsigned nbx, nby;
@@ -1420,7 +1420,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
w0 = r600_mip_minify(w0, 0);
h0 = r600_mip_minify(h0, 0);
d0 = r600_mip_minify(d0, 0);
- for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ for (i = 0, offset = 0; i < nlevels; i++) {
width = r600_mip_minify(w0, i);
nbx = r600_fmt_get_nblocksx(format, width);
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index 65acffc3fea8..8e9ae7d980eb 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -219,7 +219,7 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
unsigned long flags;
if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
- job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
+ job->flags |= DRM_MOCK_SCHED_JOB_RESET_SKIPPED;
return DRM_GPU_SCHED_STAT_NO_HANG;
}
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 63d4f2ac7074..5b262126b776 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -95,9 +95,10 @@ struct drm_mock_sched_job {
struct completion done;
-#define DRM_MOCK_SCHED_JOB_DONE 0x1
-#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
-#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_RESET_SKIPPED 0x8
unsigned long flags;
struct list_head link;
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
index 55eb142bd7c5..82a41a456b0a 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_basic.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -317,8 +317,8 @@ static void drm_sched_skip_reset(struct kunit *test)
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
- job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET,
- 0);
+ job->flags & DRM_MOCK_SCHED_JOB_RESET_SKIPPED,
+ DRM_MOCK_SCHED_JOB_RESET_SKIPPED);
i = drm_mock_sched_advance(sched, 1);
KUNIT_ASSERT_EQ(test, i, 1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c2294abbe753..00be92da5509 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_to_timespec64(f->timestamp);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 149798754570..ded5348d190c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1296,6 +1296,8 @@
#define USB_VENDOR_ID_STEELSERIES 0x1038
#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
+#define USB_DEVICE_ID_STEELSERIES_ARCTIS_1 0x12b6
+#define USB_DEVICE_ID_STEELSERIES_ARCTIS_9 0x12c2
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index f619ed10535d..ffd034566e2e 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -695,6 +695,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
#endif
#if IS_ENABLED(CONFIG_HID_STEELSERIES)
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_9) },
#endif
#if IS_ENABLED(CONFIG_HID_SUNPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index d4bd7848b8c6..f98435631aa1 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -249,11 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
{
int ret, i;
struct led_classdev *led;
+ struct steelseries_srws1_data *drv_data;
size_t name_sz;
char *name;
- struct steelseries_srws1_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
-
+ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (drv_data == NULL) {
hid_err(hdev, "can't alloc SRW-S1 memory\n");
return -ENOMEM;
@@ -264,18 +264,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ goto err;
}
if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
ret = -ENODEV;
- goto err_free;
+ goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ goto err;
}
/* register led subsystem */
@@ -288,10 +288,10 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
name_sz = strlen(hdev->uniq) + 16;
/* 'ALL', for setting all LEDs simultaneously */
- led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "can't allocate memory for LED ALL\n");
- goto err_led;
+ goto out;
}
name = (void *)(&led[1]);
@@ -303,16 +303,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
led->brightness_set = steelseries_srws1_led_all_set_brightness;
drv_data->led[SRWS1_NUMBER_LEDS] = led;
- ret = led_classdev_register(&hdev->dev, led);
- if (ret)
- goto err_led;
+ ret = devm_led_classdev_register(&hdev->dev, led);
+ if (ret) {
+ hid_err(hdev, "failed to register LED %d. Aborting.\n", SRWS1_NUMBER_LEDS);
+ goto out; /* let the driver continue without LEDs */
+ }
/* Each individual LED */
for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
- led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "can't allocate memory for LED %d\n", i);
- goto err_led;
+ break;
}
name = (void *)(&led[1]);
@@ -324,53 +326,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
led->brightness_set = steelseries_srws1_led_set_brightness;
drv_data->led[i] = led;
- ret = led_classdev_register(&hdev->dev, led);
+ ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "failed to register LED %d. Aborting.\n", i);
-err_led:
- /* Deregister all LEDs (if any) */
- for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
- led = drv_data->led[i];
- drv_data->led[i] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
- goto out; /* but let the driver continue without LEDs */
+ break; /* but let the driver continue without LEDs */
}
}
out:
return 0;
-err_free:
- kfree(drv_data);
+err:
return ret;
}
-
-static void steelseries_srws1_remove(struct hid_device *hdev)
-{
- int i;
- struct led_classdev *led;
-
- struct steelseries_srws1_data *drv_data = hid_get_drvdata(hdev);
-
- if (drv_data) {
- /* Deregister LEDs (if any) */
- for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
- led = drv_data->led[i];
- drv_data->led[i] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
-
- }
-
- hid_hw_stop(hdev);
- kfree(drv_data);
-}
#endif
#define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000
@@ -405,13 +372,12 @@ static int steelseries_headset_request_battery(struct hid_device *hdev,
static void steelseries_headset_fetch_battery(struct hid_device *hdev)
{
- struct steelseries_device *sd = hid_get_drvdata(hdev);
int ret = 0;
- if (sd->quirks & STEELSERIES_ARCTIS_1)
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_1)
ret = steelseries_headset_request_battery(hdev,
arctis_1_battery_request, sizeof(arctis_1_battery_request));
- else if (sd->quirks & STEELSERIES_ARCTIS_9)
+ else if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_9)
ret = steelseries_headset_request_battery(hdev,
arctis_9_battery_request, sizeof(arctis_9_battery_request));
@@ -567,14 +533,7 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
struct steelseries_device *sd;
int ret;
- sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
- if (!sd)
- return -ENOMEM;
- hid_set_drvdata(hdev, sd);
- sd->hdev = hdev;
- sd->quirks = id->driver_data;
-
- if (sd->quirks & STEELSERIES_SRWS1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1) {
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
return steelseries_srws1_probe(hdev, id);
@@ -583,6 +542,13 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
#endif
}
+ sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+ hid_set_drvdata(hdev, sd);
+ sd->hdev = hdev;
+ sd->quirks = id->driver_data;
+
ret = hid_parse(hdev);
if (ret)
return ret;
@@ -610,17 +576,19 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
static void steelseries_remove(struct hid_device *hdev)
{
- struct steelseries_device *sd = hid_get_drvdata(hdev);
+ struct steelseries_device *sd;
unsigned long flags;
- if (sd->quirks & STEELSERIES_SRWS1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1) {
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
- steelseries_srws1_remove(hdev);
+ hid_hw_stop(hdev);
#endif
return;
}
+ sd = hid_get_drvdata(hdev);
+
spin_lock_irqsave(&sd->lock, flags);
sd->removed = true;
spin_unlock_irqrestore(&sd->lock, flags);
@@ -667,10 +635,10 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
unsigned long flags;
/* Not a headset */
- if (sd->quirks & STEELSERIES_SRWS1)
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1)
return 0;
- if (sd->quirks & STEELSERIES_ARCTIS_1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_1) {
hid_dbg(sd->hdev,
"Parsing raw event for Arctis 1 headset (%*ph)\n", size, read_buf);
if (size < ARCTIS_1_BATTERY_RESPONSE_LEN ||
@@ -688,7 +656,7 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
}
}
- if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_9) {
hid_dbg(sd->hdev,
"Parsing raw event for Arctis 9 headset (%*ph)\n", size, read_buf);
if (size < ARCTIS_9_BATTERY_RESPONSE_LEN) {
@@ -757,11 +725,11 @@ static const struct hid_device_id steelseries_devices[] = {
.driver_data = STEELSERIES_SRWS1 },
{ /* SteelSeries Arctis 1 Wireless for XBox */
- HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6),
- .driver_data = STEELSERIES_ARCTIS_1 },
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_1),
+ .driver_data = STEELSERIES_ARCTIS_1 },
{ /* SteelSeries Arctis 9 Wireless for XBox */
- HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12c2),
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_9),
.driver_data = STEELSERIES_ARCTIS_9 },
{ }
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c887f48756f4..bbd6f23bce78 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -394,27 +394,15 @@ static int hidraw_revoke(struct hidraw_list *list)
return 0;
}
-static long hidraw_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- unsigned int minor = iminor(inode);
- long ret = 0;
- struct hidraw *dev;
- struct hidraw_list *list = file->private_data;
- void __user *user_arg = (void __user*) arg;
-
- down_read(&minors_rwsem);
- dev = hidraw_table[minor];
- if (!dev || !dev->exist || hidraw_is_revoked(list)) {
- ret = -ENODEV;
- goto out;
- }
+ struct hid_device *hid = dev->hid;
switch (cmd) {
case HIDIOCGRDESCSIZE:
- if (put_user(dev->hid->rsize, (int __user *)arg))
- ret = -EFAULT;
+ if (put_user(hid->rsize, (int __user *)arg))
+ return -EFAULT;
break;
case HIDIOCGRDESC:
@@ -422,113 +410,145 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
__u32 len;
if (get_user(len, (int __user *)arg))
- ret = -EFAULT;
- else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
- ret = -EINVAL;
- else if (copy_to_user(user_arg + offsetof(
- struct hidraw_report_descriptor,
- value[0]),
- dev->hid->rdesc,
- min(dev->hid->rsize, len)))
- ret = -EFAULT;
+ return -EFAULT;
+
+ if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
+ return -EINVAL;
+
+ if (copy_to_user(arg + offsetof(
+ struct hidraw_report_descriptor,
+ value[0]),
+ hid->rdesc,
+ min(hid->rsize, len)))
+ return -EFAULT;
+
break;
}
case HIDIOCGRAWINFO:
{
struct hidraw_devinfo dinfo;
- dinfo.bustype = dev->hid->bus;
- dinfo.vendor = dev->hid->vendor;
- dinfo.product = dev->hid->product;
- if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
- ret = -EFAULT;
+ dinfo.bustype = hid->bus;
+ dinfo.vendor = hid->vendor;
+ dinfo.product = hid->product;
+ if (copy_to_user(arg, &dinfo, sizeof(dinfo)))
+ return -EFAULT;
break;
}
case HIDIOCREVOKE:
{
- if (user_arg)
- ret = -EINVAL;
- else
- ret = hidraw_revoke(list);
- break;
+ struct hidraw_list *list = file->private_data;
+
+ if (arg)
+ return -EINVAL;
+
+ return hidraw_revoke(list);
}
default:
- {
- struct hid_device *hid = dev->hid;
- if (_IOC_TYPE(cmd) != 'H') {
- ret = -EINVAL;
- break;
- }
+ /*
+ * None of the above ioctls can return -EAGAIN, so
+ * use it as a marker that we need to check variable
+ * length ioctls.
+ */
+ return -EAGAIN;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
- break;
- }
+ return 0;
+}
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
- break;
- }
+static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *user_arg)
+{
+ int len = _IOC_SIZE(cmd);
+
+ switch (cmd & ~IOCSIZE_MASK) {
+ case HIDIOCSFEATURE(0):
+ return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
+ case HIDIOCGFEATURE(0):
+ return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
+ case HIDIOCSINPUT(0):
+ return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
+ case HIDIOCGINPUT(0):
+ return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
+ case HIDIOCSOUTPUT(0):
+ return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ case HIDIOCGOUTPUT(0):
+ return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
- break;
- }
+ return -EINVAL;
+}
- /* Begin Read-only ioctls. */
- if (_IOC_DIR(cmd) != _IOC_READ) {
- ret = -EINVAL;
- break;
- }
+static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *user_arg)
+{
+ struct hid_device *hid = dev->hid;
+ int len = _IOC_SIZE(cmd);
+ int field_len;
+
+ switch (cmd & ~IOCSIZE_MASK) {
+ case HIDIOCGRAWNAME(0):
+ field_len = strlen(hid->name) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len;
+ case HIDIOCGRAWPHYS(0):
+ field_len = strlen(hid->phys) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len;
+ case HIDIOCGRAWUNIQ(0):
+ field_len = strlen(hid->uniq) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) {
- int len = strlen(hid->name) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->name, len) ?
- -EFAULT : len;
- break;
- }
+ return -EINVAL;
+}
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) {
- int len = strlen(hid->phys) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->phys, len) ?
- -EFAULT : len;
- break;
- }
+static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ unsigned int minor = iminor(inode);
+ struct hidraw *dev;
+ struct hidraw_list *list = file->private_data;
+ void __user *user_arg = (void __user *)arg;
+ int ret;
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
- int len = strlen(hid->uniq) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->uniq, len) ?
- -EFAULT : len;
- break;
- }
- }
+ down_read(&minors_rwsem);
+ dev = hidraw_table[minor];
+ if (!dev || !dev->exist || hidraw_is_revoked(list)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (_IOC_TYPE(cmd) != 'H') {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) {
ret = -ENOTTY;
+ goto out;
}
+
+ ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg);
+ if (ret != -EAGAIN)
+ goto out;
+
+ switch (_IOC_DIR(cmd)) {
+ case (_IOC_READ | _IOC_WRITE):
+ ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg);
+ break;
+ case _IOC_READ:
+ ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg);
+ break;
+ default:
+ /* Any other IOC_DIR is wrong */
+ ret = -EINVAL;
+ }
+
out:
up_read(&minors_rwsem);
return ret;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index d3912e3f2f13..30ebde1273be 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -112,9 +112,9 @@ struct i2c_hid {
struct i2chid_ops *ops;
struct drm_panel_follower panel_follower;
- struct work_struct panel_follower_prepare_work;
+ struct work_struct panel_follower_work;
bool is_panel_follower;
- bool prepare_work_finished;
+ bool panel_follower_work_finished;
};
static const struct i2c_hid_quirks {
@@ -1110,10 +1110,10 @@ static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid)
return ret;
}
-static void ihid_core_panel_prepare_work(struct work_struct *work)
+static void ihid_core_panel_follower_work(struct work_struct *work)
{
struct i2c_hid *ihid = container_of(work, struct i2c_hid,
- panel_follower_prepare_work);
+ panel_follower_work);
struct hid_device *hid = ihid->hid;
int ret;
@@ -1130,7 +1130,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
if (ret)
dev_warn(&ihid->client->dev, "Power on failed: %d\n", ret);
else
- WRITE_ONCE(ihid->prepare_work_finished, true);
+ WRITE_ONCE(ihid->panel_follower_work_finished, true);
/*
* The work APIs provide a number of memory ordering guarantees
@@ -1139,12 +1139,12 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
* guarantee that a write that happened in the work is visible after
* cancel_work_sync(). We'll add a write memory barrier here to match
* with i2c_hid_core_panel_unpreparing() to ensure that our write to
- * prepare_work_finished is visible there.
+ * panel_follower_work_finished is visible there.
*/
smp_wmb();
}
-static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_resume(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
@@ -1152,29 +1152,36 @@ static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
* Powering on a touchscreen can be a slow process. Queue the work to
* the system workqueue so we don't block the panel's power up.
*/
- WRITE_ONCE(ihid->prepare_work_finished, false);
- schedule_work(&ihid->panel_follower_prepare_work);
+ WRITE_ONCE(ihid->panel_follower_work_finished, false);
+ schedule_work(&ihid->panel_follower_work);
return 0;
}
-static int i2c_hid_core_panel_unpreparing(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_suspend(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
- cancel_work_sync(&ihid->panel_follower_prepare_work);
+ cancel_work_sync(&ihid->panel_follower_work);
- /* Match with ihid_core_panel_prepare_work() */
+ /* Match with ihid_core_panel_follower_work() */
smp_rmb();
- if (!READ_ONCE(ihid->prepare_work_finished))
+ if (!READ_ONCE(ihid->panel_follower_work_finished))
return 0;
return i2c_hid_core_suspend(ihid, true);
}
-static const struct drm_panel_follower_funcs i2c_hid_core_panel_follower_funcs = {
- .panel_prepared = i2c_hid_core_panel_prepared,
- .panel_unpreparing = i2c_hid_core_panel_unpreparing,
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_prepare_funcs = {
+ .panel_prepared = i2c_hid_core_panel_follower_resume,
+ .panel_unpreparing = i2c_hid_core_panel_follower_suspend,
+};
+
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_enable_funcs = {
+ .panel_enabled = i2c_hid_core_panel_follower_resume,
+ .panel_disabling = i2c_hid_core_panel_follower_suspend,
};
static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
@@ -1182,7 +1189,10 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
struct device *dev = &ihid->client->dev;
int ret;
- ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs;
+ if (ihid->hid->initial_quirks & HID_QUIRK_POWER_ON_AFTER_BACKLIGHT)
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_enable_funcs;
+ else
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_prepare_funcs;
/*
* If we're not in control of our own power up/power down then we can't
@@ -1237,7 +1247,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
init_waitqueue_head(&ihid->wait);
mutex_init(&ihid->cmd_lock);
mutex_init(&ihid->reset_lock);
- INIT_WORK(&ihid->panel_follower_prepare_work, ihid_core_panel_prepare_work);
+ INIT_WORK(&ihid->panel_follower_work, ihid_core_panel_follower_work);
/* we need to allocate the command buffer without knowing the maximum
* size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 3fcff6daa0d3..0215f217f6d8 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
+#include <linux/hid.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@ struct elan_i2c_hid_chip_data {
unsigned int post_power_delay_ms;
u16 hid_descriptor_address;
const char *main_supply_name;
+ bool power_after_backlight;
};
struct i2c_hid_of_elan {
@@ -97,6 +99,7 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
{
struct i2c_hid_of_elan *ihid_elan;
int ret;
+ u32 quirks = 0;
ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
if (!ihid_elan)
@@ -131,8 +134,12 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
}
}
+ if (ihid_elan->chip_data->power_after_backlight)
+ quirks = HID_QUIRK_POWER_ON_AFTER_BACKLIGHT;
+
ret = i2c_hid_core_probe(client, &ihid_elan->ops,
- ihid_elan->chip_data->hid_descriptor_address, 0);
+ ihid_elan->chip_data->hid_descriptor_address,
+ quirks);
if (ret)
goto err_deassert_reset;
@@ -150,6 +157,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
@@ -157,6 +165,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 4ac554731e98..f43efb80aabf 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -396,7 +396,7 @@ static const struct ec_board_info board_info_pro_art_x870E_creator_wifi = {
.sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
.family = family_amd_800_series,
};
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index c25a54d5b39a..0ba9195c9d71 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -113,8 +113,8 @@ struct mlxreg_fan {
int divider;
};
-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state);
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state, bool thermal);
static int
mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
@@ -224,8 +224,9 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
* last thermal state.
*/
if (pwm->last_hwmon_state >= pwm->last_thermal_state)
- return mlxreg_fan_set_cur_state(pwm->cdev,
- pwm->last_hwmon_state);
+ return _mlxreg_fan_set_cur_state(pwm->cdev,
+ pwm->last_hwmon_state,
+ false);
return 0;
}
return regmap_write(fan->regmap, pwm->reg, val);
@@ -357,9 +358,8 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state)
-
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state, bool thermal)
{
struct mlxreg_fan_pwm *pwm = cdev->devdata;
struct mlxreg_fan *fan = pwm->fan;
@@ -369,7 +369,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return -EINVAL;
/* Save thermal state. */
- pwm->last_thermal_state = state;
+ if (thermal)
+ pwm->last_thermal_state = state;
state = max_t(unsigned long, state, pwm->last_hwmon_state);
err = regmap_write(fan->regmap, pwm->reg,
@@ -381,6 +382,13 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+
+{
+ return _mlxreg_fan_set_cur_state(cdev, state, true);
+}
+
static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
.get_max_state = mlxreg_fan_get_max_state,
.get_cur_state = mlxreg_fan_get_cur_state,
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 5058432233da..4c345ff2cff1 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -520,6 +520,10 @@ static int __catu_probe(struct device *dev, struct resource *res)
struct coresight_platform_data *pdata = NULL;
void __iomem *base;
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
+
catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
if (!catu_desc.name)
return -ENOMEM;
@@ -632,7 +636,7 @@ static int catu_platform_probe(struct platform_device *pdev)
drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -641,11 +645,8 @@ static int catu_platform_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, drvdata);
ret = __catu_probe(&pdev->dev, res);
pm_runtime_put(&pdev->dev);
- if (ret) {
+ if (ret)
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
- }
return ret;
}
@@ -659,8 +660,6 @@ static void catu_platform_remove(struct platform_device *pdev)
__catu_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -668,18 +667,26 @@ static int catu_runtime_suspend(struct device *dev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
static int catu_runtime_resume(struct device *dev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 755776cd19c5..6e6b7aac206d 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -62,6 +62,7 @@
struct catu_drvdata {
struct clk *pclk;
+ struct clk *atclk;
void __iomem *base;
struct coresight_device *csdev;
int irq;
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index fa758cc21827..1accd7cbd54b 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1374,8 +1375,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
goto out_unlock;
}
- if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ sink_ops(csdev)->alloc_buffer) {
ret = etm_perf_add_symlink_sink(csdev);
if (ret) {
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index a871d997330b..e39dfb886688 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -699,7 +699,7 @@ static int debug_platform_probe(struct platform_device *pdev)
drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
@@ -710,8 +710,6 @@ static int debug_platform_probe(struct platform_device *pdev)
if (ret) {
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
return ret;
}
@@ -725,8 +723,6 @@ static void debug_platform_remove(struct platform_device *pdev)
__debug_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/hwtracing/coresight/coresight-ctcu-core.c b/drivers/hwtracing/coresight/coresight-ctcu-core.c
index c6bafc96db96..de279efe3405 100644
--- a/drivers/hwtracing/coresight/coresight-ctcu-core.c
+++ b/drivers/hwtracing/coresight/coresight-ctcu-core.c
@@ -209,7 +209,7 @@ static int ctcu_probe(struct platform_device *pdev)
drvdata->apb_clk = coresight_get_enable_apb_pclk(dev);
if (IS_ERR(drvdata->apb_clk))
- return -ENODEV;
+ return PTR_ERR(drvdata->apb_clk);
cfgs = of_device_get_match_data(dev);
if (cfgs) {
@@ -233,12 +233,8 @@ static int ctcu_probe(struct platform_device *pdev)
desc.access = CSDEV_ACCESS_IOMEM(base);
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- if (!IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_put(drvdata->apb_clk);
-
+ if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- }
return 0;
}
@@ -275,8 +271,6 @@ static void ctcu_platform_remove(struct platform_device *pdev)
ctcu_remove(pdev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_put(drvdata->apb_clk);
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index d5efb085b30d..8e81b41eb222 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -730,12 +730,10 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
+
dev_set_drvdata(dev, drvdata);
/* validity for the resource is already checked by the AMBA core */
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 1c6204e14422..baba2245b1df 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -832,12 +832,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 42e5d37403ad..4b98a7bf4cb7 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -4,6 +4,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
@@ -528,7 +529,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
}
- etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
@@ -1423,6 +1425,7 @@ static void etm4_init_arch_data(void *info)
etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
+ drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
/* ATBTRIG, bit[22] implementation can support ATB triggers? */
@@ -1852,7 +1855,9 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
}
- state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+
+ if (drvdata->numextinsel)
+ state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
@@ -1984,7 +1989,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
}
- etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
@@ -2215,6 +2221,10 @@ static int etm4_probe(struct device *dev)
if (WARN_ON(!drvdata))
return -ENOMEM;
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
+
if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
pm_save_enable = coresight_loses_context_with_cpu(dev) ?
PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
@@ -2299,14 +2309,12 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
if (res) {
drvdata->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drvdata->base)) {
- clk_put(drvdata->pclk);
+ if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- }
}
dev_set_drvdata(&pdev->dev, drvdata);
@@ -2413,9 +2421,6 @@ static void etm4_remove_platform_dev(struct platform_device *pdev)
if (drvdata)
etm4_remove_dev(drvdata);
pm_runtime_disable(&pdev->dev);
-
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
static const struct amba_id etm4_ids[] = {
@@ -2463,8 +2468,8 @@ static int etm4_runtime_suspend(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata->pclk && !IS_ERR(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
return 0;
}
@@ -2472,11 +2477,17 @@ static int etm4_runtime_suspend(struct device *dev)
static int etm4_runtime_resume(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata->pclk && !IS_ERR(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index ab251865b893..e9eeea6240d5 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@...aro.org>
*/
+#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index ac649515054d..13ec9ecef46f 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -162,6 +162,7 @@
#define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28)
#define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0)
+#define TRCIDR5_NUMEXTINSEL_MASK GENMASK(11, 9)
#define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16)
#define TRCIDR5_ATBTRIG BIT(22)
#define TRCIDR5_LPOVERRIDE BIT(23)
@@ -919,7 +920,8 @@ struct etmv4_save_state {
/**
* struct etm4_drvdata - specifics associated to an ETM component
- * @pclk APB clock if present, otherwise NULL
+ * @pclk: APB clock if present, otherwise NULL
+ * @atclk: Optional clock for the core parts of the ETMv4.
* @base: Memory mapped base address for this component.
* @csdev: Component vitals needed by the framework.
* @spinlock: Only one at a time pls.
@@ -988,6 +990,7 @@ struct etmv4_save_state {
*/
struct etmv4_drvdata {
struct clk *pclk;
+ struct clk *atclk;
void __iomem *base;
struct coresight_device *csdev;
raw_spinlock_t spinlock;
@@ -999,6 +1002,7 @@ struct etmv4_drvdata {
u8 nr_cntr;
u8 nr_ext_inp;
u8 numcidc;
+ u8 numextinsel;
u8 numvmidc;
u8 nrseqstate;
u8 nr_event;
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index b1922dbe9292..b044a4125310 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -213,7 +213,6 @@ ATTRIBUTE_GROUPS(coresight_funnel);
static int funnel_probe(struct device *dev, struct resource *res)
{
- int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
@@ -231,16 +230,13 @@ static int funnel_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
drvdata->pclk = coresight_get_enable_apb_pclk(dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
/*
* Map the device base for dynamic-funnel, which has been
@@ -248,10 +244,8 @@ static int funnel_probe(struct device *dev, struct resource *res)
*/
if (res) {
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto out_disable_clk;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
drvdata->base = base;
desc.groups = coresight_funnel_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
@@ -261,10 +255,9 @@ static int funnel_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out_disable_clk;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
dev->platform_data = pdata;
raw_spin_lock_init(&drvdata->spinlock);
@@ -274,19 +267,10 @@ static int funnel_probe(struct device *dev, struct resource *res)
desc.pdata = pdata;
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto out_disable_clk;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
- ret = 0;
-
-out_disable_clk:
- if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- if (ret && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
- return ret;
+ return 0;
}
static int funnel_remove(struct device *dev)
@@ -355,8 +339,6 @@ static void funnel_platform_remove(struct platform_device *pdev)
funnel_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
static const struct of_device_id funnel_match[] = {
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 06efd2b01a0f..9e8bd36e7a9a 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -219,7 +219,6 @@ static const struct attribute_group *replicator_groups[] = {
static int replicator_probe(struct device *dev, struct resource *res)
{
- int ret = 0;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
struct coresight_desc desc = { 0 };
@@ -238,16 +237,13 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
drvdata->pclk = coresight_get_enable_apb_pclk(dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
/*
* Map the device base for dynamic-replicator, which has been
@@ -255,10 +251,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
*/
if (res) {
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto out_disable_clk;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
drvdata->base = base;
desc.groups = replicator_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
@@ -272,10 +266,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out_disable_clk;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev->platform_data = pdata;
raw_spin_lock_init(&drvdata->spinlock);
@@ -286,19 +278,11 @@ static int replicator_probe(struct device *dev, struct resource *res)
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto out_disable_clk;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
replicator_reset(drvdata);
-
-out_disable_clk:
- if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- if (ret && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
- return ret;
+ return 0;
}
static int replicator_remove(struct device *dev)
@@ -335,8 +319,6 @@ static void replicator_platform_remove(struct platform_device *pdev)
replicator_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index e45c6c7204b4..57fbe3ad0fb2 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -842,16 +842,13 @@ static int __stm_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
drvdata->pclk = coresight_get_enable_apb_pclk(dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, res);
@@ -1033,8 +1030,6 @@ static void stm_platform_remove(struct platform_device *pdev)
__stm_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index 83dad24e0116..6836b05986e8 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -395,7 +395,7 @@ static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, voi
if (list_empty(&csdev->config_csdev_list))
return;
- guard(raw_spinlock_irqsave)(&csdev->cscfg_csdev_lock);
+ guard(raw_spinlock_irqsave)(&csdev->cscfg_csdev_lock);
list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
if (config_csdev->config_desc->load_owner == load_owner)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 88afb16bb6be..e867198b03e8 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -789,6 +789,10 @@ static int __tmc_probe(struct device *dev, struct resource *res)
struct coresight_desc desc = { 0 };
struct coresight_dev_list *dev_list = NULL;
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
+
ret = -ENOMEM;
/* Validity for the resource is already checked by the AMBA core */
@@ -987,7 +991,7 @@ static int tmc_platform_probe(struct platform_device *pdev)
drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
@@ -1011,8 +1015,6 @@ static void tmc_platform_remove(struct platform_device *pdev)
__tmc_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -1020,18 +1022,26 @@ static int tmc_runtime_suspend(struct device *dev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
static int tmc_runtime_resume(struct device *dev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 6541a27a018e..cbb4ba439158 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -210,6 +210,7 @@ struct tmc_resrv_buf {
/**
* struct tmc_drvdata - specifics associated to an TMC component
+ * @atclk: optional clock for the core parts of the TMC.
* @pclk: APB clock if present, otherwise NULL
* @base: memory mapped base address for this component.
* @csdev: component vitals needed by the framework.
@@ -244,6 +245,7 @@ struct tmc_resrv_buf {
* Used by ETR/ETF.
*/
struct tmc_drvdata {
+ struct clk *atclk;
struct clk *pclk;
void __iomem *base;
struct coresight_device *csdev;
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 0633f04beb24..333b3cb23685 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -71,6 +71,8 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
if (tpdm_data->dsb) {
rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
"qcom,dsb-element-bits", &drvdata->dsb_esize);
+ if (rc)
+ goto out;
}
if (tpdm_data->cmb) {
@@ -78,6 +80,7 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
"qcom,cmb-element-bits", &drvdata->cmb_esize);
}
+out:
if (rc)
dev_warn_once(&csdev->dev,
"Failed to read TPDM Element size: %d\n", rc);
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3e0159288428..8d6179c83e5d 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -128,7 +128,6 @@ static const struct coresight_ops tpiu_cs_ops = {
static int __tpiu_probe(struct device *dev, struct resource *res)
{
- int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct tpiu_drvdata *drvdata;
@@ -144,16 +143,13 @@ static int __tpiu_probe(struct device *dev, struct resource *res)
spin_lock_init(&drvdata->spinlock);
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
drvdata->pclk = coresight_get_enable_apb_pclk(dev);
if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ return PTR_ERR(drvdata->pclk);
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -293,8 +289,6 @@ static void tpiu_platform_remove(struct platform_device *pdev)
__tpiu_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 8267dd1a2130..43643d2c5bdd 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -23,7 +23,8 @@
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/*
* A padding packet that will help the user space tools
@@ -257,6 +258,7 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ isb();
trbe_drain_buffer();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
@@ -747,12 +749,12 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
if (!buf)
- return ERR_PTR(-ENOMEM);
+ return NULL;
pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
kfree(buf);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
for (i = 0; i < nr_pages; i++)
@@ -762,7 +764,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
if (!buf->trbe_base) {
kfree(pglist);
kfree(buf);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
buf->trbe_write = buf->trbe_base;
@@ -1279,7 +1281,7 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
* into the device for that purpose.
*/
desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
- if (IS_ERR(desc.pdata))
+ if (!desc.pdata)
goto cpu_clear;
desc.type = CORESIGHT_DEV_TYPE_SINK;
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index c4c111275627..323f0ccb6878 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -7,6 +7,7 @@
#ifndef _ULTRASOC_SMB_H
#define _ULTRASOC_SMB_H
+#include <linux/bitfield.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index a35e4c64a1d4..e37210d6c5f2 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -314,6 +314,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
exit_probe:
dw_i2c_plat_pm_cleanup(dev);
+ i2c_dw_prepare_clk(dev, false);
exit_reset:
reset_control_assert(dev->rst);
return ret;
@@ -331,9 +332,11 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
i2c_dw_disable(dev);
pm_runtime_dont_use_autosuspend(device);
- pm_runtime_put_sync(device);
+ pm_runtime_put_noidle(device);
dw_i2c_plat_pm_cleanup(dev);
+ i2c_dw_prepare_clk(dev, false);
+
i2c_dw_remove_lock_support(dev);
reset_control_assert(dev->rst);
diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c
index b68a21fff0b5..6b918770e612 100644
--- a/drivers/i2c/busses/i2c-k1.c
+++ b/drivers/i2c/busses/i2c-k1.c
@@ -3,6 +3,7 @@
* Copyright (C) 2024-2025 Troy Mitchell <troymitchell988@...il.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
@@ -14,6 +15,7 @@
#define SPACEMIT_ICR 0x0 /* Control register */
#define SPACEMIT_ISR 0x4 /* Status register */
#define SPACEMIT_IDBR 0xc /* Data buffer register */
+#define SPACEMIT_IRCR 0x18 /* Reset cycle counter */
#define SPACEMIT_IBMR 0x1c /* Bus monitor register */
/* SPACEMIT_ICR register fields */
@@ -25,7 +27,8 @@
#define SPACEMIT_CR_MODE_FAST BIT(8) /* bus mode (master operation) */
/* Bit 9 is reserved */
#define SPACEMIT_CR_UR BIT(10) /* unit reset */
-/* Bits 11-12 are reserved */
+#define SPACEMIT_CR_RSTREQ BIT(11) /* i2c bus reset request */
+/* Bit 12 is reserved */
#define SPACEMIT_CR_SCLE BIT(13) /* master clock enable */
#define SPACEMIT_CR_IUE BIT(14) /* unit enable */
/* Bits 15-17 are reserved */
@@ -76,6 +79,10 @@
SPACEMIT_SR_GCAD | SPACEMIT_SR_IRF | SPACEMIT_SR_ITE | \
SPACEMIT_SR_ALD)
+#define SPACEMIT_RCR_SDA_GLITCH_NOFIX BIT(7) /* bypass the SDA glitch fix */
+/* the cycles of SCL during bus reset */
+#define SPACEMIT_RCR_FIELD_RST_CYC GENMASK(3, 0)
+
/* SPACEMIT_IBMR register fields */
#define SPACEMIT_BMR_SDA BIT(0) /* SDA line level */
#define SPACEMIT_BMR_SCL BIT(1) /* SCL line level */
@@ -88,6 +95,8 @@
#define SPACEMIT_SR_ERR (SPACEMIT_SR_BED | SPACEMIT_SR_RXOV | SPACEMIT_SR_ALD)
+#define SPACEMIT_BUS_RESET_CLK_CNT_MAX 9
+
enum spacemit_i2c_state {
SPACEMIT_STATE_IDLE,
SPACEMIT_STATE_START,
@@ -160,6 +169,7 @@ static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *i2c)
static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
{
u32 status;
+ u8 clk_cnt;
/* if bus is locked, reset unit. 0: locked */
status = readl(i2c->base + SPACEMIT_IBMR);
@@ -169,9 +179,21 @@ static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
spacemit_i2c_reset(i2c);
usleep_range(10, 20);
- /* check scl status again */
+ for (clk_cnt = 0; clk_cnt < SPACEMIT_BUS_RESET_CLK_CNT_MAX; clk_cnt++) {
+ status = readl(i2c->base + SPACEMIT_IBMR);
+ if (status & SPACEMIT_BMR_SDA)
+ return;
+
+ /* There's nothing left to save here, we are about to exit */
+ writel(FIELD_PREP(SPACEMIT_RCR_FIELD_RST_CYC, 1),
+ i2c->base + SPACEMIT_IRCR);
+ writel(SPACEMIT_CR_RSTREQ, i2c->base + SPACEMIT_ICR);
+ usleep_range(20, 30);
+ }
+
+ /* check sda again here */
status = readl(i2c->base + SPACEMIT_IBMR);
- if (!(status & SPACEMIT_BMR_SCL))
+ if (!(status & SPACEMIT_BMR_SDA))
dev_warn_ratelimited(i2c->dev, "unit reset failed\n");
}
@@ -237,6 +259,14 @@ static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
val |= SPACEMIT_CR_MSDE | SPACEMIT_CR_MSDIE;
writel(val, i2c->base + SPACEMIT_ICR);
+
+ /*
+ * The glitch fix in the K1 I2C controller introduces a delay
+ * on restart signals, so we disable the fix here.
+ */
+ val = readl(i2c->base + SPACEMIT_IRCR);
+ val |= SPACEMIT_RCR_SDA_GLITCH_NOFIX;
+ writel(val, i2c->base + SPACEMIT_IRCR);
}
static inline void
@@ -267,19 +297,6 @@ static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c)
writel(val, i2c->base + SPACEMIT_ICR);
}
-static void spacemit_i2c_stop(struct spacemit_i2c_dev *i2c)
-{
- u32 val;
-
- val = readl(i2c->base + SPACEMIT_ICR);
- val |= SPACEMIT_CR_STOP | SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
-
- if (i2c->read)
- val |= SPACEMIT_CR_ACKNAK;
-
- writel(val, i2c->base + SPACEMIT_ICR);
-}
-
static int spacemit_i2c_xfer_msg(struct spacemit_i2c_dev *i2c)
{
unsigned long time_left;
@@ -412,7 +429,6 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
val = readl(i2c->base + SPACEMIT_ICR);
val &= ~(SPACEMIT_CR_TB | SPACEMIT_CR_ACKNAK | SPACEMIT_CR_STOP | SPACEMIT_CR_START);
- writel(val, i2c->base + SPACEMIT_ICR);
switch (i2c->state) {
case SPACEMIT_STATE_START:
@@ -429,14 +445,16 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
}
if (i2c->state != SPACEMIT_STATE_IDLE) {
+ val |= SPACEMIT_CR_TB | SPACEMIT_CR_ALDIE;
+
if (spacemit_i2c_is_last_msg(i2c)) {
/* trigger next byte with stop */
- spacemit_i2c_stop(i2c);
- } else {
- /* trigger next byte */
- val |= SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
- writel(val, i2c->base + SPACEMIT_ICR);
+ val |= SPACEMIT_CR_STOP;
+
+ if (i2c->read)
+ val |= SPACEMIT_CR_ACKNAK;
}
+ writel(val, i2c->base + SPACEMIT_ICR);
}
err_out:
@@ -476,12 +494,13 @@ static int spacemit_i2c_xfer(struct i2c_adapter *adapt, struct i2c_msg *msgs, in
spacemit_i2c_enable(i2c);
ret = spacemit_i2c_wait_bus_idle(i2c);
- if (!ret)
+ if (!ret) {
ret = spacemit_i2c_xfer_msg(i2c);
- else if (ret < 0)
- dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
- else
+ if (ret < 0)
+ dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
+ } else {
spacemit_i2c_check_bus_release(i2c);
+ }
spacemit_i2c_disable(i2c);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index ab456c3717db..dee40704825c 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1243,6 +1243,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
{
int ret;
int left_num = num;
+ bool write_then_read_en = false;
struct mtk_i2c *i2c = i2c_get_adapdata(adap);
ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
@@ -1256,6 +1257,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
msgs[0].addr == msgs[1].addr) {
i2c->auto_restart = 0;
+ write_then_read_en = true;
}
}
@@ -1280,12 +1282,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
else
i2c->op = I2C_MASTER_WR;
- if (!i2c->auto_restart) {
- if (num > 1) {
- /* combined two messages into one transaction */
- i2c->op = I2C_MASTER_WRRD;
- left_num--;
- }
+ if (write_then_read_en) {
+ /* combined two messages into one transaction */
+ i2c->op = I2C_MASTER_WRRD;
+ left_num--;
}
/* always use DMA mode. */
@@ -1293,7 +1293,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (ret < 0)
goto err_exit;
- msgs++;
+ if (i2c->op == I2C_MASTER_WRRD)
+ msgs += 2;
+ else
+ msgs++;
}
/* the return value is number of executed messages */
ret = num;
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 0d857cc68cc5..79ceaa5f5afd 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -38,7 +38,11 @@ static inline void i3c_writel_fifo(void __iomem *addr, const void *buf,
u32 tmp = 0;
memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
- writel(tmp, addr);
+ /*
+ * writesl() instead of writel() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ writesl(addr, &tmp, 1);
}
}
@@ -55,7 +59,11 @@ static inline void i3c_readl_fifo(const void __iomem *addr, void *buf,
if (nbytes & 3) {
u32 tmp;
- tmp = readl(addr);
+ /*
+ * readsl() instead of readl() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ readsl(addr, &tmp, 1);
memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
}
}
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 701ae165b25b..9641e66a4e5f 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -417,6 +417,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
if (ret) {
dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
return ret;
}
@@ -517,9 +518,24 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
*/
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
- /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
- writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
- SVC_I3C_MCTRL_IBIRESP_AUTO,
+ /*
+ * Write REQUEST_START_ADDR request to emit broadcast address for arbitration,
+ * instead of using AUTO_IBI.
+ *
+ * Using AutoIBI request may cause controller to remain in AutoIBI state when
+ * there is a glitch on SDA line (high->low->high).
+ * 1. SDA high->low, raising an interrupt to execute IBI isr.
+ * 2. SDA low->high.
+ * 3. IBI isr writes an AutoIBI request.
+ * 4. The controller will not start AutoIBI process because SDA is not low.
+ * 5. IBIWON polling times out.
+ * 6. Controller remains in AutoIBI state and doesn't accept EmitStop request.
+ */
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_MANUAL |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
+ SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
master->regs + SVC_I3C_MCTRL);
/* Wait for IBIWON, should take approximately 100us */
@@ -539,10 +555,15 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
- if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
+ if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
svc_i3c_master_nack_ibi(master);
- else
+ } else {
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ svc_i3c_master_ack_ibi(master, true);
+ else
+ svc_i3c_master_ack_ibi(master, false);
svc_i3c_master_handle_ibi(master, dev);
+ }
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c174ebb7d5e6..642beb4b3360 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
@@ -604,7 +605,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
{
int scale_type, scale_val, scale_val2;
int offset_type, offset_val, offset_val2;
- s64 raw64 = raw;
+ s64 denominator, raw64 = raw;
offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
IIO_CHAN_INFO_OFFSET);
@@ -639,7 +640,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
* If no channel scaling is available apply consumer scale to
* raw value and return.
*/
- *processed = raw * scale;
+ *processed = raw64 * scale;
return 0;
}
@@ -648,20 +649,19 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
*processed = raw64 * scale_val * scale;
break;
case IIO_VAL_INT_PLUS_MICRO:
- if (scale_val2 < 0)
- *processed = -raw64 * scale_val * scale;
- else
- *processed = raw64 * scale_val * scale;
- *processed += div_s64(raw64 * (s64)scale_val2 * scale,
- 1000000LL);
- break;
case IIO_VAL_INT_PLUS_NANO:
- if (scale_val2 < 0)
- *processed = -raw64 * scale_val * scale;
- else
- *processed = raw64 * scale_val * scale;
- *processed += div_s64(raw64 * (s64)scale_val2 * scale,
- 1000000000LL);
+ switch (scale_type) {
+ case IIO_VAL_INT_PLUS_MICRO:
+ denominator = MICRO;
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ denominator = NANO;
+ break;
+ }
+ *processed = raw64 * scale * abs(scale_val);
+ *processed += div_s64(raw64 * scale * abs(scale_val2), denominator);
+ if (scale_val < 0 || scale_val2 < 0)
+ *processed *= -1;
break;
case IIO_VAL_FRACTIONAL:
*processed = div_s64(raw64 * (s64)scale_val * scale,
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index be0743dac3ff..929e89841c12 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -454,14 +454,10 @@ static int addr_resolve_neigh(const struct dst_entry *dst,
{
int ret = 0;
- if (ndev_flags & IFF_LOOPBACK) {
+ if (ndev_flags & IFF_LOOPBACK)
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- } else {
- if (!(ndev_flags & IFF_NOARP)) {
- /* If the device doesn't do ARP internally */
- ret = fetch_ha(dst, addr, dst_in, seq);
- }
- }
+ else
+ ret = fetch_ha(dst, addr, dst_in, seq);
return ret;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 92678e438ff4..01bede8ba105 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1049,8 +1049,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
- cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 53571e6b3162..66df5bed6a56 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1013,6 +1013,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+
delta = timeout - sa_local_svc_timeout_ms;
if (delta < 0)
abs_delta = -delta;
@@ -1020,7 +1022,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
abs_delta = delta;
if (delta != 0) {
- spin_lock_irqsave(&ib_nl_request_lock, flags);
sa_local_svc_timeout_ms = timeout;
list_for_each_entry(query, &ib_nl_request_list, list) {
if (delta < 0 && abs_delta > query->timeout)
@@ -1038,9 +1039,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (delay)
mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
(unsigned long)delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
settimeout_out:
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d456e4fde3e1..5fabd39b7492 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
@@ -883,6 +884,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
}
+/*
+ * Calculate maximum SQ overhead across all QP types.
+ * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
+ * have smaller overhead than the types calculated below,
+ * so they are implicitly included.
+ */
+static u32 mlx5_ib_calc_max_sq_overhead(void)
+{
+ u32 max_overhead_xrc, overhead_ud_lso, a, b;
+
+ /* XRC_INI */
+ max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
+ max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
+ a = sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg);
+ b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
+ max_overhead_xrc += max(a, b);
+
+ /* UD with LSO */
+ overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
+
+ return max(max_overhead_xrc, overhead_ud_lso);
+}
+
+static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ u32 max_wqe_size;
+ /* max QP overhead + 1 SGE, no inline, no special features */
+ max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
+ sizeof(struct mlx5_wqe_data_seg);
+
+ max_wqe_size = roundup_pow_of_two(max_wqe_size);
+
+ max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
+
+ return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
+}
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -1041,7 +1087,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = ~(min_page_size - 1);
props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
- props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
@@ -1793,7 +1839,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
}
static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
int err;
@@ -1805,6 +1852,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
if (err)
goto out;
+ lb_state->force_enable = true;
return 0;
out:
@@ -1813,16 +1861,22 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
}
static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
mlx5_nic_vport_update_local_lb(slave, false);
mlx5_nic_vport_update_local_lb(master, false);
+
+ lb_state->force_enable = false;
}
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
int err = 0;
+ if (dev->lb.force_enable)
+ return 0;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td++;
@@ -1844,6 +1898,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
+ if (dev->lb.force_enable)
+ return;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td--;
@@ -3523,7 +3580,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
lockdep_assert_held(&mlx5_ib_multiport_mutex);
- mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
@@ -3620,7 +3677,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
- err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
if (err)
goto unbind;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7ffc7ee92cf0..15e3962633dc 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1109,6 +1109,7 @@ struct mlx5_ib_lb_state {
u32 user_td;
int qps;
bool enabled;
+ bool force_enable;
};
struct mlx5_ib_pf_eq {
@@ -1802,6 +1803,10 @@ mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
+ /* In KSM mode HW requires IOVA and mkey's page size to be aligned */
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM && iova)
+ bitmap &= GENMASK_ULL(__ffs64(iova), 0);
+
return ib_umem_find_best_pgsz(umem, bitmap, iova);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6f8f353e9583..f522820b950c 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task)
* yield the cpu and reschedule the task
*/
if (!ret) {
- task->state = TASK_STATE_IDLE;
- resched = 1;
+ if (task->state != TASK_STATE_DRAINING) {
+ task->state = TASK_STATE_IDLE;
+ resched = 1;
+ } else {
+ cont = 1;
+ }
goto exit;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 35c3bde0d00a..efa2f097b582 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -769,7 +769,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
struct siw_wqe *wqe = tx_wqe(qp);
unsigned long flags;
- int rv = 0;
+ int rv = 0, imm_err = 0;
if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
@@ -955,9 +955,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
* Send directly if SQ processing is not in progress.
* Eventual immediate errors (rv < 0) do not affect the involved
* RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
- * processing, if new work is already pending. But rv must be passed
- * to caller.
+ * processing, if new work is already pending. But rv and pointer
+ * to failed work request must be passed to caller.
*/
+ if (unlikely(rv < 0)) {
+ /*
+ * Immediate error
+ */
+ siw_dbg_qp(qp, "Immediate error %d\n", rv);
+ imm_err = rv;
+ *bad_wr = wr;
+ }
if (wqe->wr_status != SIW_WR_IDLE) {
spin_unlock_irqrestore(&qp->sq_lock, flags);
goto skip_direct_sending;
@@ -982,15 +990,10 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
up_read(&qp->state_lock);
- if (rv >= 0)
- return 0;
- /*
- * Immediate error
- */
- siw_dbg_qp(qp, "error %d\n", rv);
+ if (unlikely(imm_err))
+ return imm_err;
- *bad_wr = wr;
- return rv;
+ return (rv >= 0) ? 0 : rv;
}
/*
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 2c51ea9d01d7..13336a2fd49c 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -775,6 +775,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
+ memset(&ff_up_compat, 0, sizeof(ff_up_compat));
ff_up_compat.request_id = ff_up->request_id;
ff_up_compat.retval = ff_up->retval;
/*
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 322d5a3d40a0..ef6e2c3374ff 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3317,7 +3317,7 @@ static int mxt_probe(struct i2c_client *client)
if (data->reset_gpio) {
/* Wait a while and then de-assert the RESET GPIO line */
msleep(MXT_RESET_GPIO_TIME);
- gpiod_set_value(data->reset_gpio, 0);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
msleep(MXT_RESET_INVALID_CHG);
}
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index affbf4a1558d..5aa7f46a420b 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -435,8 +435,21 @@ static int domain_translation_struct_show(struct seq_file *m,
}
pgd &= VTD_PAGE_MASK;
} else { /* legacy mode */
- pgd = context->lo & VTD_PAGE_MASK;
- agaw = context->hi & 7;
+ u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2;
+
+ /*
+ * According to the Translation Type (TT),
+ * get the page table pointer (SSPTPTR).
+ */
+ switch (tt) {
+ case CONTEXT_TT_MULTI_LEVEL:
+ case CONTEXT_TT_DEV_IOTLB:
+ pgd = context->lo & VTD_PAGE_MASK;
+ agaw = context->hi & 7;
+ break;
+ default:
+ goto iommu_unlock;
+ }
}
seq_printf(m, "Device %04x:%02x:%02x.%x ",
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d09b92871659..2c261c069001 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -541,7 +541,8 @@ enum {
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu) (sm_supported(iommu) && \
- ecap_slads((iommu)->ecap))
+ ecap_slads((iommu)->ecap) && \
+ ecap_smpwc(iommu->ecap))
#define nested_supported(iommu) (sm_supported(iommu) && \
ecap_nest((iommu)->ecap))
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index e236b932e766..c95394cd03a7 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -37,6 +37,8 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
const struct bus_type *bus,
struct notifier_block *nb);
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu);
+
struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group,
ioasid_t pasid,
unsigned int type);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 060ebe330ee1..59244c744eab 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -304,6 +304,7 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
struct notifier_block *nb)
{
bus_unregister_notifier(bus, nb);
+ fwnode_remove_software_node(iommu->fwnode);
iommu_device_unregister(iommu);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
@@ -326,6 +327,12 @@ int iommu_device_register_bus(struct iommu_device *iommu,
if (err)
return err;
+ iommu->fwnode = fwnode_create_software_node(NULL, NULL);
+ if (IS_ERR(iommu->fwnode)) {
+ bus_unregister_notifier(bus, nb);
+ return PTR_ERR(iommu->fwnode);
+ }
+
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
@@ -335,9 +342,28 @@ int iommu_device_register_bus(struct iommu_device *iommu,
iommu_device_unregister_bus(iommu, bus, nb);
return err;
}
+ WRITE_ONCE(iommu->ready, true);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
+
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu)
+{
+ int rc;
+
+ mutex_lock(&iommu_probe_device_lock);
+ rc = iommu_fwspec_init(dev, iommu->fwnode);
+ mutex_unlock(&iommu_probe_device_lock);
+
+ if (rc)
+ return rc;
+
+ rc = device_add(dev);
+ if (rc)
+ iommu_fwspec_free(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iommu_mock_device_add);
#endif
static struct dev_iommu *dev_iommu_get(struct device *dev)
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 61686603c769..de178827a078 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -1126,7 +1126,7 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
goto err_put;
}
- rc = device_add(&mdev->dev);
+ rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev);
if (rc)
goto err_put;
return mdev;
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
index 9290ac741949..2fb58d76f521 100644
--- a/drivers/irqchip/irq-gic-v5-its.c
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -191,9 +191,9 @@ static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its,
unsigned int num_events)
{
unsigned int l1_bits, l2_bits, span, events_per_l2_table;
- unsigned int i, complete_tables, final_span, num_ents;
+ unsigned int complete_tables, final_span, num_ents;
__le64 *itt_l1, *itt_l2, **l2ptrs;
- int ret;
+ int i, ret;
u64 val;
ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz);
@@ -949,15 +949,18 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
device_id = its_dev->device_id;
for (i = 0; i < nr_irqs; i++) {
- lpi = gicv5_alloc_lpi();
+ ret = gicv5_alloc_lpi();
if (ret < 0) {
pr_debug("Failed to find free LPI!\n");
- goto out_eventid;
+ goto out_free_irqs;
}
+ lpi = ret;
ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
- if (ret)
- goto out_free_lpi;
+ if (ret) {
+ gicv5_free_lpi(lpi);
+ goto out_free_irqs;
+ }
/*
* Store eventid and deviceid into the hwirq for later use.
@@ -977,8 +980,13 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
return 0;
-out_free_lpi:
- gicv5_free_lpi(lpi);
+out_free_irqs:
+ while (--i >= 0) {
+ irqd = irq_domain_get_irq_data(domain, virq + i);
+ gicv5_free_lpi(irqd->parent_data->hwirq);
+ irq_domain_reset_irq_data(irqd);
+ irq_domain_free_irqs_parent(domain, virq + i, 1);
+ }
out_eventid:
gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
return ret;
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index bcfddc51bc6a..2fd4d94f9bd7 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -85,6 +85,8 @@ static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static const struct irq_chip sg2042_msi_middle_irq_chip = {
.name = "SG2042 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2042_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -114,6 +116,8 @@ static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static struct irq_chip sg2044_msi_middle_irq_chip = {
.name = "SG2044 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2044_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -185,8 +189,10 @@ static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
.select = msi_lib_irq_domain_select,
};
-#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
@@ -200,10 +206,12 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
-#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
MSI_FLAG_PCI_MSIX)
static const struct msi_parent_ops sg2044_msi_parent_ops = {
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index 89cf5120f5d5..db7c2c743adc 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
@@ -114,36 +114,39 @@ enum {
REG_THERM_THRSH1,
REG_THERM_THRSH2,
REG_THERM_THRSH3,
+ REG_TORCH_CLAMP,
REG_MAX_COUNT,
};
static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
- REG_FIELD(0x08, 0, 7), /* status1 */
- REG_FIELD(0x09, 0, 7), /* status2 */
- REG_FIELD(0x0a, 0, 7), /* status3 */
- REG_FIELD_ID(0x40, 0, 7, 3, 1), /* chan_timer */
- REG_FIELD_ID(0x43, 0, 6, 3, 1), /* itarget */
- REG_FIELD(0x46, 7, 7), /* module_en */
- REG_FIELD(0x47, 0, 5), /* iresolution */
- REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */
- REG_FIELD(0x4c, 0, 2), /* chan_en */
- REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */
- REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */
- REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */
+ [REG_STATUS1] = REG_FIELD(0x08, 0, 7),
+ [REG_STATUS2] = REG_FIELD(0x09, 0, 7),
+ [REG_STATUS3] = REG_FIELD(0x0a, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x40, 0, 7, 3, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x43, 0, 6, 3, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x47, 0, 5),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x49, 0, 2, 3, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4c, 0, 2),
+ [REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2),
+ [REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xec, 0, 6),
};
static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
- REG_FIELD(0x06, 0, 7), /* status1 */
- REG_FIELD(0x07, 0, 6), /* status2 */
- REG_FIELD(0x09, 0, 7), /* status3 */
- REG_FIELD_ID(0x3e, 0, 7, 4, 1), /* chan_timer */
- REG_FIELD_ID(0x42, 0, 6, 4, 1), /* itarget */
- REG_FIELD(0x46, 7, 7), /* module_en */
- REG_FIELD(0x49, 0, 3), /* iresolution */
- REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */
- REG_FIELD(0x4e, 0, 3), /* chan_en */
- REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */
- REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */
+ [REG_STATUS1] = REG_FIELD(0x06, 0, 7),
+ [REG_STATUS2] = REG_FIELD(0x07, 0, 6),
+ [REG_STATUS3] = REG_FIELD(0x09, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x3e, 0, 7, 4, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x42, 0, 6, 4, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x49, 0, 3),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x4a, 0, 6, 4, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4e, 0, 3),
+ [REG_THERM_THRSH1] = REG_FIELD(0x7a, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x78, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xed, 0, 6),
};
struct qcom_flash_data {
@@ -156,6 +159,7 @@ struct qcom_flash_data {
u8 max_channels;
u8 chan_en_bits;
u8 revision;
+ u8 torch_clamp;
};
struct qcom_flash_led {
@@ -702,6 +706,7 @@ static int qcom_flash_register_led_device(struct device *dev,
u32 current_ua, timeout_us;
u32 channels[4];
int i, rc, count;
+ u8 torch_clamp;
count = fwnode_property_count_u32(node, "led-sources");
if (count <= 0) {
@@ -751,6 +756,12 @@ static int qcom_flash_register_led_device(struct device *dev,
current_ua = min_t(u32, current_ua, TORCH_CURRENT_MAX_UA * led->chan_count);
led->max_torch_current_ma = current_ua / UA_PER_MA;
+ torch_clamp = (current_ua / led->chan_count) / TORCH_IRES_UA;
+ if (torch_clamp != 0)
+ torch_clamp--;
+
+ flash_data->torch_clamp = max_t(u8, flash_data->torch_clamp, torch_clamp);
+
if (fwnode_property_present(node, "flash-max-microamp")) {
flash->led_cdev.flags |= LED_DEV_CAP_FLASH;
@@ -917,8 +928,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
flash_data->leds_count++;
}
- return 0;
-
+ return regmap_field_write(flash_data->r_fields[REG_TORCH_CLAMP], flash_data->torch_clamp);
release:
while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count)
v4l2_flash_release(flash_data->v4l2_flash[flash_data->leds_count--]);
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index e71456a56ab8..fd447eb7eb15 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -212,7 +212,7 @@ int lp55xx_update_program_memory(struct lp55xx_chip *chip,
* For LED chip that support page, PAGE is already set in load_engine.
*/
if (!cfg->pages_per_engine)
- start_addr += LP55xx_BYTES_PER_PAGE * idx;
+ start_addr += LP55xx_BYTES_PER_PAGE * (idx - 1);
for (page = 0; page < program_length / LP55xx_BYTES_PER_PAGE; page++) {
/* Write to the next page each 32 bytes (if supported) */
diff --git a/drivers/leds/leds-max77705.c b/drivers/leds/leds-max77705.c
index 933cb4f19be9..b7403b3fcf5e 100644
--- a/drivers/leds/leds-max77705.c
+++ b/drivers/leds/leds-max77705.c
@@ -180,7 +180,7 @@ static int max77705_add_led(struct device *dev, struct regmap *regmap, struct fw
ret = fwnode_property_read_u32(np, "reg", ®);
if (ret || reg >= MAX77705_LED_NUM_LEDS)
- ret = -EINVAL;
+ return -EINVAL;
info = devm_kcalloc(dev, num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index c889332e533b..0070e4462ee2 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -162,6 +162,7 @@ struct mapped_device {
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
+#define DMF_QUEUE_STOPPED 10
static inline sector_t dm_get_size(struct mapped_device *md)
{
diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c
index 12f954a0c532..afb062e1f1fb 100644
--- a/drivers/md/dm-vdo/indexer/volume-index.c
+++ b/drivers/md/dm-vdo/indexer/volume-index.c
@@ -836,7 +836,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
@@ -928,7 +928,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a44e8c2dccee..66dd5f6ce778 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2908,7 +2908,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
- int r;
+ int r = 0;
lockdep_assert_held(&md->suspend_lock);
@@ -2960,8 +2960,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md))
+ if (map && dm_request_based(md)) {
dm_stop_queue(md->queue);
+ set_bit(DMF_QUEUE_STOPPED, &md->flags);
+ }
flush_workqueue(md->wq);
@@ -2970,7 +2972,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, task_state);
+ if (map)
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2983,7 +2986,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (r < 0) {
dm_queue_flush(md);
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
@@ -3067,7 +3070,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
* so that mapping of targets can work correctly.
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index b7ca39f63dba..6dfc91216851 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1329,10 +1329,13 @@ static int rj54n1_probe(struct i2c_client *client)
V4L2_CID_GAIN, 0, 127, 1, 66);
v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
- if (rj54n1->hdl.error)
- return rj54n1->hdl.error;
+ if (rj54n1->hdl.error) {
+ ret = rj54n1->hdl.error;
+ goto err_free_ctrl;
+ }
+
+ rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
rj54n1->clk_div = clk_div;
rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
rj54n1->rect.top = RJ54N1_ROW_SKIP;
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
index 7c39183dd44b..4a62d3500682 100644
--- a/drivers/media/i2c/vd55g1.c
+++ b/drivers/media/i2c/vd55g1.c
@@ -66,7 +66,7 @@
#define VD55G1_REG_READOUT_CTRL CCI_REG8(0x052e)
#define VD55G1_READOUT_CTRL_BIN_MODE_NORMAL 0
#define VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2 1
-#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ea)
+#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ae)
#define VD55G1_DUSTER_ENABLE BIT(0)
#define VD55G1_DUSTER_DISABLE 0
#define VD55G1_DUSTER_DYN_ENABLE BIT(1)
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 1cd990468d3d..d05e222b3921 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -154,12 +154,6 @@ struct zoran_jpg_settings {
struct zoran;
-/* zoran_fh contains per-open() settings */
-struct zoran_fh {
- struct v4l2_fh fh;
- struct zoran *zr;
-};
-
struct card_info {
enum card_type type;
char name[32];
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index f42f596d3e62..ec7fc1da4cc0 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -511,12 +511,11 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
- struct zoran_fh *fh = __fh;
int i;
int res = 0;
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
- return zoran_s_fmt_vid_out(file, fh, fmt);
+ return zoran_s_fmt_vid_out(file, __fh, fmt);
for (i = 0; i < NUM_FORMATS; i++)
if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc)
diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
index 0533d4a083d2..a078f1107300 100644
--- a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
@@ -239,7 +239,7 @@ static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
return 0;
}
-static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend)
{
struct delta_dev *delta = pctx->dev;
struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
@@ -256,8 +256,8 @@ static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
memset(params, 0, sizeof(*params));
- params->picture_start_addr_p = (u32)(au->paddr);
- params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
+ params->picture_start_addr_p = pstart;
+ params->picture_end_addr_p = pend;
/*
* !WARNING!
@@ -374,12 +374,14 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
struct delta_dev *delta = pctx->dev;
struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
int ret;
- struct delta_au au = *pau;
+ void *au_vaddr = pau->vaddr;
+ dma_addr_t au_dma = pau->paddr;
+ size_t au_size = pau->size;
unsigned int data_offset = 0;
struct mjpeg_header *header = &ctx->header_struct;
if (!ctx->header) {
- ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
header, &data_offset);
if (ret) {
pctx->stream_errors++;
@@ -405,17 +407,17 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
goto err;
}
- ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
ctx->header, &data_offset);
if (ret) {
pctx->stream_errors++;
goto err;
}
- au.paddr += data_offset;
- au.vaddr += data_offset;
+ au_dma += data_offset;
+ au_vaddr += data_offset;
- ret = delta_mjpeg_ipc_decode(pctx, &au);
+ ret = delta_mjpeg_ipc_decode(pctx, au_dma, au_dma + au_size - 1);
if (ret)
goto err;
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 4c1a68c9f575..6daf33e07ea0 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -82,6 +82,8 @@ static const struct regmap_config chtdc_ti_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
+ /* The hardware does not support reading multiple registers at once */
+ .use_single_read = true,
};
static const struct regmap_irq chtdc_ti_irqs[] = {
diff --git a/drivers/mfd/max77705.c b/drivers/mfd/max77705.c
index 6b263bacb8c2..e1a9bfd65856 100644
--- a/drivers/mfd/max77705.c
+++ b/drivers/mfd/max77705.c
@@ -61,21 +61,21 @@ static const struct regmap_config max77705_regmap_config = {
.max_register = MAX77705_PMIC_REG_USBC_RESET,
};
-static const struct regmap_irq max77705_topsys_irqs[] = {
- { .mask = MAX77705_SYSTEM_IRQ_BSTEN_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_SYSUVLO_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_SYSOVLO_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_TSHDN_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_TM_INT, },
+static const struct regmap_irq max77705_irqs[] = {
+ { .mask = MAX77705_SRC_IRQ_CHG, },
+ { .mask = MAX77705_SRC_IRQ_TOP, },
+ { .mask = MAX77705_SRC_IRQ_FG, },
+ { .mask = MAX77705_SRC_IRQ_USBC, },
};
-static const struct regmap_irq_chip max77705_topsys_irq_chip = {
- .name = "max77705-topsys",
- .status_base = MAX77705_PMIC_REG_SYSTEM_INT,
- .mask_base = MAX77705_PMIC_REG_SYSTEM_INT_MASK,
+static const struct regmap_irq_chip max77705_irq_chip = {
+ .name = "max77705",
+ .status_base = MAX77705_PMIC_REG_INTSRC,
+ .ack_base = MAX77705_PMIC_REG_INTSRC,
+ .mask_base = MAX77705_PMIC_REG_INTSRC_MASK,
.num_regs = 1,
- .irqs = max77705_topsys_irqs,
- .num_irqs = ARRAY_SIZE(max77705_topsys_irqs),
+ .irqs = max77705_irqs,
+ .num_irqs = ARRAY_SIZE(max77705_irqs),
};
static int max77705_i2c_probe(struct i2c_client *i2c)
@@ -108,21 +108,17 @@ static int max77705_i2c_probe(struct i2c_client *i2c)
if (pmic_rev != MAX77705_PASS3)
return dev_err_probe(dev, -ENODEV, "Rev.0x%x is not tested\n", pmic_rev);
+ /* Active Discharge Enable */
+ regmap_update_bits(max77705->regmap, MAX77705_PMIC_REG_MAINCTRL1, 1, 1);
+
ret = devm_regmap_add_irq_chip(dev, max77705->regmap,
i2c->irq,
- IRQF_ONESHOT | IRQF_SHARED, 0,
- &max77705_topsys_irq_chip,
+ IRQF_ONESHOT, 0,
+ &max77705_irq_chip,
&irq_data);
if (ret)
return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
- /* Unmask interrupts from all blocks in interrupt source register */
- ret = regmap_update_bits(max77705->regmap,
- MAX77705_PMIC_REG_INTSRC_MASK,
- MAX77705_SRC_IRQ_ALL, (unsigned int)~MAX77705_SRC_IRQ_ALL);
- if (ret < 0)
- return dev_err_probe(dev, ret, "Could not unmask interrupts in INTSRC\n");
-
domain = regmap_irq_get_domain(irq_data);
ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
diff --git a/drivers/mfd/rz-mtu3.c b/drivers/mfd/rz-mtu3.c
index f3dac4a29a83..9cdfef610398 100644
--- a/drivers/mfd/rz-mtu3.c
+++ b/drivers/mfd/rz-mtu3.c
@@ -32,7 +32,7 @@ static const unsigned long rz_mtu3_8bit_ch_reg_offs[][13] = {
[RZ_MTU3_CHAN_2] = MTU_8BIT_CH_1_2(0x204, 0x092, 0x205, 0x200, 0x20c, 0x201, 0x202),
[RZ_MTU3_CHAN_3] = MTU_8BIT_CH_3_4_6_7(0x008, 0x093, 0x02c, 0x000, 0x04c, 0x002, 0x004, 0x005, 0x038),
[RZ_MTU3_CHAN_4] = MTU_8BIT_CH_3_4_6_7(0x009, 0x094, 0x02d, 0x001, 0x04d, 0x003, 0x006, 0x007, 0x039),
- [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x1eb, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
+ [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x895, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
[RZ_MTU3_CHAN_6] = MTU_8BIT_CH_3_4_6_7(0x808, 0x893, 0x82c, 0x800, 0x84c, 0x802, 0x804, 0x805, 0x838),
[RZ_MTU3_CHAN_7] = MTU_8BIT_CH_3_4_6_7(0x809, 0x894, 0x82d, 0x801, 0x84d, 0x803, 0x806, 0x807, 0x839),
[RZ_MTU3_CHAN_8] = MTU_8BIT_CH_8(0x404, 0x098, 0x400, 0x406, 0x401, 0x402, 0x403)
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index fc2daffc4352..77245c1e5d7d 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -99,6 +99,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
struct resource *mem;
void __iomem *base;
struct gpio_chip *mmc_gpio_chip;
+ int ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
@@ -119,7 +120,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
NULL, NULL, NULL, NULL, 0);
mmc_gpio_chip->ngpio = 2;
- devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
+
+ ret = devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
+ if (ret)
+ return ret;
return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
vexpress_sysreg_cells,
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 53e88a1bc430..7eec907ed454 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -323,11 +323,11 @@ static void fastrpc_free_map(struct kref *ref)
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RWX;
- err = qcom_scm_assign_mem(map->phys, map->size,
+ err = qcom_scm_assign_mem(map->phys, map->len,
&src_perms, &perm, 1);
if (err) {
dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
- map->phys, map->size, err);
+ map->phys, map->len, err);
return;
}
}
@@ -363,26 +363,21 @@ static int fastrpc_map_get(struct fastrpc_map *map)
static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
- struct fastrpc_map **ppmap, bool take_ref)
+ struct fastrpc_map **ppmap)
{
- struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
+ struct dma_buf *buf;
int ret = -ENOENT;
+ buf = dma_buf_get(fd);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
spin_lock(&fl->lock);
list_for_each_entry(map, &fl->maps, node) {
- if (map->fd != fd)
+ if (map->fd != fd || map->buf != buf)
continue;
- if (take_ref) {
- ret = fastrpc_map_get(map);
- if (ret) {
- dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
- __func__, fd, ret);
- break;
- }
- }
-
*ppmap = map;
ret = 0;
break;
@@ -752,16 +747,14 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
.release = fastrpc_release,
};
-static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
u64 len, u32 attr, struct fastrpc_map **ppmap)
{
struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
struct sg_table *table;
- int err = 0;
-
- if (!fastrpc_map_lookup(fl, fd, ppmap, true))
- return 0;
+ struct scatterlist *sgl = NULL;
+ int err = 0, sgl_index = 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
@@ -798,7 +791,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
map->phys = sg_dma_address(map->table->sgl);
map->phys += ((u64)fl->sctx->sid << 32);
}
- map->size = len;
+ for_each_sg(map->table->sgl, sgl, map->table->nents,
+ sgl_index)
+ map->size += sg_dma_len(sgl);
+ if (len > map->size) {
+ dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n",
+ len, map->size);
+ err = -EINVAL;
+ goto map_err;
+ }
map->va = sg_virt(map->table->sgl);
map->len = len;
@@ -815,10 +816,10 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
dst_perms[1].perm = QCOM_SCM_PERM_RWX;
map->attr = attr;
- err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
+ err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2);
if (err) {
dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
- map->phys, map->size, err);
+ map->phys, map->len, err);
goto map_err;
}
}
@@ -839,6 +840,24 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
return err;
}
+static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ u64 len, u32 attr, struct fastrpc_map **ppmap)
+{
+ struct fastrpc_session_ctx *sess = fl->sctx;
+ int err = 0;
+
+ if (!fastrpc_map_lookup(fl, fd, ppmap)) {
+ if (!fastrpc_map_get(*ppmap))
+ return 0;
+ dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n",
+ __func__, fd);
+ }
+
+ err = fastrpc_map_attach(fl, fd, len, attr, ppmap);
+
+ return err;
+}
+
/*
* Fastrpc payload buffer with metadata looks like:
*
@@ -911,8 +930,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
ctx->args[i].length == 0)
continue;
- err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
- ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+ if (i < ctx->nbufs)
+ err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
+ ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+ else
+ err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd,
+ ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
if (err) {
dev_err(dev, "Error Creating map %d\n", err);
return -EINVAL;
@@ -1071,6 +1094,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
struct fastrpc_phy_page *pages;
u64 *fdlist;
int i, inbufs, outbufs, handles;
+ int ret = 0;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
@@ -1086,23 +1110,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
u64 len = rpra[i].buf.len;
if (!kernel) {
- if (copy_to_user((void __user *)dst, src, len))
- return -EFAULT;
+ if (copy_to_user((void __user *)dst, src, len)) {
+ ret = -EFAULT;
+ goto cleanup_fdlist;
+ }
} else {
memcpy(dst, src, len);
}
}
}
+cleanup_fdlist:
/* Clean up fdlist which is updated by DSP */
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
- if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
+ if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
fastrpc_map_put(mmap);
}
- return 0;
+ return ret;
}
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
@@ -2046,7 +2073,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
args[0].length = sizeof(req_msg);
pages.addr = map->phys;
- pages.size = map->size;
+ pages.size = map->len;
args[1].ptr = (u64) (uintptr_t) &pages;
args[1].length = sizeof(pages);
@@ -2061,7 +2088,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (err) {
dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
- req.fd, req.vaddrin, map->size);
+ req.fd, req.vaddrin, map->len);
goto err_invoke;
}
@@ -2074,7 +2101,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
/* unmap the memory and release the buffer */
req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
- req_unmap.length = map->size;
+ req_unmap.length = map->len;
fastrpc_req_mem_unmap_impl(fl, &req_unmap);
return -EFAULT;
}
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 500b1feaf1f6..fd7d5cd50d39 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
}
if (cmd->asv_length > DDCB_ASV_LENGTH) {
dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
- __func__, cmd->asiv_length);
+ __func__, cmd->asv_length);
return -EINVAL;
}
rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 1c156a3f845e..f935175d8bf5 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -937,7 +937,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case PCITEST_BAR:
bar = arg;
- if (bar > BAR_5)
+ if (bar <= NO_BAR || bar > BAR_5)
goto ret;
if (is_am654_pci_dev(pdev) && bar == BAR_0)
goto ret;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9cc47bf94804..dd6cffc0df72 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2936,15 +2936,15 @@ static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
return -ENOMEM;
if (write) {
- struct rpmb_frame *frm = (struct rpmb_frame *)resp;
+ struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp;
/* Send write request frame(s) */
set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
1 | MMC_CMD23_ARG_REL_WR, req, req_len);
/* Send result request frame */
- memset(frm, 0, sizeof(*frm));
- frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ memset(resp_frm, 0, sizeof(*resp_frm));
+ resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
resp_len);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7232de1c0688..5cc415ba4f55 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -1115,6 +1115,7 @@ config MMC_LOONGSON2
tristate "Loongson-2K SD/SDIO/eMMC Host Interface support"
depends on LOONGARCH || COMPILE_TEST
depends on HAS_DMA
+ select REGMAP_MMIO
help
This selects support for the SD/SDIO/eMMC Host Controller on
Loongson-2K series CPUs.
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index db94d14a3807..49e00458eebe 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1858,7 +1858,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
{
- struct device_node *np, *nand_np;
+ struct device_node *np;
struct device *dev = nc->dev;
int ret, reg_cells;
u32 val;
@@ -1885,7 +1885,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
reg_cells += val;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
struct atmel_nand *nand;
nand = atmel_nand_create(nc, nand_np, reg_cells);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 57be04f6cb11..f4f0feddd9fa 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4411,7 +4411,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 57fff2421f1b..7a9d73ec8e91 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -579,20 +579,22 @@ static int bond_newlink(struct net_device *bond_dev,
struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct nlattr **data = params->data;
struct nlattr **tb = params->tb;
int err;
- err = bond_changelink(bond_dev, tb, data, extack);
- if (err < 0)
+ err = register_netdevice(bond_dev);
+ if (err)
return err;
- err = register_netdevice(bond_dev);
- if (!err) {
- struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
- netif_carrier_off(bond_dev);
- bond_work_init_all(bond);
+ err = bond_changelink(bond_dev, tb, data, extack);
+ if (err) {
+ bond_work_cancel_all(bond);
+ unregister_netdevice(bond_dev);
}
return err;
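
The reordering above registers the netdevice before applying the netlink configuration and tears the registration back down if configuring fails. A minimal userspace C sketch of that register-then-configure-with-unwind pattern (hypothetical names, not the bonding API):

#include <stdio.h>

struct dev {
	int registered;
	int configured;
};

static int register_dev(struct dev *d)
{
	d->registered = 1;
	return 0;
}

static void unregister_dev(struct dev *d)
{
	d->registered = 0;
}

static int configure_dev(struct dev *d, int bad_cfg)
{
	if (bad_cfg)
		return -1;	/* configuration rejected */
	d->configured = 1;
	return 0;
}

static int newlink(struct dev *d, int bad_cfg)
{
	int err;

	/* Register first so later configuration acts on a live object... */
	err = register_dev(d);
	if (err)
		return err;

	/* ...then configure, and unwind the registration on failure. */
	err = configure_dev(d, bad_cfg);
	if (err)
		unregister_dev(d);
	return err;
}

int main(void)
{
	struct dev d = { 0 };

	printf("good config: %d (registered=%d)\n", newlink(&d, 0), d.registered);
	printf("bad config:  %d (registered=%d)\n", newlink(&d, 1), d.registered);
	return 0;
}
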
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a81d3a7a3bb9..fe3479b84a1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -865,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
- return ENA_HASH_KEY_SIZE;
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_rss *rss = &adapter->ena_dev->rss;
+
+ return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
}
static int ena_indirection_table_set(struct ena_adapter *adapter,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c9a5c8beb2fa..a7e845fee4b3 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -213,10 +213,8 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
@@ -1214,10 +1212,8 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
- unsigned int TBQPH;
unsigned int RBQS;
unsigned int RBQP;
- unsigned int RBQPH;
/* Lock to protect tx_head and tx_tail */
spinlock_t tx_ptr_lock;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c769b7dbd3ba..fc082a7a5a31 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -51,14 +51,10 @@ struct sifive_fu540_macb_mgmt {
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
@@ -495,19 +491,19 @@ static void macb_init_buffers(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
+ /* Single register for all queues' high 32 bits. */
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->queues[0].rx_ring_dma));
+ macb_writel(bp, TBQPH,
+ upper_32_bits(bp->queues[0].tx_ring_dma));
+ }
#endif
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
}
}
@@ -1166,10 +1162,6 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -2474,35 +2466,42 @@ static void macb_free_rx_buffers(struct macb *bp)
}
}
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
static void macb_free_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
struct macb_queue *queue;
unsigned int q;
- int size;
+ size_t size;
if (bp->rx_ring_tieoff) {
- dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ dma_free_coherent(dev, macb_dma_desc_get_size(bp),
bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
bp->rx_ring_tieoff = NULL;
}
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
- if (queue->tx_ring) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->tx_ring, queue->tx_ring_dma);
- queue->tx_ring = NULL;
- }
- if (queue->rx_ring) {
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
+ queue->tx_ring = NULL;
+ queue->rx_ring = NULL;
}
}
@@ -2544,35 +2543,45 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
+ dma_addr_t tx_dma, rx_dma;
struct macb_queue *queue;
unsigned int q;
- int size;
+ void *tx, *rx;
+ size_t size;
+
+ /*
+ * The upper 32 bits of the Tx/Rx DMA descriptor base for each queue must
+ * match! We cannot enforce this guarantee; the best we can do is a single
+ * allocation and hope it lands in alloc_pages(), which guarantees
+ * natural alignment of physical addresses.
+ */
+
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+ if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+ if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)rx_dma, rx);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->tx_ring_dma,
- GFP_KERNEL);
- if (!queue->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
- q, size, (unsigned long)queue->tx_ring_dma,
- queue->tx_ring);
+ queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+ queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+ queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+ queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
-
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->rx_ring_dma, GFP_KERNEL);
- if (!queue->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -4309,12 +4318,6 @@ static int macb_init(struct platform_device *pdev)
queue->TBQP = GEM_TBQP(hw_q - 1);
queue->RBQP = GEM_RBQP(hw_q - 1);
queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = GEM_TBQPH(hw_q - 1);
- queue->RBQPH = GEM_RBQPH(hw_q - 1);
- }
-#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -4323,12 +4326,6 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = MACB_TBQPH;
- queue->RBQPH = MACB_RBQPH;
- }
-#endif
}
/* get irq: here we use the linux queue index, not the hardware
@@ -5452,6 +5449,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
*/
tmp = macb_readl(bp, NCR);
macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
/* Disable RX queues */
@@ -5461,10 +5463,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
/* Tie off RX queues */
queue_writel(queue, RBQP,
lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, RBQPH,
- upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
}
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
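
The macb changes above replace per-queue ring allocations with one coherent buffer per direction, verify that the buffer does not straddle a 4 GiB boundary (since the controller now programs a single TBQPH/RBQPH shared by all queues), and slice it per queue. A minimal userspace sketch of the same carve-up and boundary check, with a hypothetical fixed size standing in for the driver's ring-size helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_QUEUES	4
#define RING_BYTES	4096u	/* stand-in for the per-queue ring size */

static uint32_t upper_32_bits(uint64_t v)
{
	return (uint32_t)(v >> 32);
}

int main(void)
{
	size_t size = (size_t)NUM_QUEUES * RING_BYTES;
	unsigned char *base = malloc(size);
	uint64_t dma_base;
	unsigned int q;

	if (!base)
		return 1;

	/* Pretend the virtual address is also the bus address. */
	dma_base = (uint64_t)(uintptr_t)base;

	/* One shared "high 32 bits" register serves every queue, so the whole
	 * region must not cross a 4 GiB boundary. */
	if (upper_32_bits(dma_base) != upper_32_bits(dma_base + size - 1)) {
		free(base);
		return 1;
	}

	/* Carve one fixed-size slice per queue out of the single allocation. */
	for (q = 0; q < NUM_QUEUES; q++) {
		unsigned char *ring = base + (size_t)q * RING_BYTES;
		uint64_t ring_dma = dma_base + (uint64_t)q * RING_BYTES;

		printf("queue %u: ring=%p dma=0x%llx\n", q, (void *)ring,
		       (unsigned long long)ring_dma);
	}

	free(base);
	return 0;
}
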
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 6bbf6e5584e5..1996d2e4e3e2 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -964,15 +964,18 @@ receive_packet (struct net_device *dev)
} else {
struct sk_buff *skb;
+ skb = NULL;
/* Small skbuffs for short packets */
- if (pkt_len > copy_thresh) {
+ if (pkt_len <= copy_thresh)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (!skb) {
dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ } else {
dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index b3dc1afeefd1..a5c1f1cef3b0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -1030,7 +1030,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
err = enetc_get_driver_data(si);
if (err)
return dev_err_probe(dev, err,
- "Could not get VF driver data\n");
+ "Could not get PF driver data\n");
err = enetc4_pf_struct_init(si);
if (err)
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
index ba32c1bbd9e1..0c1d343253bf 100644
--- a/drivers/net/ethernet/freescale/enetc/ntmp.c
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -52,24 +52,19 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
NTMP_BASE_ADDR_ALIGN);
- cbdr->next_to_clean = 0;
- cbdr->next_to_use = 0;
spin_lock_init(&cbdr->ring_lock);
+ cbdr->next_to_use = netc_read(cbdr->regs.pir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
/* Step 1: Configure the base address of the Control BD Ring */
netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
- /* Step 2: Configure the producer index register */
- netc_write(cbdr->regs.pir, cbdr->next_to_clean);
-
- /* Step 3: Configure the consumer index register */
- netc_write(cbdr->regs.cir, cbdr->next_to_use);
-
- /* Step4: Configure the number of BDs of the Control BD Ring */
+ /* Step 2: Configure the number of BDs of the Control BD Ring */
netc_write(cbdr->regs.lenr, cbdr->bd_num);
- /* Step 5: Enable the Control BD Ring */
+ /* Step 3: Enable the Control BD Ring */
netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index eaad52a83b04..50f90ed3107e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3187,18 +3187,14 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc
- */
- dma_rmb();
-
/* if the descriptor isn't done, no work yet to do */
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
-
if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
+ dma_rmb();
+
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
rx_desc->rxdid_ucast);
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
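
The idpf hunk above moves dma_rmb() after the generation-bit check, so the barrier only has to order the reads of the remaining descriptor fields once the descriptor is known to be done. A rough C11 sketch of that check-then-read ordering, using an acquire load as a stand-in for the driver's read barrier (layout and names are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

struct desc {
	_Atomic unsigned int gen;	/* written last by the "device" */
	unsigned int len;		/* valid only once gen matches */
};

static int poll_desc(struct desc *d, unsigned int expected_gen)
{
	/* Read the ownership/generation field first... */
	if (atomic_load_explicit(&d->gen, memory_order_acquire) != expected_gen)
		return -1;		/* descriptor not done yet, stop */

	/* ...the acquire load orders this read after the gen check,
	 * standing in for dma_rmb() in the driver. */
	printf("descriptor done, len=%u\n", d->len);
	return 0;
}

int main(void)
{
	struct desc d = { .len = 64 };

	atomic_store_explicit(&d.gen, 1, memory_order_release);
	return poll_desc(&d, 1);
}
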
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 6330d4a0ae07..c1f34381333d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -702,9 +702,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
/* If post failed clear the only buffer we supplied */
if (post_err) {
if (dma_mem)
- dmam_free_coherent(&adapter->pdev->dev,
- dma_mem->size, dma_mem->va,
- dma_mem->pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
break;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 5027fae0aa77..e808995703cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -3542,6 +3542,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
+ bitmap_free(pf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7ebb6e656884..25381f079b97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -854,6 +854,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
+ bitmap_free(vf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e395ef5f356e..722282cebce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,6 +294,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
return;
}
cond_resched();
+ if (mlx5_cmd_is_down(dev)) {
+ ent->ret = -ENXIO;
+ return;
+ }
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -1070,7 +1074,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index 66d276a1be83..f4a19ffbb641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -66,23 +66,11 @@ struct mlx5e_port_buffer {
struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER];
};
-#ifdef CONFIG_MLX5_CORE_EN_DCB
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
u32 *buffer_size,
u8 *prio2buffer);
-#else
-static inline int
-mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
- u32 change, unsigned int mtu,
- void *pfc,
- u32 *buffer_size,
- u8 *prio2buffer)
-{
- return 0;
-}
-#endif
int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 15eded36b872..21bb88c5d3dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -49,7 +49,6 @@
#include "en.h"
#include "en/dim.h"
#include "en/txrx.h"
-#include "en/port_buffer.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
@@ -3041,11 +3040,9 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 mtu, prev_mtu;
+ u16 mtu;
int err;
- mlx5e_query_mtu(mdev, params, &prev_mtu);
-
err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
if (err)
return err;
@@ -3055,18 +3052,6 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
__func__, mtu, params->sw_mtu);
- if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) {
- err = mlx5e_port_manual_buffer_config(priv, 0, mtu,
- NULL, NULL, NULL);
- if (err) {
- netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n",
- __func__, mtu, err, prev_mtu);
-
- mlx5e_set_mtu(mdev, params, prev_mtu);
- return err;
- }
- }
-
params->sw_mtu = mtu;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 22995131824a..89e399606877 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -27,6 +27,7 @@ struct mlx5_fw_reset {
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
+ struct delayed_work reset_timeout_work;
unsigned long reset_flags;
u8 reset_method;
struct timer_list timer;
@@ -259,6 +260,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
return -EALREADY;
}
+ if (current_work() != &fw_reset->reset_timeout_work.work)
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
mlx5_stop_sync_reset_poll(dev);
if (poll_health)
mlx5_start_health_poll(dev);
@@ -330,6 +333,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
}
mlx5_stop_health_poll(dev, true);
mlx5_start_sync_reset_poll(dev);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ &fw_reset->reset_flags))
+ schedule_delayed_work(&fw_reset->reset_timeout_work,
+ msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
return 0;
}
@@ -739,6 +747,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
}
}
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_fw_reset *fw_reset =
+ container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
+ return;
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -822,6 +843,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
}
static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -865,6 +887,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+ INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+ mlx5_sync_reset_timeout_work);
init_completion(&fw_reset->done);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9bc9bd83c232..cd68c4b2c0bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 func_id;
u32 npages;
u32 i = 0;
+ int err;
- if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_do(dev, in, in_size, out, out_size);
+ err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+ /* If FW is gone (-ENXIO), proceed to forceful reclaim */
+ if (err != -ENXIO)
+ return err;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
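
The pagealloc change above always issues the firmware command first and falls back to forceful reclaim only when the command layer reports the device is gone (-ENXIO), instead of skipping the command whenever the interface merely looks down. A small sketch of that error-driven fallback, with hypothetical helpers:

#include <errno.h>
#include <stdio.h>

/* Stand-in for the firmware command: fails with -ENXIO when FW is gone. */
static int fw_reclaim_pages(int fw_alive)
{
	return fw_alive ? 0 : -ENXIO;
}

static int reclaim_pages(int fw_alive)
{
	int err = fw_reclaim_pages(fw_alive);

	/* Any outcome other than "device gone" is handed back to the caller,
	 * including success. */
	if (err != -ENXIO)
		return err;

	/* Firmware unreachable: reclaim the pages locally by force. */
	printf("forceful local reclaim\n");
	return 0;
}

int main(void)
{
	printf("fw alive: %d\n", reclaim_pages(1));
	printf("fw gone:  %d\n", reclaim_pages(0));
	return 0;
}
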
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a36215195923..16c828dd5c1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1788,7 +1788,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
- return -EOPNOTSUPP;
+ return 0;
return nfp_net_rss_key_sz(nn);
}
diff --git a/drivers/net/phy/as21xxx.c b/drivers/net/phy/as21xxx.c
index 92697f43087d..005277360656 100644
--- a/drivers/net/phy/as21xxx.c
+++ b/drivers/net/phy/as21xxx.c
@@ -884,11 +884,12 @@ static int as21xxx_match_phy_device(struct phy_device *phydev,
u32 phy_id;
int ret;
- /* Skip PHY that are not AS21xxx or already have firmware loaded */
- if (phydev->c45_ids.device_ids[MDIO_MMD_PCS] != PHY_ID_AS21XXX)
+ /* Skip PHYs that are not AS21xxx */
+ if (!phy_id_compare_vendor(phydev->c45_ids.device_ids[MDIO_MMD_PCS],
+ PHY_VENDOR_AEONSEMI))
return genphy_match_phy_device(phydev, phydrv);
- /* Read PHY ID to handle firmware just loaded */
+ /* Read PHY ID to handle firmware loaded or HW reset */
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID1);
if (ret < 0)
return ret;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 792ddda1ad49..85bd5d845409 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -625,6 +625,21 @@ static void ax88772_suspend(struct usbnet *dev)
asix_read_medium_status(dev, 1));
}
+/* Notes on PM callbacks and locking context:
+ *
+ * - asix_suspend()/asix_resume() are invoked for both runtime PM and
+ * system-wide suspend/resume. For struct usb_driver the ->resume()
+ * callback does not receive pm_message_t, so the resume type cannot
+ * be distinguished here.
+ *
+ * - The MAC driver must hold RTNL when calling phylink interfaces such as
+ * phylink_suspend()/resume(). Those calls will also perform MDIO I/O.
+ *
+ * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while
+ * the USB PM lock is held) is fragile. Since autosuspend brings no
+ * measurable power saving here, we block it by holding a PM usage
+ * reference in ax88772_bind().
+ */
static int asix_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -919,6 +934,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto initphy_err;
+ /* Keep this interface runtime-PM active by taking a usage ref.
+ * Prevents runtime suspend while bound and avoids resume paths
+ * that could deadlock (autoresume under RTNL while USB PM lock
+ * is held, phylink/MDIO wants RTNL).
+ */
+ pm_runtime_get_noresume(&intf->dev);
+
return 0;
initphy_err:
@@ -948,6 +970,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
phylink_destroy(priv->phylink);
ax88772_mdio_unregister(priv);
asix_rx_fixup_common_free(dev->driver_priv);
+ /* Drop the PM usage ref taken in bind() */
+ pm_runtime_put(&intf->dev);
}
static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1600,6 +1624,11 @@ static struct usb_driver asix_driver = {
.resume = asix_resume,
.reset_resume = asix_resume,
.disconnect = usbnet_disconnect,
+ /* usbnet enables autosuspend by default (supports_autosuspend=1).
+ * We keep runtime-PM active for AX88772* by taking a PM usage
+ * reference in ax88772_bind() (pm_runtime_get_noresume()) and
+ * dropping it in unbind(), which effectively blocks autosuspend.
+ */
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
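
The bind/unbind hunks above pin the interface's runtime-PM usage count for the lifetime of the binding, which is what actually blocks autosuspend. A toy usage-count sketch of that pairing (plain C, hypothetical names, not the kernel runtime-PM API):

#include <stdio.h>

struct pm_state {
	int usage_count;
};

static void pm_get(struct pm_state *pm)
{
	pm->usage_count++;
}

static void pm_put(struct pm_state *pm)
{
	pm->usage_count--;
}

static int pm_may_suspend(const struct pm_state *pm)
{
	return pm->usage_count == 0;	/* autosuspend only at zero */
}

int main(void)
{
	struct pm_state pm = { 0 };

	pm_get(&pm);					/* bind() */
	printf("bound:   may suspend? %d\n", pm_may_suspend(&pm));
	pm_put(&pm);					/* unbind() */
	printf("unbound: may suspend? %d\n", pm_may_suspend(&pm));
	return 0;
}
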
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index ddff6f19ff98..92add3daadbb 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rtl8150_t *dev = netdev_priv(netdev);
u16 rx_creg = 0x9e;
- netif_stop_queue(netdev);
if (netdev->flags & IFF_PROMISC) {
rx_creg |= 0x0001;
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
@@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rx_creg &= 0x00fc;
}
async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
- netif_wake_queue(netdev);
}
static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb8ae751eb31..e595b0979a56 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1764,33 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
+ unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
unsigned long time_left, i;
- time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left) {
- /* Sometimes the PCI HIF doesn't receive interrupt
- * for the service ready message even if the buffer
- * was completed. PCIe sniffer shows that it's
- * because the corresponding CE ring doesn't fires
- * it. Workaround here by polling CE rings once.
- */
- ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
-
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even if the buffer
+ * was completed. A PCIe sniffer shows that this is
+ * because the corresponding CE ring doesn't fire
+ * it. Work around this by polling the CE rings. Since
+ * the message could arrive at any time, continue
+ * polling until timeout.
+ */
+ do {
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(ar, i, 1);
+ /* The 100 ms granularity is a tradeoff between scheduler
+ * overhead and response latency
+ */
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left) {
- ath10k_warn(ar, "polling timed out\n");
- return -ETIMEDOUT;
- }
-
- ath10k_warn(ar, "service ready completion received, continuing normally\n");
- }
+ msecs_to_jiffies(100));
+ if (time_left)
+ return 0;
+ } while (time_before(jiffies, timeout));
- return 0;
+ ath10k_warn(ar, "failed to receive service ready completion\n");
+ return -ETIMEDOUT;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
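
The rewritten wait loop above pokes the CE rings and then waits in 100 ms slices, repeating until the overall service-ready timeout expires. A simplified sketch of that slice-until-deadline pattern, with hypothetical helpers in place of the completion and the CE polling:

#include <stdio.h>

/* Stand-in for the completion: becomes ready after a few polls. */
static int event_ready(int polls_needed, int *polls)
{
	return ++(*polls) >= polls_needed;
}

static int wait_for_event(int slice_ms, int total_ms, int polls_needed)
{
	int elapsed = 0, polls = 0;

	do {
		/* Kick the source once per slice (stand-in for polling the
		 * CE rings), then wait one slice for the completion. */
		if (event_ready(polls_needed, &polls))
			return 0;
		elapsed += slice_ms;
	} while (elapsed < total_ms);

	return -1;	/* overall deadline expired */
}

int main(void)
{
	printf("arrives in time: %d\n", wait_for_event(100, 5000, 3));
	printf("never arrives:   %d\n", wait_for_event(100, 500, 100));
	return 0;
}
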
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index f93a419abf65..c5aadbc6367c 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -478,7 +478,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
}
while ((skb = __skb_dequeue(&list))) {
- ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index 48916e4e1f60..bf254e43a68d 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -26,6 +26,7 @@ enum ath12k_debug_mask {
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
ATH12K_DBG_WOW = 0x00008000,
+ ATH12K_DBG_CE = 0x00010000,
ATH12K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 8189e52ed007..009c49502148 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1440,6 +1440,34 @@ static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
*errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
}
+static void
+ath12k_parse_cmn_usr_info(const struct hal_phyrx_common_user_info *cmn_usr_info,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data, cp_setting, ltf_size;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF;
+ eht->known = cpu_to_le32(known);
+
+ cp_setting = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_CP_SETTING);
+ ltf_size = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_LTF_SIZE);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= u32_encode_bits(cp_setting, IEEE80211_RADIOTAP_EHT_DATA0_GI);
+ data |= u32_encode_bits(ltf_size, IEEE80211_RADIOTAP_EHT_DATA0_LTF);
+ eht->data[0] = cpu_to_le32(data);
+
+ if (!ppdu_info->ltf_size)
+ ppdu_info->ltf_size = ltf_size;
+ if (!ppdu_info->gi)
+ ppdu_info->gi = cp_setting;
+}
+
static void
ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
const struct hal_rx_msdu_end *msdu_end)
@@ -1627,25 +1655,22 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
info[0] = __le32_to_cpu(rssi->info0);
- info[1] = __le32_to_cpu(rssi->info1);
+ info[2] = __le32_to_cpu(rssi->info2);
/* TODO: Please note that the combined rssi will not be accurate
* in MU case. Rssi in MU needs to be retrieved from
* PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
- u32_get_bits(info[1],
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB);
+ u32_get_bits(info[2],
+ HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU);
ppdu_info->bw = u32_get_bits(info[0],
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
+ HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
- case HAL_PHYRX_OTHER_RECEIVE_INFO: {
- const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data;
-
- ppdu_info->gi = le32_get_bits(cmn_usr_info->info0,
- HAL_RX_PHY_CMN_USER_INFO0_GI);
+ case HAL_PHYRX_COMMON_USER_INFO: {
+ ath12k_parse_cmn_usr_info(tlv_data, ppdu_info);
break;
}
case HAL_RX_PPDU_START_USER_INFO:
@@ -2154,8 +2179,12 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
rxs->flag |= RX_FLAG_MACTIME_START;
- rxs->signal = ppduinfo->rssi_comb + noise_floor;
rxs->nss = ppduinfo->nss + 1;
+ if (test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map))
+ rxs->signal = ppduinfo->rssi_comb;
+ else
+ rxs->signal = ppduinfo->rssi_comb + noise_floor;
if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
@@ -2244,6 +2273,7 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
+ const struct hal_rx_mon_ppdu_info *ppduinfo,
struct ieee80211_rx_status *status,
u8 decap)
{
@@ -2257,7 +2287,6 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ath12k_dp_rx_info rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
@@ -2271,8 +2300,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
spin_lock_bh(&ar->ab->base_lock);
- rx_info.addr2_present = false;
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info);
+ peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id);
if (peer && peer->sta) {
pubsta = peer->sta;
if (pubsta->valid_links) {
@@ -2365,7 +2393,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
decap = mon_mpdu->decap_format;
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
- ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, ppduinfo, rxs, decap);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 8ab91273592c..9048818984f1 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -21,6 +21,9 @@
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid *rx_tid);
+
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -769,6 +772,23 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
rx_tid->qbuf.vaddr = NULL;
}
+static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {};
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ /* Flush cache failures have been observed; to avoid them, set the vld bit during delete */
+ cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
+
+ return ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath12k_dp_rx_tid_del_func);
+}
+
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
dma_addr_t paddr)
{
@@ -828,20 +848,13 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
struct ath12k_peer *peer, u8 tid)
{
- struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
int ret;
if (!rx_tid->active)
return;
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
- ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
- HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
- ath12k_dp_rx_tid_del_func);
+ ret = ath12k_dp_rx_tid_delete_handler(ar->ab, rx_tid);
if (ret) {
ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
@@ -2533,6 +2546,8 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
channel_num = meta_data;
center_freq = meta_data >> 16;
+ rx_status->band = NUM_NL80211_BANDS;
+
if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
center_freq <= ATH12K_MAX_6GHZ_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
@@ -2541,21 +2556,33 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;
- } else {
+ }
+
+ if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
+ !ath12k_ar_to_hw(ar)->wiphy->bands[rx_status->band])) {
+ ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+ rx_status->band, channel_num, center_freq, ar->pdev_idx);
+
spin_lock_bh(&ar->data_lock);
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(channel->center_freq);
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+ } else {
+ ath12k_err(ar->ab, "unable to determine channel, band for rx packet");
}
spin_unlock_bh(&ar->data_lock);
+ goto h_rate;
}
if (rx_status->band != NL80211_BAND_6GHZ)
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
+h_rate:
ath12k_dp_rx_h_rate(ar, rx_info);
}
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index a3ab588aae19..d1ad7747b82c 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -483,15 +483,16 @@ enum hal_rx_ul_reception_type {
HAL_RECEPTION_TYPE_FRAMELESS
};
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU GENMASK(7, 0)
struct hal_rx_phyrx_rssi_legacy_info {
__le32 info0;
__le32 rsvd0[39];
__le32 info1;
- __le32 rsvd1;
+ __le32 info2;
} __packed;
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
@@ -695,7 +696,8 @@ struct hal_rx_resp_req_info {
#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
-#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16)
+#define HAL_RX_CMN_USR_INFO0_CP_SETTING GENMASK(17, 16)
+#define HAL_RX_CMN_USR_INFO0_LTF_SIZE GENMASK(19, 18)
struct hal_phyrx_common_user_info {
__le32 rsvd[2];
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 3a3965b79942..2644b5d4b0bc 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -11240,8 +11240,8 @@ void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
- s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
- psd_power, tx_power, eirp_power;
+ s8 max_tx_power[ATH12K_NUM_PWR_LEVELS], psd_power, tx_power;
+ s8 eirp_power = 0;
struct ath12k_vif *ahvif = arvif->ahvif;
u16 start_freq, center_freq;
u8 reg_6ghz_power_mode;
@@ -11447,8 +11447,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_psd->count,
reg_psd->count);
- if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
- tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+ tpc_info->num_pwr_levels =
+ min3(tpc_info->num_pwr_levels,
+ IEEE80211_TPE_PSD_ENTRIES_320MHZ,
+ ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_psd->power[i],
@@ -11463,8 +11465,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_non_psd->count,
reg_non_psd->count);
- if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
- tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+ tpc_info->num_pwr_levels =
+ min3(tpc_info->num_pwr_levels,
+ IEEE80211_TPE_EIRP_ENTRIES_320MHZ,
+ ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_non_psd->power[i],
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 8ab7d1e34a6e..6a3f187320fc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -997,9 +997,9 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43751, WCC),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43752, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012, CYW),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359, CYW),
CYW_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439, CYW),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 9074ab49e806..4239f2b21e54 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -738,8 +738,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
- case CY_CC_43752_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
@@ -1452,7 +1452,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
case BRCM_CC_4359_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
- case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8a0bad5119a0..8cf9d7e7c3f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -655,10 +655,10 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(BRCM_CC_43751_CHIP_ID, 0xFFFFFFFF, 43752),
+ BRCMF_FW_ENTRY(BRCM_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
- BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
};
#define TXCTL_CREDITS 2
@@ -3426,8 +3426,8 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
{
if (bus->ci->chip == BRCM_CC_43751_CHIP_ID ||
- bus->ci->chip == CY_CC_43012_CHIP_ID ||
- bus->ci->chip == CY_CC_43752_CHIP_ID)
+ bus->ci->chip == BRCM_CC_43752_CHIP_ID ||
+ bus->ci->chip == CY_CC_43012_CHIP_ID)
return true;
else
return false;
@@ -4278,8 +4278,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
switch (sdiod->func1->device) {
case SDIO_DEVICE_ID_BROADCOM_43751:
+ case SDIO_DEVICE_ID_BROADCOM_43752:
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
- case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_4373_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index b39c5c1ee18b..df3b67ba4db2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -60,7 +60,6 @@
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
#define CY_CC_43439_CHIP_ID 43439
-#define CY_CC_43752_CHIP_ID 43752
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index a07c512b6ed4..735482e7adf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -12,7 +12,6 @@
#include "fw/api/phy.h"
#include "fw/api/config.h"
#include "fw/api/nvm-reg.h"
-#include "fw/img.h"
#include "iwl-trans.h"
#define BIOS_SAR_MAX_PROFILE_NUM 4
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 4c8c7a5fdf23..be23a29e7de0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -686,10 +686,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
return;
}
- /* Don't send world or same regdom info to firmware */
- if (strncmp(request->alpha2, "00", 2) &&
- strncmp(request->alpha2, adapter->country_code,
- sizeof(request->alpha2))) {
+ /* Don't send same regdom info to firmware */
+ if (strncmp(request->alpha2, adapter->country_code,
+ sizeof(request->alpha2)) != 0) {
memcpy(adapter->country_code, request->alpha2,
sizeof(request->alpha2));
mwifiex_send_domain_info_cmd_fw(wiphy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index 08590aa68356..1dd372372048 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev)
return 0;
error:
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(mdev);
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 31aec0f40232..73611c9d26e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -50,9 +50,9 @@ enum mt7915_eeprom_field {
#define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7981 (144 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT)
#define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT)
-#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_RX_PATH GENMASK(5, 3)
@@ -180,6 +180,8 @@ mt7915_get_cal_group_size(struct mt7915_dev *dev)
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G :
MT_EE_CAL_GROUP_SIZE_7916;
+ } else if (is_mt7981(&dev->mt76)) {
+ return MT_EE_CAL_GROUP_SIZE_7981;
} else if (mt7915_check_adie(dev, false)) {
return MT_EE_CAL_GROUP_SIZE_7976;
} else {
@@ -192,8 +194,6 @@ mt7915_get_cal_dpd_size(struct mt7915_dev *dev)
{
if (is_mt7915(&dev->mt76))
return MT_EE_CAL_DPD_SIZE_V1;
- else if (is_mt7981(&dev->mt76))
- return MT_EE_CAL_DPD_SIZE_V2_7981;
else
return MT_EE_CAL_DPD_SIZE_V2;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 2928e75b2397..c1fdd3c4f1ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -3052,30 +3052,15 @@ static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
/* 5G BW160 */
5250, 5570, 5815
};
- static const u16 freq_list_v2_7981[] = {
- /* 5G BW20 */
- 5180, 5200, 5220, 5240,
- 5260, 5280, 5300, 5320,
- 5500, 5520, 5540, 5560,
- 5580, 5600, 5620, 5640,
- 5660, 5680, 5700, 5720,
- 5745, 5765, 5785, 5805,
- 5825, 5845, 5865, 5885,
- /* 5G BW160 */
- 5250, 5570, 5815
- };
- const u16 *freq_list = freq_list_v1;
- int n_freqs = ARRAY_SIZE(freq_list_v1);
- int idx;
+ const u16 *freq_list;
+ int idx, n_freqs;
if (!is_mt7915(&dev->mt76)) {
- if (is_mt7981(&dev->mt76)) {
- freq_list = freq_list_v2_7981;
- n_freqs = ARRAY_SIZE(freq_list_v2_7981);
- } else {
- freq_list = freq_list_v2;
- n_freqs = ARRAY_SIZE(freq_list_v2);
- }
+ freq_list = freq_list_v2;
+ n_freqs = ARRAY_SIZE(freq_list_v2);
+ } else {
+ freq_list = freq_list_v1;
+ n_freqs = ARRAY_SIZE(freq_list_v1);
}
if (freq < 4000) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index a9599c286328..84015ab24af6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -671,13 +671,20 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
/* init wiphy according to mphy and phy */
mt7996_init_wiphy_band(mphy->hw, phy);
- ret = mt7996_init_tx_queues(mphy->priv,
- MT_TXQ_ID(band),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(band) + hif1_ofs,
- wed);
- if (ret)
- goto error;
+
+ if (is_mt7996(&dev->mt76) && !dev->hif2 && band == MT_BAND1) {
+ int i;
+
+ for (i = 0; i <= MT_TXQ_PSD; i++)
+ mphy->q_tx[i] = dev->mt76.phys[MT_BAND0]->q_tx[0];
+ } else {
+ ret = mt7996_init_tx_queues(mphy->priv, MT_TXQ_ID(band),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(band) + hif1_ofs,
+ wed);
+ if (ret)
+ goto error;
+ }
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
@@ -727,6 +734,7 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
static int mt7996_wed_rro_init(struct mt7996_dev *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
struct mt7996_wed_rro_addr *addr;
@@ -766,7 +774,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
addr = dev->wed_rro.addr_elem[i].ptr;
for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
@@ -784,7 +792,7 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
dev->wed_rro.session.ptr = ptr;
addr = dev->wed_rro.session.ptr;
for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
@@ -884,6 +892,7 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
static void mt7996_wed_rro_work(struct work_struct *work)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mt7996_dev *dev;
LIST_HEAD(list);
@@ -920,7 +929,7 @@ static void mt7996_wed_rro_work(struct work_struct *work)
MT7996_RRO_WINDOW_MAX_LEN;
reset:
elem = ptr + elem_id * sizeof(*elem);
- elem->signature = 0xff;
+ elem->data |= cpu_to_le32(val);
}
mt7996_mcu_wed_rro_reset_sessions(dev, e->id);
out:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index b3fcca9bbb95..28477702c18b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -1766,13 +1766,10 @@ void mt7996_tx_token_put(struct mt7996_dev *dev)
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
struct mt76_dev *mdev = &dev->mt76;
+ struct mt7996_phy *phy;
int i, ret;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
-
if (dev->hif2) {
mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
@@ -1784,20 +1781,14 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
}
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt7996_for_each_phy(dev, phy)
+ set_bit(MT76_RESET, &phy->mt76->state);
wake_up(&dev->mt76.mcu.wait);
- if (phy2)
- set_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- set_bit(MT76_RESET, &phy3->mt76->state);
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mphy);
- if (phy2)
- mt76_txq_schedule_all(phy2->mt76);
- if (phy3)
- mt76_txq_schedule_all(phy3->mt76);
+ mt7996_for_each_phy(dev, phy)
+ mt76_txq_schedule_all(phy->mt76);
/* disable all tx/rx napi */
mt76_worker_disable(&dev->mt76.tx_worker);
@@ -1855,36 +1846,25 @@ mt7996_mac_restart(struct mt7996_dev *dev)
goto out;
mt7996_mac_init(dev);
- mt7996_init_txpower(&dev->phy);
- mt7996_init_txpower(phy2);
- mt7996_init_txpower(phy3);
+ mt7996_for_each_phy(dev, phy)
+ mt7996_init_txpower(phy);
ret = mt7996_txbf_init(dev);
+ if (ret)
+ goto out;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
- ret = mt7996_run(&dev->phy);
- if (ret)
- goto out;
- }
-
- if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
- ret = mt7996_run(phy2);
- if (ret)
- goto out;
- }
+ mt7996_for_each_phy(dev, phy) {
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ continue;
- if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
- ret = mt7996_run(phy3);
+ ret = mt7996_run(&dev->phy);
if (ret)
goto out;
}
out:
/* reset done */
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
napi_enable(&dev->mt76.tx_napi);
local_bh_disable();
@@ -1898,26 +1878,18 @@ mt7996_mac_restart(struct mt7996_dev *dev)
static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy;
int i;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
dev->recovery.hw_full_reset = true;
wake_up(&dev->mt76.mcu.wait);
- ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
+ ieee80211_stop_queues(hw);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2)
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- if (phy3)
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy)
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < 10; i++) {
@@ -1930,40 +1902,23 @@ mt7996_mac_full_reset(struct mt7996_dev *dev)
dev_err(dev->mt76.dev, "chip full reset failed\n");
ieee80211_restart_hw(mt76_hw(dev));
- if (phy2)
- ieee80211_restart_hw(phy2->mt76->hw);
- if (phy3)
- ieee80211_restart_hw(phy3->mt76->hw);
-
ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev),
- &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
}
void mt7996_mac_reset_work(struct work_struct *work)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw;
struct mt7996_dev *dev;
+ struct mt7996_phy *phy;
int i;
dev = container_of(work, struct mt7996_dev, reset_work);
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
+ hw = mt76_hw(dev);
/* chip full reset */
if (dev->recovery.restart) {
@@ -1994,7 +1949,7 @@ void mt7996_mac_reset_work(struct work_struct *work)
return;
dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
- wiphy_name(dev->mt76.hw->wiphy));
+ wiphy_name(hw->wiphy));
if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);
@@ -2003,25 +1958,17 @@ void mt7996_mac_reset_work(struct work_struct *work)
mtk_wed_device_stop(&dev->mt76.mmio.wed);
ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2) {
- set_bit(MT76_RESET, &phy2->mt76->state);
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- }
- if (phy3) {
- set_bit(MT76_RESET, &phy3->mt76->state);
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy) {
+ set_bit(MT76_RESET, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
}
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2074,11 +2021,8 @@ void mt7996_mac_reset_work(struct work_struct *work)
}
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2100,25 +2044,14 @@ void mt7996_mac_reset_work(struct work_struct *work)
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_wake_queues(hw);
mutex_unlock(&dev->mt76.mutex);
mt7996_update_beacons(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
wiphy_name(dev->mt76.hw->wiphy));
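
For reference, the mac.c hunks above collapse the open-coded phy2/phy3 branches into a single mt7996_for_each_phy() walk, so stop/wake/restart and watchdog rescheduling are written once per operation instead of three times. A generic sketch of that shape follows; the macro, types, and field names are illustrative only, not the driver's real definitions:

/* Illustrative: iterate every present phy instead of branching on phy2/phy3. */
#define demo_for_each_phy(dev, phy, i)				\
	for ((i) = 0; (i) < (dev)->num_phys; (i)++)		\
		if (((phy) = (dev)->phys[(i)]) != NULL)

static void demo_restart_watchdogs(struct demo_dev *dev)
{
	struct demo_phy *phy;
	int i;

	demo_for_each_phy(dev, phy, i)
		queue_delayed_work(dev->wq, &phy->mac_work, DEMO_WATCHDOG_TIME);
}
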
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 84f731b387d2..d01b5778da20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -138,6 +138,28 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return -1;
}
+static int get_own_mld_idx(u64 mask, bool group_mld)
+{
+ u8 start = group_mld ? 0 : 16;
+ u8 end = group_mld ? 15 : 63;
+ int idx;
+
+ idx = get_free_idx(mask, start, end);
+ if (idx)
+ return idx - 1;
+
+ /* If the 16-63 range is not available, perform another lookup in the
+ * range 0-15
+ */
+ if (!group_mld) {
+ idx = get_free_idx(mask, 0, 15);
+ if (idx)
+ return idx - 1;
+ }
+
+ return -EINVAL;
+}
+
static void
mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlink)
{
@@ -279,7 +301,7 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt7996_dev *dev = phy->dev;
u8 band_idx = phy->mt76->band_idx;
struct mt76_txq *mtxq;
- int idx, ret;
+ int mld_idx, idx, ret;
mlink->idx = __ffs64(~dev->mt76.vif_mask);
if (mlink->idx >= mt7996_max_interface_num(dev))
@@ -289,6 +311,17 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ if (!dev->mld_idx_mask) { /* first link in the group */
+ mvif->mld_group_idx = get_own_mld_idx(dev->mld_idx_mask, true);
+ mvif->mld_remap_idx = get_free_idx(dev->mld_remap_idx_mask,
+ 0, 15);
+ }
+
+ mld_idx = get_own_mld_idx(dev->mld_idx_mask, false);
+ if (mld_idx < 0)
+ return -ENOSPC;
+
+ link->mld_idx = mld_idx;
link->phy = phy;
mlink->omac_idx = idx;
mlink->band_idx = band_idx;
@@ -301,6 +334,11 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
return ret;
dev->mt76.vif_mask |= BIT_ULL(mlink->idx);
+ if (!dev->mld_idx_mask) {
+ dev->mld_idx_mask |= BIT_ULL(mvif->mld_group_idx);
+ dev->mld_remap_idx_mask |= BIT_ULL(mvif->mld_remap_idx);
+ }
+ dev->mld_idx_mask |= BIT_ULL(link->mld_idx);
phy->omac_mask |= BIT_ULL(mlink->omac_idx);
idx = MT7996_WTBL_RESERVED - mlink->idx;
@@ -380,7 +418,13 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
}
dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
+ dev->mld_idx_mask &= ~BIT_ULL(link->mld_idx);
phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
+ if (!(dev->mld_idx_mask & ~BIT_ULL(mvif->mld_group_idx))) {
+ /* last link */
+ dev->mld_idx_mask &= ~BIT_ULL(mvif->mld_group_idx);
+ dev->mld_remap_idx_mask &= ~BIT_ULL(mvif->mld_remap_idx);
+ }
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta_link->wcid.poll_list))
@@ -1036,16 +1080,17 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
goto error_unlink;
}
- err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
- link_id);
- if (err)
- goto error_unlink;
-
mphy = mt76_vif_link_phy(&link->mt76);
if (!mphy) {
err = -EINVAL;
goto error_unlink;
}
+
+ err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
+ link_id);
+ if (err)
+ goto error_unlink;
+
mphy->num_sta++;
}
@@ -1327,11 +1372,13 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta_link->wcid, tid,
ssn, params->buf_size);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, true);
+ ret = mt7996_mcu_add_rx_ba(dev, params, link,
+ msta_link, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, tid);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, false);
+ ret = mt7996_mcu_add_rx_ba(dev, params, link,
+ msta_link, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
@@ -1617,19 +1664,13 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
}
}
-static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+static void mt7996_link_rate_ctrl_update(void *data,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta *msta = msta_link->sta;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
- struct mt7996_sta_link *msta_link;
u32 *changed = data;
- rcu_read_lock();
-
- msta_link = rcu_dereference(msta->link[msta->deflink_id]);
- if (!msta_link)
- goto out;
-
spin_lock_bh(&dev->mt76.sta_poll_lock);
msta_link->changed |= *changed;
@@ -1637,8 +1678,6 @@ static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
list_add_tail(&msta_link->rc_list, &dev->sta_rc_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
-out:
- rcu_read_unlock();
}
static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
@@ -1646,11 +1685,32 @@ static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
- mt7996_link_rate_ctrl_update(&changed, sta);
- ieee80211_queue_work(hw, &dev->rc_work);
+ rcu_read_lock();
+
+ msta_link = rcu_dereference(msta->link[link_sta->link_id]);
+ if (msta_link) {
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
+ ieee80211_queue_work(hw, &dev->rc_work);
+ }
+
+ rcu_read_unlock();
+}
+
+static void mt7996_sta_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
+ u32 *changed = data;
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (msta_link)
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
}
static int
@@ -1671,7 +1731,7 @@ mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* - multiple rates: if it's not in range format i.e 0-{7,8,9} for VHT
* then multiple MCS setting (MCS 4,5,6) is not supported.
*/
- ieee80211_iterate_stations_atomic(hw, mt7996_link_rate_ctrl_update,
+ ieee80211_iterate_stations_atomic(hw, mt7996_sta_rate_ctrl_update,
&changed);
ieee80211_queue_work(hw, &dev->rc_work);
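
The new get_own_mld_idx() helper in the main.c hunk above hands out per-link MLD indices from a 64-bit bitmask, preferring the 16-63 range and falling back to 0-15 when that range is exhausted. A minimal standalone sketch of that allocation shape, assuming the lookup helper returns one past the first clear bit in [start, end] and 0 when the range is full (helper names below are illustrative, not the driver's):

/* Illustrative sketch of range-limited index allocation over a u64 mask. */
static int demo_free_idx(u64 mask, u8 start, u8 end)
{
	for (u8 i = start; i <= end; i++)
		if (!(mask & BIT_ULL(i)))
			return i + 1;	/* 1-based, 0 means "range full" */
	return 0;
}

static int demo_own_mld_idx(u64 mask, bool group_mld)
{
	int idx = demo_free_idx(mask, group_mld ? 0 : 16, group_mld ? 15 : 63);

	if (!idx && !group_mld)		/* fall back to the 0-15 range */
		idx = demo_free_idx(mask, 0, 15);

	return idx ? idx - 1 : -EINVAL;
}
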
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 0be03eb3cf46..aad58f7831c7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -899,17 +899,28 @@ mt7996_mcu_bss_txcmd_tlv(struct sk_buff *skb, bool en)
}
static void
-mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
+mt7996_mcu_bss_mld_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link)
{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld));
-
mld = (struct bss_mld_tlv *)tlv;
- mld->group_mld_id = 0xff;
- mld->own_mld_id = mlink->idx;
- mld->remap_idx = 0xff;
+ mld->own_mld_id = link->mld_idx;
+ mld->link_id = link_conf->link_id;
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mld->group_mld_id = mvif->mld_group_idx;
+ mld->remap_idx = mvif->mld_remap_idx;
+ memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
+ } else {
+ mld->group_mld_id = 0xff;
+ mld->remap_idx = 0xff;
+ }
}
static void
@@ -1108,6 +1119,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
goto out;
if (enable) {
+ struct mt7996_vif_link *link;
+
mt7996_mcu_bss_rfch_tlv(skb, phy);
mt7996_mcu_bss_bmc_tlv(skb, mlink, phy);
mt7996_mcu_bss_ra_tlv(skb, phy);
@@ -1118,7 +1131,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
mt7996_mcu_bss_he_tlv(skb, vif, link_conf, phy);
/* this tag is necessary no matter if the vif is MLD */
- mt7996_mcu_bss_mld_tlv(skb, mlink);
+ link = container_of(mlink, struct mt7996_vif_link, mt76);
+ mt7996_mcu_bss_mld_tlv(skb, link_conf, link);
}
mt7996_mcu_bss_mbssid_tlv(skb, link_conf, enable);
@@ -1149,9 +1163,8 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
static int
mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ struct mt76_wcid *wcid, bool enable, bool tx)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -1185,14 +1198,17 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
if (enable && !params->amsdu)
msta_link->wcid.amsdu = false;
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, true);
+ return mt7996_mcu_sta_ba(dev, &link->mt76, params, &msta_link->wcid,
+ enable, true);
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable)
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool enable)
{
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, false);
+ return mt7996_mcu_sta_ba(dev, &link->mt76, params, &msta_link->wcid,
+ enable, false);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 130ea95626d5..7b21d6ae7e43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -481,7 +481,8 @@ struct bss_mld_tlv {
u8 own_mld_id;
u8 mac_addr[ETH_ALEN];
u8 remap_idx;
- u8 __rsv[3];
+ u8 link_id;
+ u8 __rsv[2];
} __packed;
struct sta_rec_ht_uni {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 8509d508e1e1..048d9a9898c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -248,11 +248,16 @@ struct mt7996_vif_link {
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
struct cfg80211_bitrate_mask bitrate_mask;
+
+ u8 mld_idx;
};
struct mt7996_vif {
struct mt7996_vif_link deflink; /* must be first */
struct mt76_vif_data mt76;
+
+ u8 mld_group_idx;
+ u8 mld_remap_idx;
};
/* crash-dump */
@@ -272,13 +277,12 @@ struct mt7996_hif {
int irq;
};
+#define WED_RRO_ADDR_SIGNATURE_MASK GENMASK(31, 24)
+#define WED_RRO_ADDR_COUNT_MASK GENMASK(14, 4)
+#define WED_RRO_ADDR_HEAD_HIGH_MASK GENMASK(3, 0)
struct mt7996_wed_rro_addr {
- u32 head_low;
- u32 head_high : 4;
- u32 count: 11;
- u32 oor: 1;
- u32 rsv : 8;
- u32 signature : 8;
+ __le32 head_low;
+ __le32 data;
};
struct mt7996_wed_rro_session_id {
@@ -337,6 +341,9 @@ struct mt7996_dev {
u32 q_int_mask[MT7996_MAX_QUEUE];
u32 q_wfdma_mask;
+ u64 mld_idx_mask;
+ u64 mld_remap_idx_mask;
+
const struct mt76_bus_ops *bus_ops;
struct mt7996_phy phy;
@@ -608,7 +615,8 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct mt7996_sta_link *msta_link, bool enable);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable);
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool enable);
int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
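
The mt7996.h hunk above replaces the C bitfields in struct mt7996_wed_rro_addr with an explicit little-endian word plus GENMASK()-based masks, making the hardware descriptor layout endian-safe. A short sketch of how such a descriptor would then be read and updated with the bitfield helpers; the function names are illustrative, the masks are the ones defined in the hunk:

#include <linux/bitfield.h>

static u32 demo_rro_count(const struct mt7996_wed_rro_addr *e)
{
	return FIELD_GET(WED_RRO_ADDR_COUNT_MASK, le32_to_cpu(e->data));
}

static void demo_rro_set_signature(struct mt7996_wed_rro_addr *e, u8 sig)
{
	u32 val = le32_to_cpu(e->data);

	val &= ~WED_RRO_ADDR_SIGNATURE_MASK;
	val |= FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, sig);
	e->data = cpu_to_le32(val);
}
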
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
index 19e99bc1c6c4..f5ce50056ee9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
@@ -137,6 +137,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
mdev = &dev->mt76;
mt7996_wfsys_reset(dev);
hif2 = mt7996_pci_init_hif2(pdev);
+ dev->hif2 = hif2;
ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
if (ret < 0)
@@ -161,7 +162,6 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
if (hif2) {
hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
- dev->hif2 = hif2;
ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq);
if (ret < 0)
diff --git a/drivers/net/wireless/realtek/rtw88/led.c b/drivers/net/wireless/realtek/rtw88/led.c
index 25aa6cbaa728..4cc62e49d167 100644
--- a/drivers/net/wireless/realtek/rtw88/led.c
+++ b/drivers/net/wireless/realtek/rtw88/led.c
@@ -6,13 +6,17 @@
#include "debug.h"
#include "led.h"
-static int rtw_led_set_blocking(struct led_classdev *led,
- enum led_brightness brightness)
+static int rtw_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
{
struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ mutex_lock(&rtwdev->mutex);
+
rtwdev->chip->ops->led_set(led, brightness);
+ mutex_unlock(&rtwdev->mutex);
+
return 0;
}
@@ -36,10 +40,7 @@ void rtw_led_init(struct rtw_dev *rtwdev)
if (!rtwdev->chip->ops->led_set)
return;
- if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
- led->brightness_set = rtwdev->chip->ops->led_set;
- else
- led->brightness_set_blocking = rtw_led_set_blocking;
+ led->brightness_set_blocking = rtw_led_set;
snprintf(rtwdev->led_name, sizeof(rtwdev->led_name),
"rtw88-%s", dev_name(rtwdev->dev));
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index b9c2224dde4a..1837f17239ab 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -3456,6 +3456,7 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
rtwsta_link = rtwsta->links[rtwvif_link->link_id];
if (unlikely(!rtwsta_link)) {
ret = -ENOLINK;
+ dev_kfree_skb_any(skb);
goto out;
}
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index fe7beff8c424..f99e179f7ff9 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -205,7 +205,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work)
static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
- struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
struct ser_msg *msg = NULL;
if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
@@ -221,7 +220,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event)
list_add(&msg->list, &ser->msg_q);
spin_unlock_irq(&ser->msg_q_lock);
- ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
+ schedule_work(&ser->ser_hdl_work);
return 0;
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 201fc8809a62..012fcfc79a73 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -331,9 +331,10 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
} else {
memset(chap->c2, 0, chap->hash_len);
}
- if (ctrl->opts->concat)
+ if (ctrl->opts->concat) {
chap->s2 = 0;
- else
+ chap->bi_directional = false;
+ } else
chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c0fe8cfb7229..1413788ca7d5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2250,6 +2250,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_cleanup_tagset;
+ if (ctrl->opts->concat && !ctrl->tls_pskid)
+ return 0;
+
error = nvme_enable_ctrl(ctrl);
if (error)
goto out_stop_queue;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a9b18c051f5b..6725c34dd7c9 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
int ls_error;
struct list_head lsreq_list; /* tgtport->ls_req_list */
bool req_queued;
+
+ struct work_struct put_work;
};
@@ -111,8 +113,6 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
-
- struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
-static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+static void nvmet_fc_put_lsop_work(struct work_struct *work)
{
- struct nvmet_fc_tgtport *tgtport =
- container_of(work, struct nvmet_fc_tgtport, put_work);
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(work, struct nvmet_fc_ls_req_op, put_work);
- nvmet_fc_tgtport_put(tgtport);
+ nvmet_fc_tgtport_put(lsop->tgtport);
+ kfree(lsop);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
DMA_BIDIRECTIONAL);
out_putwork:
- queue_work(nvmet_wq, &tgtport->put_work);
+ queue_work(nvmet_wq, &lsop->put_work);
}
static int
@@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
lsreq->done = done;
lsop->req_queued = false;
INIT_LIST_HEAD(&lsop->lsreq_list);
+ INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);
lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
lsreq->rqstlen + lsreq->rsplen,
@@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
__nvmet_fc_finish_ls_req(lsop);
/* fc-nvme target doesn't care about success or failure of cmd */
-
- kfree(lsop);
}
/*
@@ -1410,7 +1410,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
- INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
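
The fc.c changes above move the deferred target-port put from a per-tgtport work item to a per-LS-request one, so the final reference drop and the kfree() of the request both run from workqueue context once the LS completes. A minimal sketch of that pattern under placeholder names (none of these are nvmet-fc symbols):

struct demo_lsreq {
	struct demo_port	*port;
	struct work_struct	put_work;	/* INIT_WORK() at allocation time */
};

static void demo_put_work(struct work_struct *work)
{
	struct demo_lsreq *req = container_of(work, struct demo_lsreq, put_work);

	demo_port_put(req->port);	/* drop the ref taken when the LS was sent */
	kfree(req);			/* request memory owned by the work item */
}

static void demo_finish_lsreq(struct demo_lsreq *req)
{
	/* May be called from atomic context, so defer the put and the free. */
	queue_work(system_wq, &req->put_work);
}
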
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 257b497d515a..5dffcc5becae 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -496,13 +496,15 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
if (!targetport) {
/*
* The target port is gone. The target doesn't expect any
- * response anymore and the ->done call is not valid
- * because the resources have been freed by
- * nvmet_fc_free_pending_reqs.
+ * response anymore and thus lsreq can't be accessed anymore.
*
* We end up here from delete association exchange:
* nvmet_fc_xmt_disconnect_assoc sends an async request.
+ *
+ * Return success because this is what LLDDs do; silently
+ * drop the response.
*/
+ lsrsp->done(lsrsp);
kmem_cache_free(lsreq_cache, tls_req);
return 0;
}
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 6c93f39d0288..5e445a7bda33 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -549,7 +549,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n");
goto err_get_sync;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 00f52d472dcd..cc71a2d90cd4 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -123,7 +123,6 @@
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
#define GEN3_EQ_CONTROL_OFF 0x8A8
#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
index 3aad19b56da8..0c6f4514f922 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-common.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -8,9 +8,11 @@
#include "pcie-designware.h"
#include "pcie-qcom-common.h"
-void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci)
{
+ struct device *dev = pci->dev;
u32 reg;
+ u16 speed;
/*
* GEN3_RELATED_OFF register is repurposed to apply equalization
@@ -19,32 +21,40 @@ void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
* determines the data rate for which these equalization settings are
* applied.
*/
- reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
- reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
- GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
- reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
- reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
- GEN3_EQ_FMDC_N_EVALS |
- GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
- GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
- reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
- FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
- FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
- FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
- dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+ for (speed = PCIE_SPEED_8_0GT; speed <= pcie_link_speed[pci->max_link_speed]; speed++) {
+ if (speed > PCIE_SPEED_32_0GT) {
+ dev_warn(dev, "Skipped equalization settings for unsupported data rate\n");
+ break;
+ }
- reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
- GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
- GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
- dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ speed - PCIE_SPEED_8_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ }
}
-EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_equalization);
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
{
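
For reference, the value the rewritten loop above writes into the rate shadow select field is just the rate's offset from Gen3, assuming the usual contiguous enum pci_bus_speed encoding (illustrative, not from the patch):

/* Illustrative: RATE_SHADOW_SEL per data rate programmed by the loop. */
u32 sel = speed - PCIE_SPEED_8_0GT;	/* 8 GT/s -> 0, 16 GT/s -> 1, 32 GT/s -> 2 */
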
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
index 7d88d29e4766..7f5ca2fd9a72 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-common.h
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -8,7 +8,7 @@
struct dw_pcie;
-void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci);
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index bf7c6ac0f3e3..aaf060bf39d4 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -511,10 +511,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
goto err_disable_resources;
}
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
- qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
- }
/*
* The physical address of the MMIO region which is exposed as the BAR
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 294babe1816e..a93740ae602f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -322,10 +322,10 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
- qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
- }
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
@@ -1740,6 +1740,8 @@ static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
int ret = -ENOENT;
for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
ret = qcom_pcie_parse_port(pcie, of_port);
if (ret)
goto err_port_del;
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 18055807a4f5..c16c4c2be499 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -182,8 +182,17 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
return ret;
}
- if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc))
+ if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) {
reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ /*
+ * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr.
+ * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B)
+ * indicates that for peripherals in HSC domain, after
+ * reset has been asserted by writing a matching reset bit
+ * into register SRCR, it is mandatory to wait 1ms.
+ */
+ fsleep(1000);
+ }
val = readl(rcar->base + PCIEMSR0);
if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
@@ -204,6 +213,19 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
if (ret)
goto err_unprepare;
+ /*
+ * Assure the reset is latched and the core is ready for DBI access.
+ * On R-Car V4H, the PCIe reset is asynchronous and does not take
+ * effect immediately, but needs a short time to complete. In case
+ * DBI access happens in that short time, that access generates an
+ * SError. To make sure that condition can never happen, read back the
+ * state of the reset, which should turn the asynchronous reset into
+ * synchronous one, and wait a little over 1ms to add additional
+ * safety margin.
+ */
+ reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ fsleep(1000);
+
if (rcar->drvdata->additional_common_init)
rcar->drvdata->additional_common_init(rcar);
@@ -711,7 +733,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable
val &= ~APP_HOLD_PHY_RST;
writel(val, rcar->base + PCIERSTCTRL1);
- ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 4f26086f25da..0c0734aa14b6 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1722,9 +1722,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
ret);
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
- dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+ dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
pcie->ep_state = EP_STATE_DISABLED;
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 467ddc701adc..bb88767a3797 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -1344,7 +1344,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
unsigned int i;
int err;
- port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
+ port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
if (!port->phys)
return -ENOMEM;
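
The pci-tegra fix above restores the documented devm_kcalloc() argument order: the element count comes before the element size so the helper can overflow-check the n * size multiplication. A one-line sketch of the intended shape, with generic names:

/* devm_kcalloc(dev, n, size, gfp): count first, then element size. */
ptrs = devm_kcalloc(dev, nr_lanes, sizeof(*ptrs), GFP_KERNEL);
if (!ptrs)
	return -ENOMEM;
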
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 0a37a3f1809c..654639bccd10 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -311,7 +311,7 @@ static int xgene_msi_handler_setup(struct platform_device *pdev)
msi_val = xgene_msi_int_read(xgene_msi, i);
if (msi_val) {
dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
- return EINVAL;
+ return -EINVAL;
}
irq = platform_get_irq(pdev, i);
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index fe288fd770c4..4780e0109e58 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -584,7 +584,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index e091193bd8a8..044f5ea0716d 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -301,15 +301,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported)
return;
- dma_release_channel(epf_test->dma_chan_tx);
- if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ if (epf_test->dma_chan_tx) {
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
epf_test->dma_chan_tx = NULL;
- epf_test->dma_chan_rx = NULL;
- return;
}
- dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_rx = NULL;
+ if (epf_test->dma_chan_rx) {
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+ }
}
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
@@ -772,12 +777,24 @@ static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
u32 status = le32_to_cpu(reg->status);
struct pci_epf *epf = epf_test->epf;
struct pci_epc *epc = epf->epc;
+ int ret;
if (bar < BAR_0)
goto set_status_err;
pci_epf_test_doorbell_cleanup(epf_test);
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+
+ /*
+ * The doorbell feature temporarily overrides the inbound translation
+ * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
+ * it calls set_bar() twice without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by
+ * the host. Thus, when disabling the doorbell, restore the inbound
+ * translation to point to the memory allocated for the BAR.
+ */
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
+ if (ret)
+ goto set_status_err;
status |= STATUS_DOORBELL_DISABLE_SUCCESS;
reg->status = cpu_to_le32(status);
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
index 9ca89cbfec15..1b58357b905f 100644
--- a/drivers/pci/endpoint/pci-ep-msi.c
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -24,7 +24,7 @@ static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
struct pci_epf *epf;
epc = pci_epc_get(dev_name(msi_desc_to_dev(desc)));
- if (!epc)
+ if (IS_ERR(epc))
return;
epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list);
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 0938ef7ebabf..b11b7f63f0d6 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -148,6 +148,28 @@ static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *d
arg->hwirq = desc->msi_index;
}
+static void cond_shutdown_parent(struct irq_data *data)
+{
+ struct msi_domain_info *info = data->domain->host_data;
+
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ irq_chip_shutdown_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_mask_parent(data);
+}
+
+static unsigned int cond_startup_parent(struct irq_data *data)
+{
+ struct msi_domain_info *info = data->domain->host_data;
+
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ return irq_chip_startup_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_unmask_parent(data);
+
+ return 0;
+}
+
static __always_inline void cond_mask_parent(struct irq_data *data)
{
struct msi_domain_info *info = data->domain->host_data;
@@ -164,6 +186,23 @@ static __always_inline void cond_unmask_parent(struct irq_data *data)
irq_chip_unmask_parent(data);
}
+static void pci_irq_shutdown_msi(struct irq_data *data)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+ pci_msi_mask(desc, BIT(data->irq - desc->irq));
+ cond_shutdown_parent(data);
+}
+
+static unsigned int pci_irq_startup_msi(struct irq_data *data)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msi_unmask(desc, BIT(data->irq - desc->irq));
+ return ret;
+}
+
static void pci_irq_mask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
@@ -194,6 +233,8 @@ static void pci_irq_unmask_msi(struct irq_data *data)
static const struct msi_domain_template pci_msi_template = {
.chip = {
.name = "PCI-MSI",
+ .irq_startup = pci_irq_startup_msi,
+ .irq_shutdown = pci_irq_shutdown_msi,
.irq_mask = pci_irq_mask_msi,
.irq_unmask = pci_irq_unmask_msi,
.irq_write_msi_msg = pci_msi_domain_write_msg,
@@ -210,6 +251,20 @@ static const struct msi_domain_template pci_msi_template = {
},
};
+static void pci_irq_shutdown_msix(struct irq_data *data)
+{
+ pci_msix_mask(irq_data_get_msi_desc(data));
+ cond_shutdown_parent(data);
+}
+
+static unsigned int pci_irq_startup_msix(struct irq_data *data)
+{
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msix_unmask(irq_data_get_msi_desc(data));
+ return ret;
+}
+
static void pci_irq_mask_msix(struct irq_data *data)
{
pci_msix_mask(irq_data_get_msi_desc(data));
@@ -234,6 +289,8 @@ EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
static const struct msi_domain_template pci_msix_template = {
.chip = {
.name = "PCI-MSIX",
+ .irq_startup = pci_irq_startup_msix,
+ .irq_shutdown = pci_irq_shutdown_msix,
.irq_mask = pci_irq_mask_msix,
.irq_unmask = pci_irq_unmask_msix,
.irq_write_msi_msg = pci_msi_domain_write_msg,
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ddb25960ea47..9369377725fa 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -122,6 +122,8 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
{
+ bool ret = false;
+
if (ACPI_HANDLE(&host_bridge->dev)) {
union acpi_object *obj;
@@ -135,11 +137,11 @@ bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
1, DSM_PCI_PRESERVE_BOOT_CONFIG,
NULL, ACPI_TYPE_INTEGER);
if (obj && obj->integer.value == 0)
- return true;
+ ret = true;
ACPI_FREE(obj);
}
- return false;
+ return ret;
}
/* _HPX PCI Setting Record (Type 0); same as _HPP */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index e286c197d716..55abc5e17b8b 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -786,6 +786,9 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
{
+ if (!dev->aer_info)
+ return 1;
+
switch (severity) {
case AER_NONFATAL:
return __ratelimit(&dev->aer_info->nonfatal_ratelimit);
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
index 6e138310b45b..3320494b62d8 100644
--- a/drivers/pci/pwrctrl/slot.c
+++ b/drivers/pci/pwrctrl/slot.c
@@ -49,13 +49,14 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
ret = regulator_bulk_enable(slot->num_supplies, slot->supplies);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to enable slot regulators\n");
- goto err_regulator_free;
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+ return ret;
}
ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_slot_power_off,
slot);
if (ret)
- goto err_regulator_disable;
+ return ret;
clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(clk)) {
@@ -70,13 +71,6 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "Failed to register pwrctrl driver\n");
return 0;
-
-err_regulator_disable:
- regulator_bulk_disable(slot->num_supplies, slot->supplies);
-err_regulator_free:
- regulator_bulk_free(slot->num_supplies, slot->supplies);
-
- return ret;
}
static const struct of_device_id pci_pwrctrl_slot_of_match[] = {
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 369e77ad5f13..8f14cb324e01 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -97,7 +97,8 @@ struct arm_spe_pmu {
#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
/* Convert a free-running index from perf into an SPE buffer offset */
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;
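
The PERF_IDX2OFF change above promotes nr_pages to unsigned long before the shift. Without the cast the product is computed in 32-bit arithmetic, so an AUX buffer of 4 GiB or more wraps to a too-small or zero modulus. A minimal illustration, assuming 4 KiB pages and a 64-bit unsigned long:

u32 nr_pages = 0x100000;				/* 4 GiB worth of 4 KiB pages */

u32 bad = nr_pages << PAGE_SHIFT;			/* wraps to 0 in 32-bit math */
unsigned long good = (unsigned long)nr_pages << PAGE_SHIFT;	/* 0x100000000 */
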
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index ce91fb1d5167..17c6310f4b54 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -137,6 +137,8 @@ struct rockchip_combphy_grfcfg {
struct combphy_reg pipe_xpcs_phy_ready;
struct combphy_reg pipe_pcie1l0_sel;
struct combphy_reg pipe_pcie1l1_sel;
+ struct combphy_reg u3otg0_port_en;
+ struct combphy_reg u3otg1_port_en;
};
struct rockchip_combphy_cfg {
@@ -594,6 +596,14 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ switch (priv->id) {
+ case 0:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true);
+ break;
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg1_port_en, true);
+ break;
+ }
break;
case PHY_TYPE_SATA:
@@ -737,6 +747,8 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
/* pipe-grf */
.pipe_con0_for_sata = { 0x0000, 15, 0, 0x00, 0x2220 },
.pipe_xpcs_phy_ready = { 0x0040, 2, 2, 0x00, 0x01 },
+ .u3otg0_port_en = { 0x0104, 15, 0, 0x0181, 0x1100 },
+ .u3otg1_port_en = { 0x0144, 15, 0, 0x0181, 0x1100 },
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index be1ca8e85754..0402626c4b98 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -211,6 +211,8 @@ config PINCTRL_EIC7700
depends on ARCH_ESWIN || COMPILE_TEST
select PINMUX
select GENERIC_PINCONF
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
help
This driver support for the pin controller in ESWIN's EIC7700 SoC,
which supports pin multiplexing, pin configuration,and rgmii voltage
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 9171de657f97..a75762e4d264 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -187,6 +187,9 @@ static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 };
static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 };
+static const unsigned int i2c_sck_d_pins[] = { GPIOX_11 };
+static const unsigned int i2c_sda_d_pins[] = { GPIOX_10 };
+
static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
@@ -411,6 +414,8 @@ static const struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
+ GROUP(i2c_sda_d, 5, 5),
+ GROUP(i2c_sck_d, 5, 4),
GROUP(sdio_d0, 5, 31),
GROUP(sdio_d1, 5, 30),
GROUP(sdio_d2, 5, 29),
@@ -651,6 +656,10 @@ static const char * const i2c_c_groups[] = {
"i2c_sck_c", "i2c_sda_c", "i2c_sda_c_dv18", "i2c_sck_c_dv19",
};
+static const char * const i2c_d_groups[] = {
+ "i2c_sck_d", "i2c_sda_d",
+};
+
static const char * const eth_groups[] = {
"eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv",
"eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3",
@@ -777,6 +786,7 @@ static const struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(i2c_a),
FUNCTION(i2c_b),
FUNCTION(i2c_c),
+ FUNCTION(i2c_d),
FUNCTION(eth),
FUNCTION(pwm_a),
FUNCTION(pwm_b),
diff --git a/drivers/pinctrl/pinctrl-eic7700.c b/drivers/pinctrl/pinctrl-eic7700.c
index 4874b5532343..ffcd0ec5c2dc 100644
--- a/drivers/pinctrl/pinctrl-eic7700.c
+++ b/drivers/pinctrl/pinctrl-eic7700.c
@@ -634,7 +634,7 @@ static int eic7700_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(pc->base);
regulator = devm_regulator_get(dev, "vrgmii");
- if (IS_ERR_OR_NULL(regulator)) {
+ if (IS_ERR(regulator)) {
return dev_err_probe(dev, PTR_ERR(regulator),
"failed to get vrgmii regulator\n");
}
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 79814758a084..07a478b2c487 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -337,7 +337,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
while (selector < nfuncs) {
const char *fname = ops->get_function_name(pctldev, selector);
- if (!strcmp(function, fname))
+ if (fname && !strcmp(function, fname))
return selector;
selector++;
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c52263c2a7b0..22bc5b8f65fd 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -1124,7 +1124,7 @@ static u32 rzg3s_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
if (bit < 0)
- return bit;
+ return 0;
return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
}
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 29d16c9c1bd1..3a742f74ecd1 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -726,7 +726,8 @@ static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins;
unsigned int num_pins;
- unsigned int i, ret;
+ unsigned int i;
+ int ret;
pins = pmx->pfc->info->groups[group].pins;
num_pins = pmx->pfc->info->groups[group].nr_pins;
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index f63c3c410451..382dff8805c6 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -702,8 +702,7 @@ static int cw_bat_probe(struct i2c_client *client)
if (!cw_bat->battery_workqueue)
return -ENOMEM;
- devm_delayed_work_autocancel(&client->dev,
- &cw_bat->battery_delay_work, cw_bat_work);
+ devm_delayed_work_autocancel(&client->dev, &cw_bat->battery_delay_work, cw_bat_work);
queue_delayed_work(cw_bat->battery_workqueue,
&cw_bat->battery_delay_work, msecs_to_jiffies(10));
return 0;
diff --git a/drivers/power/supply/max77705_charger.c b/drivers/power/supply/max77705_charger.c
index 329b430d0e50..a8762bdd2c7c 100644
--- a/drivers/power/supply/max77705_charger.c
+++ b/drivers/power/supply/max77705_charger.c
@@ -40,13 +40,13 @@ static enum power_supply_property max77705_charger_props[] = {
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
-static int max77705_chgin_irq(void *irq_drv_data)
+static irqreturn_t max77705_chgin_irq(int irq, void *irq_drv_data)
{
- struct max77705_charger_data *charger = irq_drv_data;
+ struct max77705_charger_data *chg = irq_drv_data;
- queue_work(charger->wqueue, &charger->chgin_work);
+ queue_work(chg->wqueue, &chg->chgin_work);
- return 0;
+ return IRQ_HANDLED;
}
static const struct regmap_irq max77705_charger_irqs[] = {
@@ -64,7 +64,6 @@ static struct regmap_irq_chip max77705_charger_irq_chip = {
.name = "max77705-charger",
.status_base = MAX77705_CHG_REG_INT,
.mask_base = MAX77705_CHG_REG_INT_MASK,
- .handle_post_irq = max77705_chgin_irq,
.num_regs = 1,
.irqs = max77705_charger_irqs,
.num_irqs = ARRAY_SIZE(max77705_charger_irqs),
@@ -74,8 +73,7 @@ static int max77705_charger_enable(struct max77705_charger_data *chg)
{
int rv;
- rv = regmap_update_bits(chg->regmap, MAX77705_CHG_REG_CNFG_09,
- MAX77705_CHG_EN_MASK, MAX77705_CHG_EN_MASK);
+ rv = regmap_field_write(chg->rfield[MAX77705_CHG_EN], 1);
if (rv)
dev_err(chg->dev, "unable to enable the charger: %d\n", rv);
@@ -87,10 +85,7 @@ static void max77705_charger_disable(void *data)
struct max77705_charger_data *chg = data;
int rv;
- rv = regmap_update_bits(chg->regmap,
- MAX77705_CHG_REG_CNFG_09,
- MAX77705_CHG_EN_MASK,
- MAX77705_CHG_DISABLE);
+ rv = regmap_field_write(chg->rfield[MAX77705_CHG_EN], MAX77705_CHG_DISABLE);
if (rv)
dev_err(chg->dev, "unable to disable the charger: %d\n", rv);
}
@@ -109,19 +104,19 @@ static int max77705_get_online(struct regmap *regmap, int *val)
return 0;
}
-static int max77705_check_battery(struct max77705_charger_data *charger, int *val)
+static int max77705_check_battery(struct max77705_charger_data *chg, int *val)
{
unsigned int reg_data;
unsigned int reg_data2;
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
regmap_read(regmap, MAX77705_CHG_REG_INT_OK, &reg_data);
- dev_dbg(charger->dev, "CHG_INT_OK(0x%x)\n", reg_data);
+ dev_dbg(chg->dev, "CHG_INT_OK(0x%x)\n", reg_data);
regmap_read(regmap, MAX77705_CHG_REG_DETAILS_00, &reg_data2);
- dev_dbg(charger->dev, "CHG_DETAILS00(0x%x)\n", reg_data2);
+ dev_dbg(chg->dev, "CHG_DETAILS00(0x%x)\n", reg_data2);
if ((reg_data & MAX77705_BATP_OK) || !(reg_data2 & MAX77705_BATP_DTLS))
*val = true;
@@ -131,13 +126,13 @@ static int max77705_check_battery(struct max77705_charger_data *charger, int *va
return 0;
}
-static int max77705_get_charge_type(struct max77705_charger_data *charger, int *val)
+static int max77705_get_charge_type(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
- unsigned int reg_data;
+ struct regmap *regmap = chg->regmap;
+ unsigned int reg_data, chg_en;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ regmap_field_read(chg->rfield[MAX77705_CHG_EN], &chg_en);
+ if (!chg_en) {
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
return 0;
}
@@ -159,13 +154,13 @@ static int max77705_get_charge_type(struct max77705_charger_data *charger, int *
return 0;
}
-static int max77705_get_status(struct max77705_charger_data *charger, int *val)
+static int max77705_get_status(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
- unsigned int reg_data;
+ struct regmap *regmap = chg->regmap;
+ unsigned int reg_data, chg_en;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ regmap_field_read(chg->rfield[MAX77705_CHG_EN], &chg_en);
+ if (!chg_en) {
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
return 0;
}
@@ -234,10 +229,10 @@ static int max77705_get_vbus_state(struct regmap *regmap, int *value)
return 0;
}
-static int max77705_get_battery_health(struct max77705_charger_data *charger,
+static int max77705_get_battery_health(struct max77705_charger_data *chg,
int *value)
{
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
unsigned int bat_dtls;
regmap_read(regmap, MAX77705_CHG_REG_DETAILS_01, &bat_dtls);
@@ -245,16 +240,16 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
switch (bat_dtls) {
case MAX77705_BATTERY_NOBAT:
- dev_dbg(charger->dev, "%s: No battery and the charger is suspended\n",
+ dev_dbg(chg->dev, "%s: No battery and the chg is suspended\n",
__func__);
*value = POWER_SUPPLY_HEALTH_NO_BATTERY;
break;
case MAX77705_BATTERY_PREQUALIFICATION:
- dev_dbg(charger->dev, "%s: battery is okay but its voltage is low(~VPQLB)\n",
+ dev_dbg(chg->dev, "%s: battery is okay but its voltage is low(~VPQLB)\n",
__func__);
break;
case MAX77705_BATTERY_DEAD:
- dev_dbg(charger->dev, "%s: battery dead\n", __func__);
+ dev_dbg(chg->dev, "%s: battery dead\n", __func__);
*value = POWER_SUPPLY_HEALTH_DEAD;
break;
case MAX77705_BATTERY_GOOD:
@@ -262,11 +257,11 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
*value = POWER_SUPPLY_HEALTH_GOOD;
break;
case MAX77705_BATTERY_OVERVOLTAGE:
- dev_dbg(charger->dev, "%s: battery ovp\n", __func__);
+ dev_dbg(chg->dev, "%s: battery ovp\n", __func__);
*value = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
break;
default:
- dev_dbg(charger->dev, "%s: battery unknown\n", __func__);
+ dev_dbg(chg->dev, "%s: battery unknown\n", __func__);
*value = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
break;
}
@@ -274,9 +269,9 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
return 0;
}
-static int max77705_get_health(struct max77705_charger_data *charger, int *val)
+static int max77705_get_health(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
int ret, is_online = 0;
ret = max77705_get_online(regmap, &is_online);
@@ -287,24 +282,19 @@ static int max77705_get_health(struct max77705_charger_data *charger, int *val)
if (ret || (*val != POWER_SUPPLY_HEALTH_GOOD))
return ret;
}
- return max77705_get_battery_health(charger, val);
+ return max77705_get_battery_health(chg, val);
}
-static int max77705_get_input_current(struct max77705_charger_data *charger,
+static int max77705_get_input_current(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data;
int get_current = 0;
- struct regmap *regmap = charger->regmap;
-
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- reg_data &= MAX77705_CHG_CHGIN_LIM_MASK;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CHGIN_LIM], &reg_data);
if (reg_data <= 3)
get_current = MAX77705_CURRENT_CHGIN_MIN;
- else if (reg_data >= MAX77705_CHG_CHGIN_LIM_MASK)
- get_current = MAX77705_CURRENT_CHGIN_MAX;
else
get_current = (reg_data + 1) * MAX77705_CURRENT_CHGIN_STEP;
@@ -313,26 +303,23 @@ static int max77705_get_input_current(struct max77705_charger_data *charger,
return 0;
}
-static int max77705_get_charge_current(struct max77705_charger_data *charger,
+static int max77705_get_charge_current(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data;
- struct regmap *regmap = charger->regmap;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_02, &reg_data);
- reg_data &= MAX77705_CHG_CC;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CC_LIM], &reg_data);
*val = reg_data <= 0x2 ? MAX77705_CURRENT_CHGIN_MIN : reg_data * MAX77705_CURRENT_CHG_STEP;
return 0;
}
-static int max77705_set_float_voltage(struct max77705_charger_data *charger,
+static int max77705_set_float_voltage(struct max77705_charger_data *chg,
int float_voltage)
{
int float_voltage_mv;
unsigned int reg_data = 0;
- struct regmap *regmap = charger->regmap;
float_voltage_mv = float_voltage / 1000;
reg_data = float_voltage_mv <= 4000 ? 0x0 :
@@ -340,20 +327,16 @@ static int max77705_set_float_voltage(struct max77705_charger_data *charger,
(float_voltage_mv <= 4200) ? (float_voltage_mv - 4000) / 50 :
(((float_voltage_mv - 4200) / 10) + 0x04);
- return regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_04,
- MAX77705_CHG_CV_PRM_MASK,
- (reg_data << MAX77705_CHG_CV_PRM_SHIFT));
+ return regmap_field_write(chg->rfield[MAX77705_CHG_CV_PRM], reg_data);
}
-static int max77705_get_float_voltage(struct max77705_charger_data *charger,
+static int max77705_get_float_voltage(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data = 0;
int voltage_mv;
- struct regmap *regmap = charger->regmap;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_04, &reg_data);
- reg_data &= MAX77705_CHG_PRM_MASK;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CV_PRM], &reg_data);
voltage_mv = reg_data <= 0x04 ? reg_data * 50 + 4000 :
(reg_data - 4) * 10 + 4200;
*val = voltage_mv * 1000;
@@ -365,28 +348,28 @@ static int max77705_chg_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct max77705_charger_data *charger = power_supply_get_drvdata(psy);
- struct regmap *regmap = charger->regmap;
+ struct max77705_charger_data *chg = power_supply_get_drvdata(psy);
+ struct regmap *regmap = chg->regmap;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
return max77705_get_online(regmap, &val->intval);
case POWER_SUPPLY_PROP_PRESENT:
- return max77705_check_battery(charger, &val->intval);
+ return max77705_check_battery(chg, &val->intval);
case POWER_SUPPLY_PROP_STATUS:
- return max77705_get_status(charger, &val->intval);
+ return max77705_get_status(chg, &val->intval);
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- return max77705_get_charge_type(charger, &val->intval);
+ return max77705_get_charge_type(chg, &val->intval);
case POWER_SUPPLY_PROP_HEALTH:
- return max77705_get_health(charger, &val->intval);
+ return max77705_get_health(chg, &val->intval);
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- return max77705_get_input_current(charger, &val->intval);
+ return max77705_get_input_current(chg, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- return max77705_get_charge_current(charger, &val->intval);
+ return max77705_get_charge_current(chg, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
- return max77705_get_float_voltage(charger, &val->intval);
+ return max77705_get_float_voltage(chg, &val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = charger->bat_info->voltage_max_design_uv;
+ val->intval = chg->bat_info->voltage_max_design_uv;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = max77705_charger_model;
@@ -410,15 +393,14 @@ static const struct power_supply_desc max77705_charger_psy_desc = {
static void max77705_chgin_isr_work(struct work_struct *work)
{
- struct max77705_charger_data *charger =
+ struct max77705_charger_data *chg =
container_of(work, struct max77705_charger_data, chgin_work);
- power_supply_changed(charger->psy_chg);
+ power_supply_changed(chg->psy_chg);
}
static void max77705_charger_initialize(struct max77705_charger_data *chg)
{
- u8 reg_data;
struct power_supply_battery_info *info;
struct regmap *regmap = chg->regmap;
@@ -429,45 +411,31 @@ static void max77705_charger_initialize(struct max77705_charger_data *chg)
/* unlock charger setting protect */
/* slowest LX slope */
- reg_data = MAX77705_CHGPROT_MASK | MAX77705_SLOWEST_LX_SLOPE;
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_06, reg_data,
- reg_data);
+ regmap_field_write(chg->rfield[MAX77705_CHGPROT], MAX77705_CHGPROT_UNLOCKED);
+ regmap_field_write(chg->rfield[MAX77705_LX_SLOPE], MAX77705_SLOWEST_LX_SLOPE);
/* fast charge timer disable */
/* restart threshold disable */
/* pre-qual charge disable */
- reg_data = (MAX77705_FCHGTIME_DISABLE << MAX77705_FCHGTIME_SHIFT) |
- (MAX77705_CHG_RSTRT_DISABLE << MAX77705_CHG_RSTRT_SHIFT) |
- (MAX77705_CHG_PQEN_DISABLE << MAX77705_PQEN_SHIFT);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_01,
- (MAX77705_FCHGTIME_MASK |
- MAX77705_CHG_RSTRT_MASK |
- MAX77705_PQEN_MASK),
- reg_data);
-
- /* OTG off(UNO on), boost off */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
- MAX77705_OTG_CTRL, 0);
+ regmap_field_write(chg->rfield[MAX77705_FCHGTIME], MAX77705_FCHGTIME_DISABLE);
+ regmap_field_write(chg->rfield[MAX77705_CHG_RSTRT], MAX77705_CHG_RSTRT_DISABLE);
+ regmap_field_write(chg->rfield[MAX77705_CHG_PQEN], MAX77705_CHG_PQEN_DISABLE);
+
+ regmap_field_write(chg->rfield[MAX77705_MODE],
+ MAX77705_CHG_MASK | MAX77705_BUCK_MASK);
/* charge current 450mA(default) */
/* otg current limit 900mA */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_02,
- MAX77705_OTG_ILIM_MASK,
- MAX77705_OTG_ILIM_900 << MAX77705_OTG_ILIM_SHIFT);
+ regmap_field_write(chg->rfield[MAX77705_OTG_ILIM], MAX77705_OTG_ILIM_900);
/* BAT to SYS OCP 4.80A */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_05,
- MAX77705_REG_B2SOVRC_MASK,
- MAX77705_B2SOVRC_4_8A << MAX77705_REG_B2SOVRC_SHIFT);
+ regmap_field_write(chg->rfield[MAX77705_REG_B2SOVRC], MAX77705_B2SOVRC_4_8A);
+
/* top off current 150mA */
/* top off timer 30min */
- reg_data = (MAX77705_TO_ITH_150MA << MAX77705_TO_ITH_SHIFT) |
- (MAX77705_TO_TIME_30M << MAX77705_TO_TIME_SHIFT) |
- (MAX77705_SYS_TRACK_DISABLE << MAX77705_SYS_TRACK_DIS_SHIFT);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_03,
- (MAX77705_TO_ITH_MASK |
- MAX77705_TO_TIME_MASK |
- MAX77705_SYS_TRACK_DIS_MASK), reg_data);
+ regmap_field_write(chg->rfield[MAX77705_TO], MAX77705_TO_ITH_150MA);
+ regmap_field_write(chg->rfield[MAX77705_TO_TIME], MAX77705_TO_TIME_30M);
+ regmap_field_write(chg->rfield[MAX77705_SYS_TRACK], MAX77705_SYS_TRACK_DISABLE);
/* cv voltage 4.2V or 4.35V */
/* MINVSYS 3.6V(default) */
@@ -478,28 +446,21 @@ static void max77705_charger_initialize(struct max77705_charger_data *chg)
max77705_set_float_voltage(chg, info->voltage_max_design_uv);
}
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
- MAX77705_VCHGIN_REG_MASK, MAX77705_VCHGIN_4_5);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
- MAX77705_WCIN_REG_MASK, MAX77705_WCIN_4_5);
+ regmap_field_write(chg->rfield[MAX77705_VCHGIN], MAX77705_VCHGIN_4_5);
+ regmap_field_write(chg->rfield[MAX77705_WCIN], MAX77705_WCIN_4_5);
/* Watchdog timer */
regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
MAX77705_WDTEN_MASK, 0);
- /* Active Discharge Enable */
- regmap_update_bits(regmap, MAX77705_PMIC_REG_MAINCTRL1, 1, 1);
-
/* VBYPSET=5.0V */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_11, MAX77705_VBYPSET_MASK, 0);
+ regmap_field_write(chg->rfield[MAX77705_VBYPSET], 0);
/* Switching Frequency : 1.5MHz */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_08, MAX77705_REG_FSW_MASK,
- (MAX77705_CHG_FSW_1_5MHz << MAX77705_REG_FSW_SHIFT));
+ regmap_field_write(chg->rfield[MAX77705_REG_FSW], MAX77705_CHG_FSW_1_5MHz);
/* Auto skip mode */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12, MAX77705_REG_DISKIP_MASK,
- (MAX77705_AUTO_SKIP << MAX77705_REG_DISKIP_SHIFT));
+ regmap_field_write(chg->rfield[MAX77705_REG_DISKIP], MAX77705_AUTO_SKIP);
}
static int max77705_charger_probe(struct i2c_client *i2c)
@@ -523,11 +484,13 @@ static int max77705_charger_probe(struct i2c_client *i2c)
if (IS_ERR(chg->regmap))
return PTR_ERR(chg->regmap);
- ret = regmap_update_bits(chg->regmap,
- MAX77705_CHG_REG_INT_MASK,
- MAX77705_CHGIN_IM, 0);
- if (ret)
- return ret;
+ for (int i = 0; i < MAX77705_N_REGMAP_FIELDS; i++) {
+ chg->rfield[i] = devm_regmap_field_alloc(dev, chg->regmap,
+ max77705_reg_field[i]);
+ if (IS_ERR(chg->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(chg->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
pscfg.fwnode = dev_fwnode(dev);
pscfg.drv_data = chg;
@@ -538,7 +501,7 @@ static int max77705_charger_probe(struct i2c_client *i2c)
max77705_charger_irq_chip.irq_drv_data = chg;
ret = devm_regmap_add_irq_chip(chg->dev, chg->regmap, i2c->irq,
- IRQF_ONESHOT | IRQF_SHARED, 0,
+ IRQF_ONESHOT, 0,
&max77705_charger_irq_chip,
&irq_data);
if (ret)
@@ -556,6 +519,15 @@ static int max77705_charger_probe(struct i2c_client *i2c)
max77705_charger_initialize(chg);
+ ret = devm_request_threaded_irq(dev, regmap_irq_get_virq(irq_data, MAX77705_CHGIN_I),
+ NULL, max77705_chgin_irq,
+ IRQF_TRIGGER_NONE,
+ "chgin-irq", chg);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to Request chgin IRQ\n");
+ goto destroy_wq;
+ }
+
ret = max77705_charger_enable(chg);
if (ret) {
dev_err_probe(dev, ret, "failed to enable charge\n");
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 92d1b62ea239..e9389876229e 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -109,16 +109,13 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
if (err < 0) {
pr_err("%s: unable to create char device\n",
info->name);
- goto kfree_pps;
+ goto pps_register_source_exit;
}
dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
return pps;
-kfree_pps:
- kfree(pps);
-
pps_register_source_exit:
pr_err("%s: unable to register source\n", info->name);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 9463232af8d2..c6b8b6478276 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -374,6 +374,7 @@ int pps_register_cdev(struct pps_device *pps)
pps->info.name);
err = -EBUSY;
}
+ kfree(pps);
goto out_unlock;
}
pps->id = err;
@@ -383,13 +384,11 @@ int pps_register_cdev(struct pps_device *pps)
pps->dev.devt = MKDEV(pps_major, pps->id);
dev_set_drvdata(&pps->dev, pps);
dev_set_name(&pps->dev, "pps%d", pps->id);
+ pps->dev.release = pps_device_destruct;
err = device_register(&pps->dev);
if (err)
goto free_idr;
- /* Override the release function with our own */
- pps->dev.release = pps_device_destruct;
-
pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
pps->id);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index b352df4cd3f9..f329263f33aa 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
#define PTP_MAX_TIMESTAMPS 128
#define PTP_BUF_TIMESTAMPS 30
#define PTP_DEFAULT_MAX_VCLOCKS 20
+#define PTP_MAX_VCLOCKS_LIMIT (KMALLOC_MAX_SIZE/(sizeof(int)))
#define PTP_MAX_CHANNELS 2048
enum {
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 6b1b8f57cd95..200eaf500696 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -284,7 +284,7 @@ static ssize_t max_vclocks_store(struct device *dev,
size_t size;
u32 max;
- if (kstrtou32(buf, 0, &max) || max == 0)
+ if (kstrtou32(buf, 0, &max) || max == 0 || max > PTP_MAX_VCLOCKS_LIMIT)
return -EINVAL;
if (max == ptp->max_vclocks)
diff --git a/drivers/pwm/pwm-loongson.c b/drivers/pwm/pwm-loongson.c
index 1ba16168cbb4..31a57edecfd0 100644
--- a/drivers/pwm/pwm-loongson.c
+++ b/drivers/pwm/pwm-loongson.c
@@ -49,7 +49,7 @@
#define LOONGSON_PWM_CTRL_REG_DZONE BIT(10) /* Anti-dead Zone Enable Bit */
/* default input clk frequency for the ACPI case */
-#define LOONGSON_PWM_FREQ_DEFAULT 50000 /* Hz */
+#define LOONGSON_PWM_FREQ_DEFAULT 50000000 /* Hz */
struct pwm_loongson_ddata {
struct clk *clk;
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 0125e73b98df..7a86cb090f76 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -36,7 +36,7 @@
#define CLKDIV_MAX 7
#define HSPCLKDIV_MAX 7
-#define PERIOD_MAX 0xFFFF
+#define PERIOD_MAX 0x10000
/* compare module registers */
#define CMPA 0x12
@@ -65,14 +65,10 @@
#define AQCTL_ZRO_FRCHIGH BIT(1)
#define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0))
-#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_PRD_FRCHIGH | \
- AQCTL_ZRO_FRCHIGH)
-#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_PRD_FRCLOW | \
- AQCTL_ZRO_FRCLOW)
-#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_PRD_FRCHIGH | \
- AQCTL_ZRO_FRCHIGH)
-#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_PRD_FRCLOW | \
- AQCTL_ZRO_FRCLOW)
+#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_ZRO_FRCHIGH)
+#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_ZRO_FRCLOW)
+#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_ZRO_FRCHIGH)
+#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_ZRO_FRCLOW)
#define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6))
#define AQSFRC_RLDCSF_ZRO 0
@@ -108,7 +104,6 @@ struct ehrpwm_pwm_chip {
unsigned long clk_rate;
void __iomem *mmio_base;
unsigned long period_cycles[NUM_PWM_CHANNEL];
- enum pwm_polarity polarity[NUM_PWM_CHANNEL];
struct clk *tbclk;
struct ehrpwm_context ctx;
};
@@ -166,7 +161,7 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div,
*prescale_div = (1 << clkdiv) *
(hspclkdiv ? (hspclkdiv * 2) : 1);
- if (*prescale_div > rqst_prescaler) {
+ if (*prescale_div >= rqst_prescaler) {
*tb_clk_div = (clkdiv << TBCTL_CLKDIV_SHIFT) |
(hspclkdiv << TBCTL_HSPCLKDIV_SHIFT);
return 0;
@@ -177,51 +172,20 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div,
return 1;
}
-static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan)
-{
- u16 aqctl_val, aqctl_mask;
- unsigned int aqctl_reg;
-
- /*
- * Configure PWM output to HIGH/LOW level on counter
- * reaches compare register value and LOW/HIGH level
- * on counter value reaches period register value and
- * zero value on counter
- */
- if (chan == 1) {
- aqctl_reg = AQCTLB;
- aqctl_mask = AQCTL_CBU_MASK;
-
- if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
- aqctl_val = AQCTL_CHANB_POLINVERSED;
- else
- aqctl_val = AQCTL_CHANB_POLNORMAL;
- } else {
- aqctl_reg = AQCTLA;
- aqctl_mask = AQCTL_CAU_MASK;
-
- if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
- aqctl_val = AQCTL_CHANA_POLINVERSED;
- else
- aqctl_val = AQCTL_CHANA_POLNORMAL;
- }
-
- aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
- ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
-}
-
/*
* period_ns = 10^9 * (ps_divval * period_cycles) / PWM_CLK_RATE
* duty_ns = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE
*/
static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- u64 duty_ns, u64 period_ns)
+ u64 duty_ns, u64 period_ns, enum pwm_polarity polarity)
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
u32 period_cycles, duty_cycles;
u16 ps_divval, tb_divval;
unsigned int i, cmp_reg;
unsigned long long c;
+ u16 aqctl_val, aqctl_mask;
+ unsigned int aqctl_reg;
if (period_ns > NSEC_PER_SEC)
return -ERANGE;
@@ -231,15 +195,10 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
do_div(c, NSEC_PER_SEC);
period_cycles = (unsigned long)c;
- if (period_cycles < 1) {
- period_cycles = 1;
- duty_cycles = 1;
- } else {
- c = pc->clk_rate;
- c = c * duty_ns;
- do_div(c, NSEC_PER_SEC);
- duty_cycles = (unsigned long)c;
- }
+ c = pc->clk_rate;
+ c = c * duty_ns;
+ do_div(c, NSEC_PER_SEC);
+ duty_cycles = (unsigned long)c;
/*
* Period values should be same for multiple PWM channels as IP uses
@@ -265,52 +224,73 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
pc->period_cycles[pwm->hwpwm] = period_cycles;
/* Configure clock prescaler to support Low frequency PWM wave */
- if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval,
+ if (set_prescale_div(DIV_ROUND_UP(period_cycles, PERIOD_MAX), &ps_divval,
&tb_divval)) {
dev_err(pwmchip_parent(chip), "Unsupported values\n");
return -EINVAL;
}
- pm_runtime_get_sync(pwmchip_parent(chip));
-
- /* Update clock prescaler values */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
-
/* Update period & duty cycle with presacler division */
period_cycles = period_cycles / ps_divval;
duty_cycles = duty_cycles / ps_divval;
- /* Configure shadow loading on Period register */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW);
+ if (period_cycles < 1)
+ period_cycles = 1;
- ehrpwm_write(pc->mmio_base, TBPRD, period_cycles);
+ pm_runtime_get_sync(pwmchip_parent(chip));
- /* Configure ehrpwm counter for up-count mode */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
- TBCTL_CTRMODE_UP);
+ /* Update clock prescaler values */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
- if (pwm->hwpwm == 1)
+ if (pwm->hwpwm == 1) {
/* Channel 1 configured with compare B register */
cmp_reg = CMPB;
- else
+
+ aqctl_reg = AQCTLB;
+ aqctl_mask = AQCTL_CBU_MASK;
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ aqctl_val = AQCTL_CHANB_POLINVERSED;
+ else
+ aqctl_val = AQCTL_CHANB_POLNORMAL;
+
+ /* if duty_cycle is big, don't toggle on CBU */
+ if (duty_cycles > period_cycles)
+ aqctl_val &= ~AQCTL_CBU_MASK;
+
+ } else {
/* Channel 0 configured with compare A register */
cmp_reg = CMPA;
- ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
+ aqctl_reg = AQCTLA;
+ aqctl_mask = AQCTL_CAU_MASK;
- pm_runtime_put_sync(pwmchip_parent(chip));
+ if (polarity == PWM_POLARITY_INVERSED)
+ aqctl_val = AQCTL_CHANA_POLINVERSED;
+ else
+ aqctl_val = AQCTL_CHANA_POLNORMAL;
- return 0;
-}
+ /* if duty_cycle is big, don't toggle on CAU */
+ if (duty_cycles > period_cycles)
+ aqctl_val &= ~AQCTL_CAU_MASK;
+ }
-static int ehrpwm_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+ aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
+ ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
+
+ /* Configure shadow loading on Period register */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW);
+
+ ehrpwm_write(pc->mmio_base, TBPRD, period_cycles - 1);
- /* Configuration of polarity in hardware delayed, do at enable */
- pc->polarity[pwm->hwpwm] = polarity;
+ /* Configure ehrpwm counter for up-count mode */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
+ TBCTL_CTRMODE_UP);
+
+ if (!(duty_cycles > period_cycles))
+ ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
+
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -339,9 +319,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
- /* Channels polarity can be configured from action qualifier module */
- configure_polarity(pc, pwm->hwpwm);
-
/* Enable TBCLK */
ret = clk_enable(pc->tbclk);
if (ret) {
@@ -391,12 +368,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
- if (pwm_is_enabled(pwm)) {
- dev_warn(pwmchip_parent(chip), "Removing PWM device without disabling\n");
- pm_runtime_put_sync(pwmchip_parent(chip));
- }
-
- /* set period value to zero on free */
+ /* Don't let a pwm without consumer block requests to the other channel */
pc->period_cycles[pwm->hwpwm] = 0;
}
@@ -411,10 +383,6 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ehrpwm_pwm_disable(chip, pwm);
enabled = false;
}
-
- err = ehrpwm_pwm_set_polarity(chip, pwm, state->polarity);
- if (err)
- return err;
}
if (!state->enabled) {
@@ -423,7 +391,7 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period, state->polarity);
if (err)
return err;
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index 9df726f10ad1..6d609c42e479 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -257,7 +257,8 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev,
struct device_node *np,
struct scmi_regulator_info *rinfo)
{
- u32 dom, ret;
+ u32 dom;
+ int ret;
ret = of_property_read_u32(np, "reg", &dom);
if (ret)
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 842e4b6cc5f9..5e3eb7b86a0e 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -340,7 +340,7 @@ EXPORT_SYMBOL_GPL(pru_rproc_put);
*/
int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
{
- struct pru_rproc *pru = rproc->priv;
+ struct pru_rproc *pru;
unsigned int reg;
u32 mask, set;
u16 idx;
@@ -352,6 +352,7 @@ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent))
return -ENODEV;
+ pru = rproc->priv;
/* pointer is 16 bit and index is 8-bit so mask out the rest */
idx_mask = (c >= PRU_C28) ? 0xFFFF : 0xFF;
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 4ee5e67a9f03..769c6d6d6a73 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -156,9 +156,6 @@ int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout)
int ret;
ret = wait_for_completion_timeout(&q6v5->start_done, timeout);
- if (!ret)
- disable_irq(q6v5->handover_irq);
-
return !ret ? -ETIMEDOUT : 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start);
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 0c0199fb0e68..3087d895b87f 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -498,6 +498,8 @@ static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
release_firmware(dp_fw);
}
+#define MSM8974_B00_OFFSET 0x1000
+
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
@@ -516,7 +518,14 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
return -EBUSY;
}
- memcpy(mba_region, fw->data, fw->size);
+ if ((qproc->version == MSS_MSM8974 ||
+ qproc->version == MSS_MSM8226 ||
+ qproc->version == MSS_MSM8926) &&
+ fw->size > MSM8974_B00_OFFSET &&
+ !memcmp(fw->data, ELFMAG, SELFMAG))
+ memcpy(mba_region, fw->data + MSM8974_B00_OFFSET, fw->size - MSM8974_B00_OFFSET);
+ else
+ memcpy(mba_region, fw->data, fw->size);
q6v5_debug_policy_load(qproc, mba_region);
memunmap(mba_region);
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 02e29171cbbe..f3ec5b06261e 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -42,6 +42,7 @@ struct qcom_pas_data {
int pas_id;
int dtb_pas_id;
int lite_pas_id;
+ int lite_dtb_pas_id;
unsigned int minidump_id;
bool auto_boot;
bool decrypt_shutdown;
@@ -80,6 +81,7 @@ struct qcom_pas {
int pas_id;
int dtb_pas_id;
int lite_pas_id;
+ int lite_dtb_pas_id;
unsigned int minidump_id;
int crash_reason_smem;
unsigned int smem_host_id;
@@ -226,6 +228,8 @@ static int qcom_pas_load(struct rproc *rproc, const struct firmware *fw)
if (pas->lite_pas_id)
ret = qcom_scm_pas_shutdown(pas->lite_pas_id);
+ if (pas->lite_dtb_pas_id)
+ qcom_scm_pas_shutdown(pas->lite_dtb_pas_id);
if (pas->dtb_pas_id) {
ret = request_firmware(&pas->dtb_firmware, pas->dtb_firmware_name, pas->dev);
@@ -722,6 +726,7 @@ static int qcom_pas_probe(struct platform_device *pdev)
pas->minidump_id = desc->minidump_id;
pas->pas_id = desc->pas_id;
pas->lite_pas_id = desc->lite_pas_id;
+ pas->lite_dtb_pas_id = desc->lite_dtb_pas_id;
pas->info_name = desc->sysmon_name;
pas->smem_host_id = desc->smem_host_id;
pas->decrypt_shutdown = desc->decrypt_shutdown;
@@ -1085,6 +1090,7 @@ static const struct qcom_pas_data x1e80100_adsp_resource = {
.pas_id = 1,
.dtb_pas_id = 0x24,
.lite_pas_id = 0x1f,
+ .lite_dtb_pas_id = 0x29,
.minidump_id = 5,
.auto_boot = true,
.proxy_pd_names = (char*[]){
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 87c944d4b4f3..1cbe457b4e96 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1368,7 +1368,7 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->mbox_client.knows_txdone = true;
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
- if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
+ if (PTR_ERR(edge->mbox_chan) != -ENOENT) {
ret = dev_err_probe(dev, PTR_ERR(edge->mbox_chan),
"failed to acquire IPC mailbox\n");
goto put_node;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 869b5d4db44c..d953225f6cc2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1313,10 +1313,7 @@ static int sas_check_parent_topology(struct domain_device *child)
int i;
int res = 0;
- if (!child->parent)
- return 0;
-
- if (!dev_is_expander(child->parent->dev_type))
+ if (!dev_parent_is_expander(child))
return 0;
parent_ex = &child->parent->ex_dev;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index dc74ebc6405a..66fd301f03b0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -987,11 +987,9 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
list_for_each_entry_safe(mpt3sas_phy, next_phy,
&mpt3sas_port->phy_list, port_siblings) {
if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
- dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
- "remove: sas_addr(0x%016llx), phy(%d)\n",
- (unsigned long long)
- mpt3sas_port->remote_identify.sas_address,
- mpt3sas_phy->phy_id);
+ ioc_info(ioc, "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
mpt3sas_phy->phy_belongs_to_port = 0;
if (!ioc->remove_host)
sas_port_delete_phy(mpt3sas_port->port,
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 95af3bb03834..a58abd796603 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -498,14 +498,14 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
/* Temporary dma mapping, used only in the scope of this function */
mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
&mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, mbox_addr))
+ if (!mbox)
return false;
/* These are the base addresses for the command memory mailbox array */
cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
&cs->cmd_mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
+ if (!cmd_mbox) {
dev_err(&pdev->dev, "Failed to map command mailbox\n");
goto out_free;
}
@@ -520,7 +520,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
&cs->stat_mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
+ if (!stat_mbox) {
dev_err(&pdev->dev, "Failed to map status mailbox\n");
goto out_free;
}
@@ -533,7 +533,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
sizeof(struct myrs_fwstat),
&cs->fwstat_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
+ if (!cs->fwstat_buf) {
dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
cs->fwstat_buf = NULL;
goto out_free;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 42a4eeac24c9..8005995a317c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2163,8 +2163,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
- if (!((t->dev->parent) &&
- (dev_is_expander(t->dev->parent->dev_type)))) {
+ if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
@@ -4168,7 +4167,6 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
- struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@@ -4186,10 +4184,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
- phy_id = parent_dev->ex_dev.ex_phy->phy_id;
- else
- phy_id = pm8001_dev->attached_phy;
+
+ phy_id = pm80xx_get_local_phy_id(dev);
+
opc = OPC_INB_REG_DEV;
linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f7067878b34f..c5354263b45e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -130,6 +130,16 @@ static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
}
}
+u32 pm80xx_get_local_phy_id(struct domain_device *dev)
+{
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+
+ if (dev_parent_is_expander(dev))
+ return dev->parent->ex_dev.ex_phy->phy_id;
+
+ return pm8001_dev->attached_phy;
+}
+
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *target_pm8001_dev)
{
@@ -477,7 +487,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
struct pm8001_device *pm8001_dev = dev->lldd_dev;
bool internal_abort = sas_is_internal_abort(task);
struct pm8001_hba_info *pm8001_ha;
- struct pm8001_port *port = NULL;
+ struct pm8001_port *port;
struct pm8001_ccb_info *ccb;
unsigned long flags;
u32 n_elem = 0;
@@ -502,8 +512,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
spin_lock_irqsave(&pm8001_ha->lock, flags);
- pm8001_dev = dev->lldd_dev;
- port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
+ port = dev->port->lldd_port;
if (!internal_abort &&
(DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
@@ -701,7 +710,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
dev->lldd_dev = pm8001_device;
pm8001_device->dev_type = dev->dev_type;
pm8001_device->dcompletion = &completion;
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(dev)) {
int phy_id;
phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
@@ -766,7 +775,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
- pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
+
+ /*
+ * The phy array only contains local phys. Thus, we cannot clear
+ * phy_attached for a device behind an expander.
+ */
+ if (!dev_parent_is_expander(dev))
+ pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
@@ -1048,7 +1063,7 @@ int pm8001_abort_task(struct sas_task *task)
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev;
int rc = TMF_RESP_FUNC_FAILED, ret;
- u32 phy_id, port_id;
+ u32 port_id;
struct sas_task_slow slow_task;
if (!task->lldd_task || !task->dev)
@@ -1057,7 +1072,6 @@ int pm8001_abort_task(struct sas_task *task)
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- phy_id = pm8001_dev->attached_phy;
if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
// If the controller is seeing fatal errors
@@ -1089,7 +1103,8 @@ int pm8001_abort_task(struct sas_task *task)
if (pm8001_ha->chip_id == chip_8006) {
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
- struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+ u32 phy_id = pm80xx_get_local_phy_id(dev);
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 334485bb2c12..91b2cdf3535c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -798,6 +798,7 @@ void pm8001_setds_completion(struct domain_device *dev);
void pm8001_tmf_aborted(struct sas_task *task);
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *dev);
+u32 pm80xx_get_local_phy_id(struct domain_device *dev);
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c1bae995a412..31960b72c1e9 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2340,8 +2340,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
- if (!((t->dev->parent) &&
- (dev_is_expander(t->dev->parent->dev_type)))) {
+ if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
@@ -4780,7 +4779,6 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
- struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@@ -4799,10 +4797,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
- phy_id = parent_dev->ex_dev.ex_phy->phy_id;
- else
- phy_id = pm8001_dev->attached_phy;
+
+ phy_id = pm80xx_get_local_phy_id(dev);
opc = OPC_INB_REG_DEV;
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 91bbd3b75bff..ccd4485087a1 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1798,7 +1798,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
switch (rval) {
case QLA_SUCCESS:
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)
@@ -3649,7 +3649,7 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
p->e.extra_rx_xchg_address, p->e.extra_control_flags,
sp->handle, sp->remap.req.len, bsg_job);
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index be211ff22acb..6a2e1c7fd125 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2059,11 +2059,11 @@ static void qla_marker_sp_done(srb_t *sp, int res)
int cnt = 5; \
do { \
if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
- _rval = EINVAL; \
+ _rval = -EINVAL; \
break; \
} \
_rval = qla2x00_start_sp(_sp); \
- if (_rval == EAGAIN) \
+ if (_rval == -EAGAIN) \
msleep(1); \
else \
break; \
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8ee2e337c9e1..316594aa40cc 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -419,7 +419,7 @@ static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
switch (rval) {
case QLA_SUCCESS:
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(PURLS_MSLEEP_INTERVAL);
cnt++;
if (cnt < PURLS_RETRY_COUNT)
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index 7c349a94b45c..f45537546553 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -2165,10 +2165,18 @@ static struct device *svs_add_device_link(struct svs_platform *svsp,
return dev;
}
+static void svs_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int svs_mt8192_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
u32 idx;
+ int ret;
svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
if (IS_ERR(svsp->rst))
@@ -2179,6 +2187,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get lvts device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
struct svs_bank *svsb = &svsp->banks[idx];
@@ -2188,6 +2197,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
case SVSB_SWID_CPU_LITTLE:
case SVSB_SWID_CPU_BIG:
svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
break;
case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
@@ -2207,6 +2217,11 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
@@ -2216,11 +2231,13 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
u32 idx;
+ int ret;
dev = svs_add_device_link(svsp, "thermal-sensor");
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get thermal device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
struct svs_bank *svsb = &svsp->banks[idx];
@@ -2230,6 +2247,7 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
case SVSB_SWID_CPU_LITTLE:
case SVSB_SWID_CPU_BIG:
svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
break;
case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
@@ -2246,6 +2264,11 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index fdab2b1067db..c6f7d5c9c493 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -453,13 +453,10 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
trace_rpmh_tx_done(drv, i, req);
- /*
- * If wake tcs was re-purposed for sending active
- * votes, clear AMC trigger & enable modes and
+ /* Clear AMC trigger & enable modes and
* disable interrupt for this TCS
*/
- if (!drv->tcs[ACTIVE_TCS].num_tcs)
- __tcs_set_trigger(drv, i, false);
+ __tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a388f372b27a..19c2a6eb9922 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2449,7 +2449,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (rc > ctlr->num_chipselect) {
dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
nc, rc);
- return rc;
+ return -EINVAL;
}
if ((of_property_present(nc, "parallel-memories")) &&
(!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
diff --git a/drivers/staging/media/ipu7/ipu7.c b/drivers/staging/media/ipu7/ipu7.c
index 1b4f01db13ca..ee6b63717ed3 100644
--- a/drivers/staging/media/ipu7/ipu7.c
+++ b/drivers/staging/media/ipu7/ipu7.c
@@ -2248,20 +2248,13 @@ void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev)
}
EXPORT_SYMBOL_NS_GPL(ipu7_dump_fw_error_log, "INTEL_IPU7");
-static int ipu7_pci_config_setup(struct pci_dev *dev)
+static void ipu7_pci_config_setup(struct pci_dev *dev)
{
u16 pci_command;
- int ret;
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_command);
-
- ret = pci_enable_msi(dev);
- if (ret)
- dev_err(&dev->dev, "Failed to enable msi (%d)\n", ret);
-
- return ret;
}
static int ipu7_map_fw_code_region(struct ipu7_bus_device *sys,
@@ -2435,7 +2428,6 @@ static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!isp)
return -ENOMEM;
- dev_set_name(dev, "intel-ipu7");
isp->pdev = pdev;
INIT_LIST_HEAD(&isp->devices);
@@ -2510,13 +2502,15 @@ static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dma_set_max_seg_size(dev, UINT_MAX);
- ret = ipu7_pci_config_setup(pdev);
- if (ret)
- return ret;
+ ipu7_pci_config_setup(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to alloc irq vector\n");
ret = ipu_buttress_init(isp);
if (ret)
- return ret;
+ goto pci_irq_free;
dev_info(dev, "firmware cpd file: %s\n", isp->cpd_fw_name);
@@ -2632,6 +2626,8 @@ static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release_firmware(isp->cpd_fw);
buttress_exit:
ipu_buttress_exit(isp);
+pci_irq_free:
+ pci_free_irq_vectors(pdev);
return ret;
}
@@ -2648,6 +2644,9 @@ static void ipu7_pci_remove(struct pci_dev *pdev)
if (!IS_ERR_OR_NULL(isp->fw_code_region))
vfree(isp->fw_code_region);
+ ipu7_mmu_cleanup(isp->isys->mmu);
+ ipu7_mmu_cleanup(isp->psys->mmu);
+
ipu7_bus_del_devices(pdev);
pm_runtime_forbid(&pdev->dev);
@@ -2656,9 +2655,6 @@ static void ipu7_pci_remove(struct pci_dev *pdev)
ipu_buttress_exit(isp);
release_firmware(isp->cpd_fw);
-
- ipu7_mmu_cleanup(isp->psys->mmu);
- ipu7_mmu_cleanup(isp->isys->mmu);
}
static void ipu7_pci_reset_prepare(struct pci_dev *pdev)
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 2a7d253d9c55..8e50476eb71f 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -321,6 +321,14 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
if (unlikely(len <= 0)) {
ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
goto err_free_shm_pages;
+ } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
+ /*
+ * If we only got a few pages, update to release the
+ * correct amount below.
+ */
+ shm->num_pages = len / PAGE_SIZE;
+ ret = ERR_PTR(-ENOMEM);
+ goto err_put_shm_pages;
}
/*
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 2c7f3f9a26eb..a6bb01082ec6 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -34,7 +34,8 @@ config QCOM_SPMI_TEMP_ALARM
config QCOM_LMH
tristate "Qualcomm Limits Management Hardware"
- depends on ARCH_QCOM && QCOM_SCM
+ depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_SCM
help
This enables initialization of Qualcomm limits management
hardware(LMh). LMh allows for hardware-enforced mitigation for cpus based on
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index 75eaa9a68ab8..c681a3c89ffa 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -5,6 +5,8 @@
*/
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/platform_device.h>
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index d52efe3f658c..8333fc7f3d55 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -1073,6 +1073,7 @@ static void tb_dp_dprx_work(struct work_struct *work)
if (tunnel->callback)
tunnel->callback(tunnel, tunnel->callback_data);
+ tb_tunnel_put(tunnel);
}
static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
@@ -1100,8 +1101,8 @@ static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
if (tunnel->dprx_started) {
tunnel->dprx_started = false;
tunnel->dprx_canceled = true;
- cancel_delayed_work(&tunnel->dprx_work);
- tb_tunnel_put(tunnel);
+ if (cancel_delayed_work(&tunnel->dprx_work))
+ tb_tunnel_put(tunnel);
}
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 7fc535452c0b..553d8c70352b 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -461,6 +461,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr);
static void gsmld_write_trigger(struct gsm_mux *gsm);
static void gsmld_write_task(struct work_struct *work);
+static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci);
/**
* gsm_fcs_add - update FCS
@@ -2174,7 +2175,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr) {
- gsm_modem_update(dlci, 0);
+ gsm_modem_send_initial_msc(dlci);
} else {
/* Start keep-alive control */
gsm->ka_num = 0;
@@ -4161,6 +4162,28 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
return gsm_control_wait(dlci->gsm, ctrl);
}
+/**
+ * gsm_modem_send_initial_msc - Send initial modem status message
+ *
+ * @dlci channel
+ *
+ * Send an initial MSC message after DLCI open to set the initial
+ * modem status lines. This is only done for basic mode.
+ * Does not wait for a response as we cannot block the input queue
+ * processing.
+ */
+static int gsm_modem_send_initial_msc(struct gsm_dlci *dlci)
+{
+ u8 modembits[2];
+
+ if (dlci->adaption != 1 || dlci->gsm->encoding != GSM_BASIC_OPT)
+ return 0;
+
+ modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
+ modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
+ return gsm_control_command(dlci->gsm, CMD_MSC, (const u8 *)&modembits, 2);
+}
+
/**
* gsm_modem_update - send modem status line state
* @dlci: channel
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index ce260e9949c3..d9a0100b92d2 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1644,6 +1644,8 @@ static int max310x_i2c_probe(struct i2c_client *client)
port_client = devm_i2c_new_dummy_device(&client->dev,
client->adapter,
port_addr);
+ if (IS_ERR(port_client))
+ return PTR_ERR(port_client);
regcfg_i2c.name = max310x_regmap_name(i);
regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 4bd7d491e3c5..0086816b27cd 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -512,6 +512,8 @@ static ssize_t pm_qos_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ guard(mutex)(&hba->pm_qos_mutex);
+
return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 9a43102b2b21..96a0f5fcc0e5 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1045,6 +1045,7 @@ EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
*/
void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
if (hba->pm_qos_enabled)
return;
@@ -1061,6 +1062,8 @@ void ufshcd_pm_qos_init(struct ufs_hba *hba)
*/
void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
@@ -1075,6 +1078,8 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
*/
static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
@@ -10669,6 +10674,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
spin_lock_init(&hba->clk_gating.lock);
+ /* Initialize mutex for PM QoS request synchronization */
+ mutex_init(&hba->pm_qos_mutex);
+
/*
* Set the default power management level for runtime and system PM.
* Host controller drivers can override them in their
@@ -10756,6 +10764,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
mutex_init(&hba->ee_ctrl_mutex);
mutex_init(&hba->wb_mutex);
+
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index f19efad4d6f8..3f8e2e27697f 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -111,7 +111,6 @@ static void hv_uio_channel_cb(void *context)
struct hv_device *hv_dev;
struct hv_uio_private_data *pdata;
- chan->inbound.ring_buffer->interrupt_mask = 1;
virt_mb();
/*
@@ -183,8 +182,6 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
return;
}
- /* Disable interrupts on sub channel */
- new_sc->inbound.ring_buffer->interrupt_mask = 1;
set_channel_read_mode(new_sc, HV_CALL_ISR);
ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
if (ret) {
@@ -227,9 +224,7 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
ret = vmbus_connect_ring(dev->channel,
hv_uio_channel_cb, dev->channel);
- if (ret == 0)
- dev->channel->inbound.ring_buffer->interrupt_mask = 1;
- else
+ if (ret)
atomic_dec(&pdata->refcnt);
return ret;
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index 8c361b8394e9..5e7b88ca8b96 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -85,7 +85,7 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
if (!cdnsp) {
ret = -ENOMEM;
- goto disable_pci;
+ goto put_pci;
}
}
@@ -168,9 +168,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
if (!pci_is_enabled(func))
kfree(cdnsp);
-disable_pci:
- pci_disable_device(pdev);
-
put_pci:
pci_dev_put(func);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index f94ea196ce54..6bcac85c5550 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1750,6 +1750,8 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
cdev->use_os_string = true;
cdev->b_vendor_code = gi->b_vendor_code;
memcpy(cdev->qw_sign, gi->qw_sign, OS_STRING_QW_SIGN_LEN);
+ } else {
+ cdev->use_os_string = false;
}
if (gadget_is_otg(gadget) && !otg_desc[0]) {
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index dcf31a592f5d..4b5f03f683f7 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1916,7 +1916,7 @@ max3421_probe(struct spi_device *spi)
if (hcd) {
kfree(max3421_hcd->tx);
kfree(max3421_hcd->rx);
- if (max3421_hcd->spi_thread)
+ if (!IS_ERR_OR_NULL(max3421_hcd->spi_thread))
kthread_stop(max3421_hcd->spi_thread);
usb_put_hcd(hcd);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4f8f5aab109d..6309200e93dc 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1262,19 +1262,16 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* Stopped state, but it will soon change to Running.
*
* Assume this bug on unexpected Stop Endpoint failures.
- * Keep retrying until the EP starts and stops again.
+ * Keep retrying until the EP starts and stops again, on
+ * chips where this is known to help. Wait for 100ms.
*/
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
GET_EP_CTX_STATE(ep_ctx));
- /*
- * Don't retry forever if we guessed wrong or a defective HC never starts
- * the EP or says 'Running' but fails the command. We must give back TDs.
- */
- if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
- break;
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 6497c4e81e95..9bf8fc6247ba 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -147,6 +147,7 @@ config USB_APPLEDISPLAY
config USB_QCOM_EUD
tristate "QCOM Embedded USB Debugger(EUD) Driver"
depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_SCM
select USB_ROLE_SWITCH
help
This module enables support for Qualcomm Technologies, Inc.
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
index 83079c414b4f..05c8bdc943a8 100644
--- a/drivers/usb/misc/qcom_eud.c
+++ b/drivers/usb/misc/qcom_eud.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/usb/role.h>
+#include <linux/firmware/qcom/qcom_scm.h>
#define EUD_REG_INT1_EN_MASK 0x0024
#define EUD_REG_INT_STATUS_1 0x0044
@@ -34,7 +35,7 @@ struct eud_chip {
struct device *dev;
struct usb_role_switch *role_sw;
void __iomem *base;
- void __iomem *mode_mgr;
+ phys_addr_t mode_mgr;
unsigned int int_status;
int irq;
bool enabled;
@@ -43,18 +44,29 @@ struct eud_chip {
static int enable_eud(struct eud_chip *priv)
{
+ int ret;
+
+ ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 1);
+ if (ret)
+ return ret;
+
writel(EUD_ENABLE, priv->base + EUD_REG_CSR_EUD_EN);
writel(EUD_INT_VBUS | EUD_INT_SAFE_MODE,
priv->base + EUD_REG_INT1_EN_MASK);
- writel(1, priv->mode_mgr + EUD_REG_EUD_EN2);
return usb_role_switch_set_role(priv->role_sw, USB_ROLE_DEVICE);
}
-static void disable_eud(struct eud_chip *priv)
+static int disable_eud(struct eud_chip *priv)
{
+ int ret;
+
+ ret = qcom_scm_io_writel(priv->mode_mgr + EUD_REG_EUD_EN2, 0);
+ if (ret)
+ return ret;
+
writel(0, priv->base + EUD_REG_CSR_EUD_EN);
- writel(0, priv->mode_mgr + EUD_REG_EUD_EN2);
+ return 0;
}
static ssize_t enable_show(struct device *dev,
@@ -82,11 +94,12 @@ static ssize_t enable_store(struct device *dev,
chip->enabled = enable;
else
disable_eud(chip);
+
} else {
- disable_eud(chip);
+ ret = disable_eud(chip);
}
- return count;
+ return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(enable);
@@ -178,6 +191,7 @@ static void eud_role_switch_release(void *data)
static int eud_probe(struct platform_device *pdev)
{
struct eud_chip *chip;
+ struct resource *res;
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
@@ -200,9 +214,10 @@ static int eud_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- chip->mode_mgr = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(chip->mode_mgr))
- return PTR_ERR(chip->mode_mgr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -ENODEV;
+ chip->mode_mgr = res->start;
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 49d79c1257f3..8c09db750bfd 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -328,9 +328,8 @@ static int twl6030_set_vbus(struct phy_companion *comparator, bool enabled)
static int twl6030_usb_probe(struct platform_device *pdev)
{
- u32 ret;
struct twl6030_usb *twl;
- int status, err;
+ int status, err, ret;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index dcf141ada078..1c80296c3b27 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -545,24 +545,23 @@ static irqreturn_t cd321x_interrupt(int irq, void *data)
if (!event)
goto err_unlock;
+ tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
+
if (!tps6598x_read_status(tps, &status))
- goto err_clear_ints;
+ goto err_unlock;
if (event & APPLE_CD_REG_INT_POWER_STATUS_UPDATE)
if (!tps6598x_read_power_status(tps))
- goto err_clear_ints;
+ goto err_unlock;
if (event & APPLE_CD_REG_INT_DATA_STATUS_UPDATE)
if (!tps6598x_read_data_status(tps))
- goto err_clear_ints;
+ goto err_unlock;
/* Handle plug insert or removal */
if (event & APPLE_CD_REG_INT_PLUG_EVENT)
tps6598x_handle_plug_event(tps, status);
-err_clear_ints:
- tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
-
err_unlock:
mutex_unlock(&tps->lock);
@@ -668,25 +667,24 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data)
if (!(event1[0] | event1[1] | event2[0] | event2[1]))
goto err_unlock;
+ tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
+ tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
+
if (!tps6598x_read_status(tps, &status))
- goto err_clear_ints;
+ goto err_unlock;
if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE)
if (!tps6598x_read_power_status(tps))
- goto err_clear_ints;
+ goto err_unlock;
if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE)
if (!tps6598x_read_data_status(tps))
- goto err_clear_ints;
+ goto err_unlock;
/* Handle plug insert or removal */
if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT)
tps6598x_handle_plug_event(tps, status);
-err_clear_ints:
- tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
- tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
-
err_unlock:
mutex_unlock(&tps->lock);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index e70fba9f55d6..0d6c10a8490c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -765,6 +765,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
ctrlreq->wValue, vdev->rhport);
vdev->udev = usb_get_dev(urb->dev);
+ /*
+ * NOTE: A similar operation has been done via
+ * USB_REQ_GET_DESCRIPTOR handler below, which is
+ * supposed to always precede USB_REQ_SET_ADDRESS.
+ *
+ * It's not entirely clear if operating on a different
+ * usb_device instance here is a real possibility,
+ * otherwise this call and vdev->udev assignment above
+ * should be dropped.
+ */
+ dev_pm_syscore_device(&vdev->udev->dev, true);
usb_put_dev(old);
spin_lock(&vdev->ud.lock);
@@ -785,6 +796,17 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
"Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
vdev->udev = usb_get_dev(urb->dev);
+ /*
+ * Set syscore PM flag for the virtually attached
+ * devices to ensure they will not enter suspend on
+ * the client side.
+ *
+ * Note this doesn't have any impact on the physical
+ * devices attached to the host system on the server
+ * side, hence there is no need to undo the operation
+ * on disconnect.
+ */
+ dev_pm_syscore_device(&vdev->udev->dev, true);
usb_put_dev(old);
goto out;
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 397f5e445136..fde33f54e99e 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1612,8 +1612,10 @@ static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vd
}
migf = kzalloc(sizeof(*migf), GFP_KERNEL);
- if (!migf)
+ if (!migf) {
+ dput(vfio_dev_migration);
return;
+ }
hisi_acc_vdev->debug_migf = migf;
vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration);
@@ -1623,6 +1625,8 @@ static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vd
hisi_acc_vf_migf_read);
debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc,
hisi_acc_vf_debug_cmd);
+
+ dput(vfio_dev_migration);
}
static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev)
diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
index c51f5e4c3dd6..481992142f79 100644
--- a/drivers/vfio/pci/pds/dirty.c
+++ b/drivers/vfio/pci/pds/dirty.c
@@ -82,7 +82,7 @@ static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region,
host_ack_bmp = vzalloc(bytes);
if (!host_ack_bmp) {
- bitmap_free(host_seq_bmp);
+ vfree(host_seq_bmp);
return -ENOMEM;
}
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 9f27c3f6091b..925858cc6096 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1115,6 +1115,7 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
struct iov_iter iter;
u64 translated;
int ret;
+ size_t size;
ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
len - total_translated, &translated,
@@ -1132,9 +1133,9 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
translated);
}
- ret = copy_from_iter(dst, translated, &iter);
- if (ret < 0)
- return ret;
+ size = copy_from_iter(dst, translated, &iter);
+ if (size != translated)
+ return -EFAULT;
src += translated;
dst += translated;
@@ -1161,6 +1162,7 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
struct iov_iter iter;
u64 translated;
int ret;
+ size_t size;
ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
len - total_translated, &translated,
@@ -1178,9 +1180,9 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
translated);
}
- ret = copy_to_iter(src, translated, &iter);
- if (ret < 0)
- return ret;
+ size = copy_to_iter(src, translated, &iter);
+ if (size != translated)
+ return -EFAULT;
src += translated;
dst += translated;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 1893815dc67f..6acf5a00c2ba 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -93,6 +93,7 @@ struct simplefb_par {
static void simplefb_clocks_destroy(struct simplefb_par *par);
static void simplefb_regulators_destroy(struct simplefb_par *par);
+static void simplefb_detach_genpds(void *res);
/*
* fb_ops.fb_destroy is called by the last put_fb_info() call at the end
@@ -105,6 +106,7 @@ static void simplefb_destroy(struct fb_info *info)
simplefb_regulators_destroy(info->par);
simplefb_clocks_destroy(info->par);
+ simplefb_detach_genpds(info->par);
if (info->screen_base)
iounmap(info->screen_base);
@@ -445,13 +447,14 @@ static void simplefb_detach_genpds(void *res)
if (!IS_ERR_OR_NULL(par->genpds[i]))
dev_pm_domain_detach(par->genpds[i], true);
}
+ par->num_genpds = 0;
}
static int simplefb_attach_genpds(struct simplefb_par *par,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- unsigned int i;
+ unsigned int i, num_genpds;
int err;
err = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -465,26 +468,35 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
return err;
}
- par->num_genpds = err;
+ num_genpds = err;
/*
* Single power-domain devices are handled by the driver core, so
* nothing to do here.
*/
- if (par->num_genpds <= 1)
+ if (num_genpds <= 1) {
+ par->num_genpds = num_genpds;
return 0;
+ }
- par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds),
+ par->genpds = devm_kcalloc(dev, num_genpds, sizeof(*par->genpds),
GFP_KERNEL);
if (!par->genpds)
return -ENOMEM;
- par->genpd_links = devm_kcalloc(dev, par->num_genpds,
+ par->genpd_links = devm_kcalloc(dev, num_genpds,
sizeof(*par->genpd_links),
GFP_KERNEL);
if (!par->genpd_links)
return -ENOMEM;
+ /*
+ * Set par->num_genpds only after genpds and genpd_links are allocated
+ * to exit early from simplefb_detach_genpds() without full
+ * initialisation.
+ */
+ par->num_genpds = num_genpds;
+
for (i = 0; i < par->num_genpds; i++) {
par->genpds[i] = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR(par->genpds[i])) {
@@ -506,9 +518,10 @@ static int simplefb_attach_genpds(struct simplefb_par *par,
dev_warn(dev, "failed to link power-domain %u\n", i);
}
- return devm_add_action_or_reset(dev, simplefb_detach_genpds, par);
+ return 0;
}
#else
+static void simplefb_detach_genpds(void *res) { }
static int simplefb_attach_genpds(struct simplefb_par *par,
struct platform_device *pdev)
{
@@ -622,18 +635,20 @@ static int simplefb_probe(struct platform_device *pdev)
ret = devm_aperture_acquire_for_platform_device(pdev, par->base, par->size);
if (ret) {
dev_err(&pdev->dev, "Unable to acquire aperture: %d\n", ret);
- goto error_regulators;
+ goto error_genpds;
}
ret = register_framebuffer(info);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
- goto error_regulators;
+ goto error_genpds;
}
dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
return 0;
+error_genpds:
+ simplefb_detach_genpds(par);
error_regulators:
simplefb_regulators_destroy(par);
error_clocks:
diff --git a/drivers/watchdog/intel_oc_wdt.c b/drivers/watchdog/intel_oc_wdt.c
index 7c0551106981..a39892c10770 100644
--- a/drivers/watchdog/intel_oc_wdt.c
+++ b/drivers/watchdog/intel_oc_wdt.c
@@ -41,6 +41,7 @@
struct intel_oc_wdt {
struct watchdog_device wdd;
struct resource *ctrl_res;
+ struct watchdog_info info;
bool locked;
};
@@ -115,7 +116,6 @@ static const struct watchdog_ops intel_oc_wdt_ops = {
static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
{
- struct watchdog_info *info;
unsigned long val;
val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt));
@@ -134,7 +134,6 @@ static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
set_bit(WDOG_HW_RUNNING, &oc_wdt->wdd.status);
if (oc_wdt->locked) {
- info = (struct watchdog_info *)&intel_oc_wdt_info;
/*
* Set nowayout unconditionally as we cannot stop
* the watchdog.
@@ -145,7 +144,7 @@ static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
* and inform the core we can't change it.
*/
oc_wdt->wdd.timeout = (val & INTEL_OC_WDT_TOV) + 1;
- info->options &= ~WDIOF_SETTIMEOUT;
+ oc_wdt->info.options &= ~WDIOF_SETTIMEOUT;
dev_info(oc_wdt->wdd.parent,
"Register access locked, heartbeat fixed at: %u s\n",
@@ -193,7 +192,8 @@ static int intel_oc_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = INTEL_OC_WDT_MIN_TOV;
wdd->max_timeout = INTEL_OC_WDT_MAX_TOV;
wdd->timeout = INTEL_OC_WDT_DEF_TOV;
- wdd->info = &intel_oc_wdt_info;
+ oc_wdt->info = intel_oc_wdt_info;
+ wdd->info = &oc_wdt->info;
wdd->ops = &intel_oc_wdt_ops;
wdd->parent = dev;
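
The intel_oc_wdt change above embeds a writable copy of the watchdog_info in the per-device structure, because the previous code cast away const from the shared static template and modified it for every instance. A sketch of the per-device-copy pattern, with a hypothetical driver name:

#include <linux/watchdog.h>

static const struct watchdog_info example_wdt_info = {
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
	.identity = "example-wdt",
};

struct example_wdt {
	struct watchdog_device wdd;
	struct watchdog_info info;	/* per-device, writable copy */
};

static void example_wdt_init_info(struct example_wdt *wdt, bool locked)
{
	wdt->info = example_wdt_info;		/* copy the const template */
	if (locked)
		wdt->info.options &= ~WDIOF_SETTIMEOUT;
	wdt->wdd.info = &wdt->info;		/* point at our copy */
}
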
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 867f9f311379..a4b497ecfa20 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -100,6 +100,8 @@ static int mpc8xxx_wdt_start(struct watchdog_device *w)
ddata->swtc = tmp >> 16;
set_bit(WDOG_HW_RUNNING, &ddata->wdd.status);
+ mpc8xxx_wdt_keepalive(ddata);
+
return 0;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b21cb72835cc..4eafe3817e11 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1621,7 +1621,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long range_bitmap = 0;
bool submitted_io = false;
- bool error = false;
+ int found_error = 0;
const u64 folio_start = folio_pos(folio);
const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
u64 cur;
@@ -1685,7 +1685,8 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
*/
btrfs_mark_ordered_io_finished(inode, folio, cur,
fs_info->sectorsize, false);
- error = true;
+ if (!found_error)
+ found_error = ret;
continue;
}
submitted_io = true;
@@ -1702,11 +1703,11 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
* If we hit any error, the corresponding sector will have its dirty
* flag cleared and writeback finished, thus no need to handle the error case.
*/
- if (!submitted_io && !error) {
+ if (!submitted_io && !found_error) {
btrfs_folio_set_writeback(fs_info, folio, start, len);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
}
- return ret;
+ return found_error;
}
/*
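
The extent_writepage_io() change above switches from a bool flag to recording the first errno seen, so the function no longer returns whatever value happened to be left in ret on a later iteration. The general shape, sketched with illustrative names:

/* Run every step, remember only the first failure, and report it. */
static int process_all(int (*step)(int idx), int nr)
{
	int found_error = 0;

	for (int i = 0; i < nr; i++) {
		int ret = step(i);

		if (ret && !found_error)
			found_error = ret;	/* keep the first errno */
	}
	return found_error;
}
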
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 18db1053cdf0..cd8a09e3d1dc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6479,6 +6479,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
if (!args->subvol)
btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
+ btrfs_set_inode_mapping_order(BTRFS_I(inode));
if (S_ISREG(inode->i_mode)) {
if (btrfs_test_opt(fs_info, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
@@ -6486,7 +6487,6 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
BTRFS_INODE_NODATASUM;
btrfs_update_inode_mapping_flags(BTRFS_I(inode));
- btrfs_set_inode_mapping_order(BTRFS_I(inode));
}
ret = btrfs_insert_inode_locked(inode);
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b002e9b734f9..56c8005b24a3 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -412,7 +412,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
vmf = vmf_insert_mixed(vma, vma->vm_start + off,
- address + off);
+ PHYS_PFN(address + off));
if (vmf & VM_FAULT_ERROR)
ret = vm_fault_to_errno(vmf, 0);
}
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 2d73297003d2..625b8ae8f67f 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1835,7 +1835,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
map->m_la = end;
err = z_erofs_map_blocks_iter(inode, map,
EROFS_GET_BLOCKS_READMORE);
- if (err)
+ if (err || !(map->m_flags & EROFS_MAP_ENCODED))
return;
/* expand ra for the trailing edge if readahead */
@@ -1847,7 +1847,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
end = round_up(end, PAGE_SIZE);
} else {
end = round_up(map->m_la, PAGE_SIZE);
- if (!map->m_llen)
+ if (!(map->m_flags & EROFS_MAP_ENCODED) || !map->m_llen)
return;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 01a6e2de7fc3..72e02df72c4c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1981,6 +1981,16 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
+/*
+ * Check whether the inode is tracked as orphan (either in orphan file or
+ * orphan list).
+ */
+static inline bool ext4_inode_orphan_tracked(struct inode *inode)
+{
+ return ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
+ !list_empty(&EXT4_I(inode)->i_orphan);
+}
+
/*
* Codes for operating systems
*/
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 93240e35ee36..7a8b30932189 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -354,7 +354,7 @@ static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
* to cleanup the orphan list in ext4_handle_inode_extension(). Do it
* now.
*/
- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+ if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) {
handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5b7a15db4953..5230452e29dd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4748,7 +4748,7 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
* old inodes get re-used with the upper 16 bits of the
* uid/gid intact.
*/
- if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+ if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
} else {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5898d92ba19f..6070d3c86678 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3655,16 +3655,26 @@ static void ext4_discard_work(struct work_struct *work)
static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi)
{
+ if (!sbi->s_mb_avg_fragment_size)
+ return;
+
for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
xa_destroy(&sbi->s_mb_avg_fragment_size[i]);
+
kfree(sbi->s_mb_avg_fragment_size);
+ sbi->s_mb_avg_fragment_size = NULL;
}
static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi)
{
+ if (!sbi->s_mb_largest_free_orders)
+ return;
+
for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
xa_destroy(&sbi->s_mb_largest_free_orders[i]);
+
kfree(sbi->s_mb_largest_free_orders);
+ sbi->s_mb_largest_free_orders = NULL;
}
int ext4_mb_init(struct super_block *sb)
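
The two mballoc destroy helpers above become safe to call on a partially initialised superblock (for example when ext4_mb_init() fails half-way) by checking the pointer before walking the arrays and clearing it after freeing. The same defensive shape, sketched with illustrative names:

#include <linux/slab.h>
#include <linux/xarray.h>

struct frag_info {
	struct xarray *buckets;
	int nr_buckets;
};

static void frag_info_destroy(struct frag_info *fi)
{
	if (!fi->buckets)		/* never allocated, or already freed */
		return;

	for (int i = 0; i < fi->nr_buckets; i++)
		xa_destroy(&fi->buckets[i]);

	kfree(fi->buckets);
	fi->buckets = NULL;		/* make a second call a no-op */
}
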
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index 524d4658fa40..0fbcce67ffd4 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -109,11 +109,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
!inode_is_locked(inode));
- /*
- * Inode orphaned in orphan file or in orphan list?
- */
- if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) ||
- !list_empty(&EXT4_I(inode)->i_orphan))
+ if (ext4_inode_orphan_tracked(inode))
return 0;
/*
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 699c15db28a8..ba497387b9c8 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1438,9 +1438,9 @@ static void ext4_free_in_core_inode(struct inode *inode)
static void ext4_destroy_inode(struct inode *inode)
{
- if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
+ if (ext4_inode_orphan_tracked(inode)) {
ext4_msg(inode->i_sb, KERN_ERR,
- "Inode %lu (%p): orphan list check failed!",
+ "Inode %lu (%p): inode tracked as orphan!",
inode->i_ino, EXT4_I(inode));
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
EXT4_I(inode), sizeof(struct ext4_inode_info),
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 5c1f47e45dab..72bc05b913af 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1245,20 +1245,29 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
for (i = cluster_size - 1; i >= 0; i--) {
struct folio *folio = page_folio(rpages[i]);
- loff_t start = folio->index << PAGE_SHIFT;
+ loff_t start = (loff_t)folio->index << PAGE_SHIFT;
+ loff_t offset = from > start ? from - start : 0;
- if (from <= start) {
- folio_zero_segment(folio, 0, folio_size(folio));
- } else {
- folio_zero_segment(folio, from - start,
- folio_size(folio));
+ folio_zero_segment(folio, offset, folio_size(folio));
+
+ if (from >= start)
break;
- }
}
f2fs_compress_write_end(inode, fsdata, start_idx, true);
+
+ err = filemap_write_and_wait_range(inode->i_mapping,
+ round_down(from, cluster_size << PAGE_SHIFT),
+ LLONG_MAX);
+ if (err)
+ return err;
+
+ truncate_pagecache(inode, from);
+
+ err = f2fs_do_truncate_blocks(inode,
+ round_up(from, PAGE_SIZE), lock);
}
- return 0;
+ return err;
}
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7961e0ddfca3..50c90bd03923 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -911,7 +911,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
if (fio->io_wbc)
wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));
- inc_page_count(fio->sbi, WB_DATA_TYPE(data_folio, false));
+ inc_page_count(fio->sbi, WB_DATA_TYPE(folio, false));
*fio->last_block = fio->new_blkaddr;
*fio->bio = bio;
@@ -1778,12 +1778,13 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
if (map->m_flags & F2FS_MAP_MAPPED) {
unsigned int ofs = start_pgofs - map->m_lblk;
- f2fs_update_read_extent_cache_range(&dn,
- start_pgofs, map->m_pblk + ofs,
- map->m_len - ofs);
+ if (map->m_len > ofs)
+ f2fs_update_read_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
}
if (map->m_next_extent)
- *map->m_next_extent = pgofs + 1;
+ *map->m_next_extent = is_hole ? pgofs + 1 : pgofs;
}
f2fs_put_dnode(&dn);
unlock_out:
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b4b62ac46bc6..dac7d44885e4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2361,8 +2361,6 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
{
if (!inode)
return true;
- if (!test_opt(sbi, RESERVE_ROOT))
- return false;
if (IS_NOQUOTA(inode))
return true;
if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
@@ -2383,7 +2381,7 @@ static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
avail_user_block_count = sbi->user_block_count -
sbi->current_reserved_blocks;
- if (!__allow_reserved_blocks(sbi, inode, cap))
+ if (test_opt(sbi, RESERVE_ROOT) && !__allow_reserved_blocks(sbi, inode, cap))
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 42faaed6a02d..ffa045b39c01 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -35,15 +35,23 @@
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
-static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)
+static void f2fs_zero_post_eof_page(struct inode *inode,
+ loff_t new_size, bool lock)
{
loff_t old_size = i_size_read(inode);
if (old_size >= new_size)
return;
+ if (mapping_empty(inode->i_mapping))
+ return;
+
+ if (lock)
+ filemap_invalidate_lock(inode->i_mapping);
/* zero or drop pages only in range of [old_size, new_size] */
- truncate_pagecache(inode, old_size);
+ truncate_inode_pages_range(inode->i_mapping, old_size, new_size);
+ if (lock)
+ filemap_invalidate_unlock(inode->i_mapping);
}
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
@@ -114,9 +122,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
- filemap_invalidate_lock(inode->i_mapping);
- f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT);
- filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT, true);
file_update_time(vmf->vma->vm_file);
filemap_invalidate_lock_shared(inode->i_mapping);
@@ -904,8 +910,16 @@ int f2fs_truncate(struct inode *inode)
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
- if (err)
+ if (err) {
+ /*
+ * Always truncate page #0 to avoid page cache
+ * leak in evict() path.
+ */
+ truncate_inode_pages_range(inode->i_mapping,
+ F2FS_BLK_TO_BYTES(0),
+ F2FS_BLK_END_BYTES(0));
return err;
+ }
}
err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
@@ -1141,7 +1155,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
filemap_invalidate_lock(inode->i_mapping);
if (attr->ia_size > old_size)
- f2fs_zero_post_eof_page(inode, attr->ia_size);
+ f2fs_zero_post_eof_page(inode, attr->ia_size, false);
truncate_setsize(inode, attr->ia_size);
if (attr->ia_size <= old_size)
@@ -1260,9 +1274,7 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (ret)
return ret;
- filemap_invalidate_lock(inode->i_mapping);
- f2fs_zero_post_eof_page(inode, offset + len);
- filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_zero_post_eof_page(inode, offset + len, true);
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1547,7 +1559,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
- f2fs_zero_post_eof_page(inode, offset + len);
+ f2fs_zero_post_eof_page(inode, offset + len, false);
f2fs_lock_op(sbi);
f2fs_drop_extent_tree(inode);
@@ -1670,9 +1682,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
- filemap_invalidate_lock(mapping);
- f2fs_zero_post_eof_page(inode, offset + len);
- filemap_invalidate_unlock(mapping);
+ f2fs_zero_post_eof_page(inode, offset + len, true);
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1806,7 +1816,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(mapping);
- f2fs_zero_post_eof_page(inode, offset + len);
+ f2fs_zero_post_eof_page(inode, offset + len, false);
truncate_pagecache(inode, offset);
while (!ret && idx > pg_start) {
@@ -1864,9 +1874,7 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
if (err)
return err;
- filemap_invalidate_lock(inode->i_mapping);
- f2fs_zero_post_eof_page(inode, offset + len);
- filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_zero_post_eof_page(inode, offset + len, true);
f2fs_balance_fs(sbi, true);
@@ -4914,9 +4922,8 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
if (err)
return err;
- filemap_invalidate_lock(inode->i_mapping);
- f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from));
- filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_zero_post_eof_page(inode,
+ iocb->ki_pos + iov_iter_count(from), true);
return count;
}
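
f2fs_zero_post_eof_page() above gains a lock parameter so call sites that already hold filemap_invalidate_lock() pass false while the remaining callers let the helper take it, and it now truncates only the [old_size, new_size] range. A condensed sketch of that helper shape (names and includes are illustrative):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static void zero_post_eof(struct inode *inode, loff_t old_size,
			  loff_t new_size, bool lock)
{
	if (old_size >= new_size || mapping_empty(inode->i_mapping))
		return;

	if (lock)
		filemap_invalidate_lock(inode->i_mapping);
	/* Drop or zero page cache only in [old_size, new_size]. */
	truncate_inode_pages_range(inode->i_mapping, old_size, new_size);
	if (lock)
		filemap_invalidate_unlock(inode->i_mapping);
}
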
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index c0f209f74688..5734e0386468 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1794,6 +1794,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
+ if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno))) {
+ f2fs_err(sbi, "%s: segment %u is used by log",
+ __func__, segno);
+ f2fs_bug_on(sbi, 1);
+ goto skip;
+ }
+
if (get_valid_blocks(sbi, segno, false) == 0)
goto freed;
if (gc_type == BG_GC && __is_large_section(sbi) &&
@@ -1805,7 +1812,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
sum = folio_address(sum_folio);
if (type != GET_SUM_TYPE((&sum->footer))) {
- f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
+ f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SIT and SSA",
segno, type, GET_SUM_TYPE((&sum->footer)));
f2fs_stop_checkpoint(sbi, false,
STOP_CP_REASON_CORRUPTED_SUMMARY);
@@ -2068,6 +2075,13 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
+ /*
+ * Avoid migrating an empty section, as it can be allocated by a
+ * log in parallel.
+ */
+ if (!get_valid_blocks(sbi, segno, true))
+ continue;
+
if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno)))
continue;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index e16c4e2830c2..8086a3456e4d 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -988,6 +988,10 @@ static int f2fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_enable:
+ F2FS_CTX_INFO(ctx).unusable_cap_perc = 0;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
+ F2FS_CTX_INFO(ctx).unusable_cap = 0;
+ ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
default:
@@ -1185,7 +1189,11 @@ static int f2fs_check_quota_consistency(struct fs_context *fc,
goto err_jquota_change;
if (old_qname) {
- if (strcmp(old_qname, new_qname) == 0) {
+ if (!new_qname) {
+ f2fs_info(sbi, "remove qf_name %s",
+ old_qname);
+ continue;
+ } else if (strcmp(old_qname, new_qname) == 0) {
ctx->qname_mask &= ~(1 << i);
continue;
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4adcf09d4b01..c7351ca07065 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1175,7 +1175,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
num = min(iov_iter_count(ii), fc->max_write);
ap->args.in_pages = true;
- ap->descs[0].offset = offset;
while (num && ap->num_folios < max_folios) {
size_t tmp;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 72d95185a39f..bc67fa058c84 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -1442,6 +1442,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ int ret;
if (!(fl->c.flc_flags & FL_POSIX))
return -ENOLCK;
@@ -1450,14 +1451,20 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
locks_lock_file_wait(file, fl);
return -EIO;
}
- if (cmd == F_CANCELLK)
- return dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
- else if (IS_GETLK(cmd))
- return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
- else if (lock_is_unlock(fl))
- return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
- else
- return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ down_read(&ls->ls_sem);
+ ret = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ if (cmd == F_CANCELLK)
+ ret = dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (IS_GETLK(cmd))
+ ret = dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (lock_is_unlock(fl))
+ ret = dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else
+ ret = dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ }
+ up_read(&ls->ls_sem);
+ return ret;
}
static void __flock_holder_uninit(struct file *file, struct gfs2_holder *fl_gh)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b6fd1cb17de7..a6535413a0b4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -502,7 +502,7 @@ static bool do_promote(struct gfs2_glock *gl)
*/
if (list_is_first(&gh->gh_list, &gl->gl_holders))
return false;
- do_error(gl, 0);
+ do_error(gl, 0); /* Fail queued try locks */
break;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
@@ -703,44 +703,25 @@ __acquires(&gl->gl_lockref.lock)
lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
GLOCK_BUG_ON(gl, gl->gl_state == target);
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
- if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
- glops->go_inval) {
- /*
- * If another process is already doing the invalidate, let that
- * finish first. The glock state machine will get back to this
- * holder again later.
- */
- if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
- &gl->gl_flags))
- return;
- do_error(gl, 0); /* Fail queued try locks */
- }
- gl->gl_req = target;
- set_bit(GLF_BLOCKING, &gl->gl_flags);
- if ((gl->gl_req == LM_ST_UNLOCKED) ||
- (gl->gl_state == LM_ST_EXCLUSIVE) ||
- (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
- clear_bit(GLF_BLOCKING, &gl->gl_flags);
- if (!glops->go_inval && !glops->go_sync)
+ if (!glops->go_inval || !glops->go_sync)
goto skip_inval;
spin_unlock(&gl->gl_lockref.lock);
- if (glops->go_sync) {
- ret = glops->go_sync(gl);
- /* If we had a problem syncing (due to io errors or whatever,
- * we should not invalidate the metadata or tell dlm to
- * release the glock to other nodes.
- */
- if (ret) {
- if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
- fs_err(sdp, "Error %d syncing glock \n", ret);
- gfs2_dump_glock(NULL, gl, true);
- }
- spin_lock(&gl->gl_lockref.lock);
- goto skip_inval;
+ ret = glops->go_sync(gl);
+ /* If we had a problem syncing (due to io errors or whatever,
+ * we should not invalidate the metadata or tell dlm to
+ * release the glock to other nodes.
+ */
+ if (ret) {
+ if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
+ fs_err(sdp, "Error %d syncing glock\n", ret);
+ gfs2_dump_glock(NULL, gl, true);
}
+ spin_lock(&gl->gl_lockref.lock);
+ goto skip_inval;
}
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
+
+ if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) {
/*
* The call to go_sync should have cleared out the ail list.
* If there are still items, we have a problem. We ought to
@@ -755,7 +736,6 @@ __acquires(&gl->gl_lockref.lock)
gfs2_dump_glock(NULL, gl, true);
}
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
- clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
spin_lock(&gl->gl_lockref.lock);
@@ -805,8 +785,6 @@ __acquires(&gl->gl_lockref.lock)
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
return;
- } else {
- clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
}
@@ -816,21 +794,22 @@ __acquires(&gl->gl_lockref.lock)
ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
spin_lock(&gl->gl_lockref.lock);
- if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
- target == LM_ST_UNLOCKED &&
- test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ if (!ret) {
+ /* The operation will be completed asynchronously. */
+ return;
+ }
+ clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+
+ if (ret == -ENODEV && gl->gl_target == LM_ST_UNLOCKED &&
+ target == LM_ST_UNLOCKED) {
/*
* The lockspace has been released and the lock has
* been unlocked implicitly.
*/
- } else if (ret) {
+ } else {
fs_err(sdp, "lm_lock ret %d\n", ret);
target = gl->gl_state | LM_OUT_ERROR;
- } else {
- /* The operation will be completed asynchronously. */
- return;
}
- clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
}
/* Complete the operation now. */
@@ -1462,6 +1441,24 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
va_end(args);
}
+static bool gfs2_should_queue_trylock(struct gfs2_glock *gl,
+ struct gfs2_holder *gh)
+{
+ struct gfs2_holder *current_gh, *gh2;
+
+ current_gh = find_first_holder(gl);
+ if (current_gh && !may_grant(gl, current_gh, gh))
+ return false;
+
+ list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
+ if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+ continue;
+ if (!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ return false;
+ }
+ return true;
+}
+
static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{
if (!(gh->gh_flags & GL_NOPID))
@@ -1480,27 +1477,20 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
*/
static inline void add_to_queue(struct gfs2_holder *gh)
-__releases(&gl->gl_lockref.lock)
-__acquires(&gl->gl_lockref.lock)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_holder *gh2;
- int try_futile = 0;
GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
GLOCK_BUG_ON(gl, true);
- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
- if (test_bit(GLF_LOCK, &gl->gl_flags)) {
- struct gfs2_holder *current_gh;
-
- current_gh = find_first_holder(gl);
- try_futile = !may_grant(gl, current_gh, gh);
- }
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
- goto fail;
+ if ((gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
+ !gfs2_should_queue_trylock(gl, gh)) {
+ gh->gh_error = GLR_TRYFAILED;
+ gfs2_holder_wake(gh);
+ return;
}
list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
@@ -1512,15 +1502,6 @@ __acquires(&gl->gl_lockref.lock)
continue;
goto trap_recursive;
}
- list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
- if (try_futile &&
- !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
-fail:
- gh->gh_error = GLR_TRYFAILED;
- gfs2_holder_wake(gh);
- return;
- }
- }
trace_gfs2_glock_queue(gh, 1);
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
@@ -2321,8 +2302,6 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'y';
if (test_bit(GLF_LFLUSH, gflags))
*p++ = 'f';
- if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
- *p++ = 'i';
if (test_bit(GLF_PENDING_REPLY, gflags))
*p++ = 'R';
if (test_bit(GLF_HAVE_REPLY, gflags))
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 9339a3bff6ee..d041b922b45e 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -68,6 +68,10 @@ enum {
* also be granted in SHARED. The preferred state is whichever is compatible
* with other granted locks, or the specified state if no other locks exist.
*
+ * In addition, when a lock is already held in EX mode locally, a SHARED or
+ * DEFERRED mode request with the LM_FLAG_ANY flag set will be granted.
+ * (The LM_FLAG_ANY flag is only used for SHARED mode requests currently.)
+ *
* LM_FLAG_NODE_SCOPE
* This holder agrees to share the lock within this node. In other words,
* the glock is held in EX mode according to DLM, but local holders on the
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index d4ad82f47eee..3fcb7ab198d4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -319,7 +319,6 @@ enum {
GLF_DEMOTE_IN_PROGRESS = 5,
GLF_DIRTY = 6,
GLF_LFLUSH = 7,
- GLF_INVALIDATE_IN_PROGRESS = 8,
GLF_HAVE_REPLY = 9,
GLF_INITIAL = 10,
GLF_HAVE_FROZEN_REPLY = 11,
@@ -658,6 +657,8 @@ struct lm_lockstruct {
struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
char *ls_lvb_bits;
+ struct rw_semaphore ls_sem;
+
spinlock_t ls_recover_spin; /* protects following fields */
unsigned long ls_recover_flags; /* DFL_ */
uint32_t ls_recover_mount; /* gen in first recover_done cb */
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index cee5d199d2d8..6db37c20587d 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -58,6 +58,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
/**
* gfs2_update_reply_times - Update locking statistics
* @gl: The glock to update
+ * @blocking: The operation may have been blocking
*
* This assumes that gl->gl_dstamp has been set earlier.
*
@@ -72,12 +73,12 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
* TRY_1CB flags are set are classified as non-blocking. All
* other DLM requests are counted as (potentially) blocking.
*/
-static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
+static inline void gfs2_update_reply_times(struct gfs2_glock *gl,
+ bool blocking)
{
struct gfs2_pcpu_lkstats *lks;
const unsigned gltype = gl->gl_name.ln_type;
- unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
- GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
+ unsigned index = blocking ? GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
s64 rtt;
preempt_disable();
@@ -119,14 +120,18 @@ static inline void gfs2_update_request_times(struct gfs2_glock *gl)
static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
+ bool blocking;
unsigned ret;
+ blocking = test_bit(GLF_BLOCKING, &gl->gl_flags);
+ gfs2_update_reply_times(gl, blocking);
+ clear_bit(GLF_BLOCKING, &gl->gl_flags);
+
/* If the glock is dead, we only react to a dlm_unlock() reply. */
if (__lockref_is_dead(&gl->gl_lockref) &&
gl->gl_lksb.sb_status != -DLM_EUNLOCK)
return;
- gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
@@ -241,7 +246,7 @@ static bool down_conversion(int cur, int req)
}
static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
- const int cur, const int req)
+ const int req, bool blocking)
{
u32 lkf = 0;
@@ -274,7 +279,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
* "upward" lock conversions or else DLM will reject the
* request as invalid.
*/
- if (!down_conversion(cur, req))
+ if (blocking)
lkf |= DLM_LKF_QUECVT;
}
@@ -294,14 +299,20 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
unsigned int flags)
{
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+ bool blocking;
int cur, req;
u32 lkf;
char strname[GDLM_STRNAME_BYTES] = "";
int error;
+ gl->gl_req = req_state;
cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
req = make_mode(gl->gl_name.ln_sbd, req_state);
- lkf = make_flags(gl, flags, cur, req);
+ blocking = !down_conversion(cur, req) &&
+ !(flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB));
+ lkf = make_flags(gl, flags, req, blocking);
+ if (blocking)
+ set_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
@@ -318,8 +329,13 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
*/
again:
- error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
- GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+ GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+ }
+ up_read(&ls->ls_sem);
if (error == -EBUSY) {
msleep(20);
goto again;
@@ -341,7 +357,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
return;
}
- clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_update_request_times(gl);
@@ -369,8 +384,13 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
flags |= DLM_LKF_VALBLK;
again:
- error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags,
- NULL, gl);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags,
+ NULL, gl);
+ }
+ up_read(&ls->ls_sem);
if (error == -EBUSY) {
msleep(20);
goto again;
@@ -386,7 +406,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
static void gdlm_cancel(struct gfs2_glock *gl)
{
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
- dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+
+ down_read(&ls->ls_sem);
+ if (likely(ls->ls_dlm != NULL)) {
+ dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+ }
+ up_read(&ls->ls_sem);
}
/*
@@ -567,7 +592,11 @@ static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
- error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL))
+ error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+ up_read(&ls->ls_sem);
if (error) {
fs_err(sdp, "%s lkid %x error %d\n",
name, lksb->sb_lkid, error);
@@ -594,9 +623,14 @@ static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
memset(strname, 0, GDLM_STRNAME_BYTES);
snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);
- error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
- strname, GDLM_STRNAME_BYTES - 1,
- 0, sync_wait_cb, ls, NULL);
+ down_read(&ls->ls_sem);
+ error = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
+ strname, GDLM_STRNAME_BYTES - 1,
+ 0, sync_wait_cb, ls, NULL);
+ }
+ up_read(&ls->ls_sem);
if (error) {
fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
name, lksb->sb_lkid, flags, mode, error);
@@ -1323,6 +1357,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
*/
INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
+ ls->ls_dlm = NULL;
spin_lock_init(&ls->ls_recover_spin);
ls->ls_recover_flags = 0;
ls->ls_recover_mount = 0;
@@ -1357,6 +1392,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
* create/join lockspace
*/
+ init_rwsem(&ls->ls_sem);
error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
&gdlm_lockspace_ops, sdp, &ops_result,
&ls->ls_dlm);
@@ -1436,10 +1472,12 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
/* mounted_lock and control_lock will be purged in dlm recovery */
release:
+ down_write(&ls->ls_sem);
if (ls->ls_dlm) {
dlm_release_lockspace(ls->ls_dlm, 2);
ls->ls_dlm = NULL;
}
+ up_write(&ls->ls_sem);
free_recover_size(ls);
}
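
The gfs2 changes above wrap every dlm_lock()/dlm_unlock() call in a read lock on the new ls_sem and re-check ls_dlm, while unmount takes the semaphore for writing before releasing the lockspace and clearing the pointer. The guard pattern in isolation, with illustrative names:

#include <linux/rwsem.h>
#include <linux/errno.h>

struct ls_guard {
	struct rw_semaphore sem;	/* set up with init_rwsem() */
	void *handle;			/* e.g. the dlm lockspace */
};

/* Callers take the semaphore shared and re-check the handle. */
static int ls_guard_call(struct ls_guard *g, int (*op)(void *handle))
{
	int ret = -ENODEV;

	down_read(&g->sem);
	if (g->handle)
		ret = op(g->handle);
	up_read(&g->sem);
	return ret;
}

/* Teardown takes it exclusive, so no caller can race with the release. */
static void ls_guard_teardown(struct ls_guard *g, void (*release)(void *handle))
{
	down_write(&g->sem);
	if (g->handle) {
		release(g->handle);
		g->handle = NULL;
	}
	up_write(&g->sem);
}
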
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 26036ffc3f33..1c2507a27318 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -52,7 +52,6 @@
{(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
{(1UL << GLF_DIRTY), "y" }, \
{(1UL << GLF_LFLUSH), "f" }, \
- {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
{(1UL << GLF_PENDING_REPLY), "R" }, \
{(1UL << GLF_HAVE_REPLY), "r" }, \
{(1UL << GLF_INITIAL), "a" }, \
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 876bbb80fb4d..1b3e27a0d5e0 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -204,7 +204,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
fd.entrylength);
type = be16_to_cpu(entry.type);
len = NLS_MAX_CHARSET_SIZE * HFSPLUS_MAX_STRLEN;
- err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
+ err = hfsplus_uni2asc_str(sb, &fd.key->cat.name, strbuf, &len);
if (err)
goto out;
if (type == HFSPLUS_FOLDER) {
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 96a5c24813dd..2311e4be4e86 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -521,8 +521,12 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2);
int hfsplus_strcmp(const struct hfsplus_unistr *s1,
const struct hfsplus_unistr *s2);
-int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr,
- char *astr, int *len_p);
+int hfsplus_uni2asc_str(struct super_block *sb,
+ const struct hfsplus_unistr *ustr, char *astr,
+ int *len_p);
+int hfsplus_uni2asc_xattr_str(struct super_block *sb,
+ const struct hfsplus_attr_unistr *ustr,
+ char *astr, int *len_p);
int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
int max_unistr_len, const char *astr, int len);
int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str);
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 36b6cf2a3abb..862ba27f1628 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -119,9 +119,8 @@ static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
return NULL;
}
-int hfsplus_uni2asc(struct super_block *sb,
- const struct hfsplus_unistr *ustr,
- char *astr, int *len_p)
+static int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr,
+ int max_len, char *astr, int *len_p)
{
const hfsplus_unichr *ip;
struct nls_table *nls = HFSPLUS_SB(sb)->nls;
@@ -134,8 +133,8 @@ int hfsplus_uni2asc(struct super_block *sb,
ip = ustr->unicode;
ustrlen = be16_to_cpu(ustr->length);
- if (ustrlen > HFSPLUS_MAX_STRLEN) {
- ustrlen = HFSPLUS_MAX_STRLEN;
+ if (ustrlen > max_len) {
+ ustrlen = max_len;
pr_err("invalid length %u has been corrected to %d\n",
be16_to_cpu(ustr->length), ustrlen);
}
@@ -256,6 +255,21 @@ int hfsplus_uni2asc(struct super_block *sb,
return res;
}
+inline int hfsplus_uni2asc_str(struct super_block *sb,
+ const struct hfsplus_unistr *ustr, char *astr,
+ int *len_p)
+{
+ return hfsplus_uni2asc(sb, ustr, HFSPLUS_MAX_STRLEN, astr, len_p);
+}
+
+inline int hfsplus_uni2asc_xattr_str(struct super_block *sb,
+ const struct hfsplus_attr_unistr *ustr,
+ char *astr, int *len_p)
+{
+ return hfsplus_uni2asc(sb, (const struct hfsplus_unistr *)ustr,
+ HFSPLUS_ATTR_MAX_STRLEN, astr, len_p);
+}
+
/*
* Convert one or more ASCII characters into a single unicode character.
* Returns the number of ASCII characters corresponding to the unicode char.
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 18dc3d254d21..c951fa9835aa 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -735,9 +735,9 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
goto end_listxattr;
xattr_name_len = NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN;
- if (hfsplus_uni2asc(inode->i_sb,
- (const struct hfsplus_unistr *)&fd.key->attr.key_name,
- strbuf, &xattr_name_len)) {
+ if (hfsplus_uni2asc_xattr_str(inode->i_sb,
+ &fd.key->attr.key_name, strbuf,
+ &xattr_name_len)) {
pr_err("unicode conversion failed\n");
res = -EIO;
goto end_listxattr;
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index 97abf62f109d..31ce210f032a 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -49,11 +49,6 @@ struct nfs_local_fsync_ctx {
static bool localio_enabled __read_mostly = true;
module_param(localio_enabled, bool, 0644);
-static bool localio_O_DIRECT_semantics __read_mostly = false;
-module_param(localio_O_DIRECT_semantics, bool, 0644);
-MODULE_PARM_DESC(localio_O_DIRECT_semantics,
- "LOCALIO will use O_DIRECT semantics to filesystem.");
-
static inline bool nfs_client_is_local(const struct nfs_client *clp)
{
return !!rcu_access_pointer(clp->cl_uuid.net);
@@ -321,12 +316,9 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
return NULL;
}
- if (localio_O_DIRECT_semantics &&
- test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) {
- iocb->kiocb.ki_filp = file;
+ init_sync_kiocb(&iocb->kiocb, file);
+ if (test_bit(NFS_IOHDR_ODIRECT, &hdr->flags))
iocb->kiocb.ki_flags = IOCB_DIRECT;
- } else
- init_sync_kiocb(&iocb->kiocb, file);
iocb->kiocb.ki_pos = hdr->args.offset;
iocb->hdr = hdr;
@@ -336,6 +328,30 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
return iocb;
}
+static bool nfs_iov_iter_aligned_bvec(const struct iov_iter *i,
+ loff_t offset, unsigned int addr_mask, unsigned int len_mask)
+{
+ const struct bio_vec *bvec = i->bvec;
+ size_t skip = i->iov_offset;
+ size_t size = i->count;
+
+ if ((offset | size) & len_mask)
+ return false;
+ do {
+ size_t len = bvec->bv_len;
+
+ if (len > size)
+ len = size;
+ if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
+ return false;
+ bvec++;
+ size -= len;
+ skip = 0;
+ } while (size);
+
+ return true;
+}
+
static void
nfs_local_iter_init(struct iov_iter *i, struct nfs_local_kiocb *iocb, int dir)
{
@@ -345,6 +361,25 @@ nfs_local_iter_init(struct iov_iter *i, struct nfs_local_kiocb *iocb, int dir)
hdr->args.count + hdr->args.pgbase);
if (hdr->args.pgbase != 0)
iov_iter_advance(i, hdr->args.pgbase);
+
+ if (iocb->kiocb.ki_flags & IOCB_DIRECT) {
+ u32 nf_dio_mem_align, nf_dio_offset_align, nf_dio_read_offset_align;
+ /* Verify the IO is DIO-aligned as required */
+ nfs_to->nfsd_file_dio_alignment(iocb->localio, &nf_dio_mem_align,
+ &nf_dio_offset_align,
+ &nf_dio_read_offset_align);
+ if (dir == READ)
+ nf_dio_offset_align = nf_dio_read_offset_align;
+
+ if (nf_dio_mem_align && nf_dio_offset_align &&
+ nfs_iov_iter_aligned_bvec(i, hdr->args.offset,
+ nf_dio_mem_align - 1,
+ nf_dio_offset_align - 1))
+ return; /* is DIO-aligned */
+
+ /* Fallback to using buffered for this misaligned IO */
+ iocb->kiocb.ki_flags &= ~IOCB_DIRECT;
+ }
}
static void
@@ -405,6 +440,11 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
struct nfs_pgio_header *hdr = iocb->hdr;
struct file *filp = iocb->kiocb.ki_filp;
+ if ((iocb->kiocb.ki_flags & IOCB_DIRECT) && status == -EINVAL) {
+ /* Underlying FS will return -EINVAL if misaligned DIO is attempted. */
+ pr_info_ratelimited("nfs: Unexpected direct I/O read alignment failure\n");
+ }
+
nfs_local_pgio_done(hdr, status);
/*
@@ -597,6 +637,11 @@ nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
dprintk("%s: wrote %ld bytes.\n", __func__, status > 0 ? status : 0);
+ if ((iocb->kiocb.ki_flags & IOCB_DIRECT) && status == -EINVAL) {
+ /* Underlying FS will return -EINVAL if misaligned DIO is attempted. */
+ pr_info_ratelimited("nfs: Unexpected direct I/O write alignment failure\n");
+ }
+
/* Handle short writes as if they are ENOSPC */
if (status > 0 && status < hdr->args.count) {
hdr->mds_offset += status;
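
The LOCALIO direct-I/O path above only keeps IOCB_DIRECT when the bvec iterator satisfies the exported nfsd_file DIO alignments. Those STATX_DIOALIGN-style values are powers of two, so "align - 1" serves as a mask and a value is aligned when ANDing with the mask yields zero; the hunk also verifies the alignments are non-zero before relying on them. A small sketch of the offset/length part of that test:

#include <linux/types.h>

/* Both @offset and @len must be multiples of @offset_align (a power of two,
 * already checked to be non-zero by the caller). */
static bool dio_range_aligned(loff_t offset, size_t len, u32 offset_align)
{
	u32 mask = offset_align - 1;

	return ((offset | len) & mask) == 0;
}
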
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ce61253efd45..611e6283c194 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -9442,7 +9442,7 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
goto out;
if (rcvd->max_rqst_sz > sent->max_rqst_sz)
return -EINVAL;
- if (rcvd->max_resp_sz < sent->max_resp_sz)
+ if (rcvd->max_resp_sz > sent->max_resp_sz)
return -EINVAL;
if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
return -EINVAL;
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 732abf6b92a5..7ca1dedf4e04 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -231,6 +231,9 @@ nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
refcount_set(&nf->nf_ref, 1);
nf->nf_may = need;
nf->nf_mark = NULL;
+ nf->nf_dio_mem_align = 0;
+ nf->nf_dio_offset_align = 0;
+ nf->nf_dio_read_offset_align = 0;
return nf;
}
@@ -1069,6 +1072,35 @@ nfsd_file_is_cached(struct inode *inode)
return ret;
}
+static __be32
+nfsd_file_get_dio_attrs(const struct svc_fh *fhp, struct nfsd_file *nf)
+{
+ struct inode *inode = file_inode(nf->nf_file);
+ struct kstat stat;
+ __be32 status;
+
+ /* Currently only need to get DIO alignment info for regular files */
+ if (!S_ISREG(inode->i_mode))
+ return nfs_ok;
+
+ status = fh_getattr(fhp, &stat);
+ if (status != nfs_ok)
+ return status;
+
+ trace_nfsd_file_get_dio_attrs(inode, &stat);
+
+ if (stat.result_mask & STATX_DIOALIGN) {
+ nf->nf_dio_mem_align = stat.dio_mem_align;
+ nf->nf_dio_offset_align = stat.dio_offset_align;
+ }
+ if (stat.result_mask & STATX_DIO_READ_ALIGN)
+ nf->nf_dio_read_offset_align = stat.dio_read_offset_align;
+ else
+ nf->nf_dio_read_offset_align = nf->nf_dio_offset_align;
+
+ return nfs_ok;
+}
+
static __be32
nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
struct svc_cred *cred,
@@ -1187,6 +1219,8 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
}
status = nfserrno(ret);
trace_nfsd_file_open(nf, status);
+ if (status == nfs_ok)
+ status = nfsd_file_get_dio_attrs(fhp, nf);
}
} else
status = nfserr_jukebox;
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 722b26c71e45..237a05c74211 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -54,6 +54,10 @@ struct nfsd_file {
struct list_head nf_gc;
struct rcu_head nf_rcu;
ktime_t nf_birthtime;
+
+ u32 nf_dio_mem_align;
+ u32 nf_dio_offset_align;
+ u32 nf_dio_read_offset_align;
};
int nfsd_file_cache_init(void);
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
index cb237f1b902a..9e0a37cd29d8 100644
--- a/fs/nfsd/localio.c
+++ b/fs/nfsd/localio.c
@@ -117,6 +117,16 @@ nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
return localio;
}
+static void nfsd_file_dio_alignment(struct nfsd_file *nf,
+ u32 *nf_dio_mem_align,
+ u32 *nf_dio_offset_align,
+ u32 *nf_dio_read_offset_align)
+{
+ *nf_dio_mem_align = nf->nf_dio_mem_align;
+ *nf_dio_offset_align = nf->nf_dio_offset_align;
+ *nf_dio_read_offset_align = nf->nf_dio_read_offset_align;
+}
+
static const struct nfsd_localio_operations nfsd_localio_ops = {
.nfsd_net_try_get = nfsd_net_try_get,
.nfsd_net_put = nfsd_net_put,
@@ -124,6 +134,7 @@ static const struct nfsd_localio_operations nfsd_localio_ops = {
.nfsd_file_put_local = nfsd_file_put_local,
.nfsd_file_get_local = nfsd_file_get_local,
.nfsd_file_file = nfsd_file_file,
+ .nfsd_file_dio_alignment = nfsd_file_dio_alignment,
};
void nfsd_localio_ops_init(void)
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index a664fdf1161e..6e2c8e2aab10 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -1133,6 +1133,33 @@ TRACE_EVENT(nfsd_file_alloc,
)
);
+TRACE_EVENT(nfsd_file_get_dio_attrs,
+ TP_PROTO(
+ const struct inode *inode,
+ const struct kstat *stat
+ ),
+ TP_ARGS(inode, stat),
+ TP_STRUCT__entry(
+ __field(const void *, inode)
+ __field(unsigned long, mask)
+ __field(u32, mem_align)
+ __field(u32, offset_align)
+ __field(u32, read_offset_align)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->mask = stat->result_mask;
+ __entry->mem_align = stat->dio_mem_align;
+ __entry->offset_align = stat->dio_offset_align;
+ __entry->read_offset_align = stat->dio_read_offset_align;
+ ),
+ TP_printk("inode=%p flags=%s mem_align=%u offset_align=%u read_offset_align=%u",
+ __entry->inode, show_statx_mask(__entry->mask),
+ __entry->mem_align, __entry->offset_align,
+ __entry->read_offset_align
+ )
+);
+
TRACE_EVENT(nfsd_file_acquire,
TP_PROTO(
const struct svc_rqst *rqstp,
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index eff04959606f..fde3e0c11dba 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -185,6 +185,10 @@ static inline __be32 fh_getattr(const struct svc_fh *fh, struct kstat *stat)
u32 request_mask = STATX_BASIC_STATS;
struct path p = {.mnt = fh->fh_export->ex_path.mnt,
.dentry = fh->fh_dentry};
+ struct inode *inode = d_inode(p.dentry);
+
+ if (S_ISREG(inode->i_mode))
+ request_mask |= (STATX_DIOALIGN | STATX_DIO_READ_ALIGN);
if (fh->fh_maxsize == NFS4_FHSIZE)
request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index b192ee068a7a..561339b4cf75 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1999,7 +1999,10 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
user_ns = path.mnt->mnt_sb->s_user_ns;
obj = path.mnt->mnt_sb;
} else if (obj_type == FSNOTIFY_OBJ_TYPE_MNTNS) {
+ ret = -EINVAL;
mntns = mnt_ns_from_dentry(path.dentry);
+ if (!mntns)
+ goto path_put_and_out;
user_ns = mntns->user_ns;
obj = mntns;
}
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 1bf2a6593dec..6d1bf890929d 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1508,6 +1508,16 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size);
}
+ /*
+ * Index blocks exist, but $BITMAP has zero valid bits.
+ * This implies an on-disk corruption and must be rejected.
+ */
+ if (in->name == I30_NAME &&
+ unlikely(bmp_size_v == 0 && indx->alloc_run.count)) {
+ err = -EINVAL;
+ goto out1;
+ }
+
bit = bmp_size << 3;
}
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index 6e86d66197ef..88550085f745 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -9,6 +9,7 @@
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/log2.h>
+#include <linux/overflow.h>
#include "debug.h"
#include "ntfs.h"
@@ -982,14 +983,18 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
if (!dlcn)
return -EINVAL;
- lcn = prev_lcn + dlcn;
+
+ if (check_add_overflow(prev_lcn, dlcn, &lcn))
+ return -EINVAL;
prev_lcn = lcn;
} else {
/* The size of 'dlcn' can't be > 8. */
return -EINVAL;
}
- next_vcn = vcn64 + len;
+ if (check_add_overflow(vcn64, len, &next_vcn))
+ return -EINVAL;
+
/* Check boundary. */
if (next_vcn > evcn + 1)
return -EINVAL;
@@ -1153,7 +1158,8 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
return -EINVAL;
run_buf += size_size + offset_size;
- vcn64 += len;
+ if (check_add_overflow(vcn64, len, &vcn64))
+ return -EINVAL;
#ifndef CONFIG_NTFS3_64BIT_CLUSTER
if (vcn64 > 0x100000000ull)
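
The run-list parsing above replaces plain additions of on-disk deltas with check_add_overflow() from <linux/overflow.h>, which stores the sum in its third argument and returns true when the addition wrapped, so corrupted images are rejected instead of silently wrapping. A minimal sketch of the idiom:

#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Add an untrusted on-disk delta, failing instead of wrapping. */
static int add_disk_delta(u64 prev, u64 delta, u64 *out)
{
	if (check_add_overflow(prev, delta, out))
		return -EINVAL;
	return 0;
}
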
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 0f045e45fa0c..439742cec3c2 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -1011,6 +1011,7 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
printk(KERN_ERR "ocfs2: Could not determine"
" locking version\n");
user_cluster_disconnect(conn);
+ lc = NULL;
goto out;
}
wait_event(lc->oc_wait, (atomic_read(&lc->oc_this_node) > 0));
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index e586f3f4b5c9..68286673afc9 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4219,7 +4219,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
int num_rqst, const u8 *sig, u8 **iv,
struct aead_request **req, struct sg_table *sgt,
- unsigned int *num_sgs, size_t *sensitive_size)
+ unsigned int *num_sgs)
{
unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
unsigned int iv_size = crypto_aead_ivsize(tfm);
@@ -4236,9 +4236,8 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst
len += req_size;
len = ALIGN(len, __alignof__(struct scatterlist));
len += array_size(*num_sgs, sizeof(struct scatterlist));
- *sensitive_size = len;
- p = kvzalloc(len, GFP_NOFS);
+ p = kzalloc(len, GFP_NOFS);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -4252,16 +4251,14 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst
static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst,
int num_rqst, const u8 *sig, u8 **iv,
- struct aead_request **req, struct scatterlist **sgl,
- size_t *sensitive_size)
+ struct aead_request **req, struct scatterlist **sgl)
{
struct sg_table sgtable = {};
unsigned int skip, num_sgs, i, j;
ssize_t rc;
void *p;
- p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable,
- &num_sgs, sensitive_size);
+ p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs);
if (IS_ERR(p))
return ERR_CAST(p);
@@ -4350,7 +4347,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
DECLARE_CRYPTO_WAIT(wait);
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
void *creq;
- size_t sensitive_size;
rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
if (rc) {
@@ -4376,8 +4372,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
return rc;
}
- creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg,
- &sensitive_size);
+ creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
if (IS_ERR(creq))
return PTR_ERR(creq);
@@ -4407,7 +4402,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
- kvfree_sensitive(creq, sensitive_size);
+ kfree_sensitive(creq);
return rc;
}
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index e0fce5033004..6480945c2459 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -179,6 +179,8 @@ static int smbd_conn_upcall(
struct smbd_connection *info = id->context;
struct smbdirect_socket *sc = &info->socket;
const char *event_name = rdma_event_msg(event->event);
+ u8 peer_initiator_depth;
+ u8 peer_responder_resources;
log_rdma_event(INFO, "event=%s status=%d\n",
event_name, event->status);
@@ -204,6 +206,85 @@ static int smbd_conn_upcall(
case RDMA_CM_EVENT_ESTABLISHED:
log_rdma_event(INFO, "connected event=%s\n", event_name);
+
+ /*
+ * Here we work around an inconsistency between
+ * iWarp and other devices (at least rxe and irdma using RoCEv2)
+ */
+ if (rdma_protocol_iwarp(id->device, id->port_num)) {
+ /*
+ * iWarp devices report the peer's values
+ * with the perspective of the peer here.
+ * Tested with siw and irdma (in iwarp mode)
+ * We need to change to our perspective here,
+ * so we need to switch the values.
+ */
+ peer_initiator_depth = event->param.conn.responder_resources;
+ peer_responder_resources = event->param.conn.initiator_depth;
+ } else {
+ /*
+ * Non iWarp devices report the peer's values
+ * already changed to our perspective here.
+ * Tested with rxe and irdma (in roce mode).
+ */
+ peer_initiator_depth = event->param.conn.initiator_depth;
+ peer_responder_resources = event->param.conn.responder_resources;
+ }
+ if (rdma_protocol_iwarp(id->device, id->port_num) &&
+ event->param.conn.private_data_len == 8) {
+ /*
+ * Legacy clients with only iWarp MPA v1 support
+ * need a private blob in order to negotiate
+ * the IRD/ORD values.
+ */
+ const __be32 *ird_ord_hdr = event->param.conn.private_data;
+ u32 ird32 = be32_to_cpu(ird_ord_hdr[0]);
+ u32 ord32 = be32_to_cpu(ird_ord_hdr[1]);
+
+ /*
+ * cifs.ko sends the legacy IRD/ORD negotiation
+ * even if iWarp MPA v2 was used.
+ *
+ * Here we check that the values match and only
+ * mark the client as legacy if they don't match.
+ */
+ if ((u32)event->param.conn.initiator_depth != ird32 ||
+ (u32)event->param.conn.responder_resources != ord32) {
+ /*
+ * There are broken clients (old cifs.ko)
+ * using little endian and also
+ * struct rdma_conn_param only uses u8
+ * for initiator_depth and responder_resources,
+ * so we truncate the value to U8_MAX.
+ *
+ * smb_direct_accept_client() will then
+ * do the real negotiation in order to
+ * select the minimum between client and
+ * server.
+ */
+ ird32 = min_t(u32, ird32, U8_MAX);
+ ord32 = min_t(u32, ord32, U8_MAX);
+
+ info->legacy_iwarp = true;
+ peer_initiator_depth = (u8)ird32;
+ peer_responder_resources = (u8)ord32;
+ }
+ }
+
+ /*
+ * Negotiate the values by using the minimum
+ * between client and server if the client provided
+ * non-zero values.
+ */
+ if (peer_initiator_depth != 0)
+ info->initiator_depth =
+ min_t(u8, info->initiator_depth,
+ peer_initiator_depth);
+ if (peer_responder_resources != 0)
+ info->responder_resources =
+ min_t(u8, info->responder_resources,
+ peer_responder_resources);
+
sc->status = SMBDIRECT_SOCKET_CONNECTED;
wake_up_interruptible(&info->status_wait);
break;
@@ -1551,7 +1632,7 @@ static struct smbd_connection *_smbd_get_connection(
struct ib_qp_init_attr qp_attr;
struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
struct ib_port_immutable port_immutable;
- u32 ird_ord_hdr[2];
+ __be32 ird_ord_hdr[2];
info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
if (!info)
@@ -1559,6 +1640,9 @@ static struct smbd_connection *_smbd_get_connection(
sc = &info->socket;
sp = &sc->parameters;
+ info->initiator_depth = 1;
+ info->responder_resources = SMBD_CM_RESPONDER_RESOURCES;
+
sc->status = SMBDIRECT_SOCKET_CONNECTING;
rc = smbd_ia_open(info, dstaddr, port);
if (rc) {
@@ -1639,22 +1723,22 @@ static struct smbd_connection *_smbd_get_connection(
}
sc->ib.qp = sc->rdma.cm_id->qp;
- memset(&conn_param, 0, sizeof(conn_param));
- conn_param.initiator_depth = 0;
-
- conn_param.responder_resources =
- min(sc->ib.dev->attrs.max_qp_rd_atom,
- SMBD_CM_RESPONDER_RESOURCES);
- info->responder_resources = conn_param.responder_resources;
+ info->responder_resources =
+ min_t(u8, info->responder_resources,
+ sc->ib.dev->attrs.max_qp_rd_atom);
log_rdma_mr(INFO, "responder_resources=%d\n",
info->responder_resources);
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = info->initiator_depth;
+ conn_param.responder_resources = info->responder_resources;
+
/* Need to send IRD/ORD in private data for iWARP */
sc->ib.dev->ops.get_port_immutable(
sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
- ird_ord_hdr[0] = info->responder_resources;
- ird_ord_hdr[1] = 1;
+ ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
+ ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
conn_param.private_data = ird_ord_hdr;
conn_param.private_data_len = sizeof(ird_ord_hdr);
} else {
@@ -2121,6 +2205,12 @@ static int allocate_mr_list(struct smbd_connection *info)
atomic_set(&info->mr_used_count, 0);
init_waitqueue_head(&info->wait_for_mr_cleanup);
INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+
+ if (info->responder_resources == 0) {
+ log_rdma_mr(ERR, "responder_resources negotiated as 0\n");
+ return -EINVAL;
+ }
+
/* Allocate more MRs (2x) than hardware responder_resources */
for (i = 0; i < info->responder_resources * 2; i++) {
smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
index e45aa9ddd71d..4ca9b2b2c57f 100644
--- a/fs/smb/client/smbdirect.h
+++ b/fs/smb/client/smbdirect.h
@@ -67,7 +67,9 @@ struct smbd_connection {
/* Memory registrations */
/* Maximum number of RDMA read/write outstanding on this connection */
- int responder_resources;
+ bool legacy_iwarp;
+ u8 initiator_depth;
+ u8 responder_resources;
/* Maximum number of pages in a single RDMA write/read on this connection */
int max_frmr_depth;
/*
diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
index 3f07a612c05b..8ccd57fd904b 100644
--- a/fs/smb/server/ksmbd_netlink.h
+++ b/fs/smb/server/ksmbd_netlink.h
@@ -112,10 +112,11 @@ struct ksmbd_startup_request {
__u32 smbd_max_io_size; /* smbd read write size */
__u32 max_connections; /* Number of maximum simultaneous connections */
__s8 bind_interfaces_only;
- __s8 reserved[503]; /* Reserved room */
+ __u32 max_ip_connections; /* Maximum number of connections per IP address */
+ __s8 reserved[499]; /* Reserved room */
__u32 ifc_list_sz; /* interfaces list size */
__s8 ____payload[];
-};
+} __packed;
#define KSMBD_STARTUP_CONFIG_INTERFACES(s) ((s)->____payload)
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 9dec4c2940bc..b36d0676dbe5 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -104,29 +104,32 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
if (!entry)
return -ENOMEM;
- down_read(&sess->rpc_lock);
entry->method = method;
entry->id = id = ksmbd_ipc_id_alloc();
if (id < 0)
goto free_entry;
+
+ down_write(&sess->rpc_lock);
old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
- if (xa_is_err(old))
+ if (xa_is_err(old)) {
+ up_write(&sess->rpc_lock);
goto free_id;
+ }
resp = ksmbd_rpc_open(sess, id);
- if (!resp)
- goto erase_xa;
+ if (!resp) {
+ xa_erase(&sess->rpc_handle_list, entry->id);
+ up_write(&sess->rpc_lock);
+ goto free_id;
+ }
- up_read(&sess->rpc_lock);
+ up_write(&sess->rpc_lock);
kvfree(resp);
return id;
-erase_xa:
- xa_erase(&sess->rpc_handle_list, entry->id);
free_id:
ksmbd_rpc_id_free(entry->id);
free_entry:
kfree(entry);
- up_read(&sess->rpc_lock);
return -EINVAL;
}
@@ -144,9 +147,14 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
{
struct ksmbd_session_rpc *entry;
+ int method;
+ down_read(&sess->rpc_lock);
entry = xa_load(&sess->rpc_handle_list, id);
- return entry ? entry->method : 0;
+ method = entry ? entry->method : 0;
+ up_read(&sess->rpc_lock);
+
+ return method;
}
void ksmbd_session_destroy(struct ksmbd_session *sess)
diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
index 995555febe7d..b8a7317be86b 100644
--- a/fs/smb/server/server.h
+++ b/fs/smb/server/server.h
@@ -43,6 +43,7 @@ struct ksmbd_server_config {
unsigned int auth_mechs;
unsigned int max_connections;
unsigned int max_inflight_req;
+ unsigned int max_ip_connections;
char *conf[SERVER_CONF_WORK_GROUP + 1];
struct task_struct *dh_task;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index a565fc36cee6..a1db006ab6e9 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -5628,7 +5628,8 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
if (!work->tcon->posix_extensions) {
pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
- rc = -EOPNOTSUPP;
+ path_put(&path);
+ return -EOPNOTSUPP;
} else {
info = (struct filesystem_posix_info *)(rsp->Buffer);
info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize);
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 2a3e2b0ce557..2aa1b29bea08 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -335,6 +335,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
if (req->max_connections)
server_conf.max_connections = req->max_connections;
+ if (req->max_ip_connections)
+ server_conf.max_ip_connections = req->max_ip_connections;
+
ret = ksmbd_set_netbios_name(req->netbios_name);
ret |= ksmbd_set_server_string(req->server_string);
ret |= ksmbd_set_work_group(req->work_group);
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 74dfb6496095..e1f659d3b4cf 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -153,6 +153,10 @@ struct smb_direct_transport {
struct work_struct disconnect_work;
bool negotiation_requested;
+
+ bool legacy_iwarp;
+ u8 initiator_depth;
+ u8 responder_resources;
};
#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
@@ -347,6 +351,9 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
t->cm_id = cm_id;
cm_id->context = t;
+ t->initiator_depth = SMB_DIRECT_CM_INITIATOR_DEPTH;
+ t->responder_resources = 1;
+
t->status = SMB_DIRECT_CS_NEW;
init_waitqueue_head(&t->wait_status);
@@ -1676,21 +1683,21 @@ static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
static int smb_direct_accept_client(struct smb_direct_transport *t)
{
struct rdma_conn_param conn_param;
- struct ib_port_immutable port_immutable;
- u32 ird_ord_hdr[2];
+ __be32 ird_ord_hdr[2];
int ret;
+ /*
+ * smb_direct_handle_connect_request()
+ * already negotiated t->initiator_depth
+ * and t->responder_resources
+ */
memset(&conn_param, 0, sizeof(conn_param));
- conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
- SMB_DIRECT_CM_INITIATOR_DEPTH);
- conn_param.responder_resources = 0;
-
- t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
- t->cm_id->port_num,
- &port_immutable);
- if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
- ird_ord_hdr[0] = conn_param.responder_resources;
- ird_ord_hdr[1] = 1;
+ conn_param.initiator_depth = t->initiator_depth;
+ conn_param.responder_resources = t->responder_resources;
+
+ if (t->legacy_iwarp) {
+ ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
+ ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
conn_param.private_data = ird_ord_hdr;
conn_param.private_data_len = sizeof(ird_ord_hdr);
} else {
@@ -2081,10 +2088,13 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
return true;
}
-static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id,
+ struct rdma_cm_event *event)
{
struct smb_direct_transport *t;
struct task_struct *handler;
+ u8 peer_initiator_depth;
+ u8 peer_responder_resources;
int ret;
if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
@@ -2098,6 +2108,67 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
if (!t)
return -ENOMEM;
+ peer_initiator_depth = event->param.conn.initiator_depth;
+ peer_responder_resources = event->param.conn.responder_resources;
+ if (rdma_protocol_iwarp(new_cm_id->device, new_cm_id->port_num) &&
+ event->param.conn.private_data_len == 8) {
+ /*
+ * Legacy clients with only iWarp MPA v1 support
+ * need a private blob in order to negotiate
+ * the IRD/ORD values.
+ */
+ const __be32 *ird_ord_hdr = event->param.conn.private_data;
+ u32 ird32 = be32_to_cpu(ird_ord_hdr[0]);
+ u32 ord32 = be32_to_cpu(ird_ord_hdr[1]);
+
+ /*
+ * cifs.ko sends the legacy IRD/ORD negotiation
+ * even if iWarp MPA v2 was used.
+ *
+ * Here we check that the values match and only
+ * mark the client as legacy if they don't match.
+ */
+ if ((u32)event->param.conn.initiator_depth != ird32 ||
+ (u32)event->param.conn.responder_resources != ord32) {
+ /*
+ * There are broken clients (old cifs.ko)
+ * using little endian, and
+ * struct rdma_conn_param only uses u8
+ * for initiator_depth and responder_resources,
+ * so we truncate the values to U8_MAX.
+ *
+ * smb_direct_accept_client() will then
+ * do the real negotiation in order to
+ * select the minimum between client and
+ * server.
+ */
+ ird32 = min_t(u32, ird32, U8_MAX);
+ ord32 = min_t(u32, ord32, U8_MAX);
+
+ t->legacy_iwarp = true;
+ peer_initiator_depth = (u8)ird32;
+ peer_responder_resources = (u8)ord32;
+ }
+ }
+
+ /*
+ * First set what we as the server are able to support
+ */
+ t->initiator_depth = min_t(u8, t->initiator_depth,
+ new_cm_id->device->attrs.max_qp_rd_atom);
+
+ /*
+ * Negotiate the values by using the minimum
+ * between client and server if the client provided
+ * non-zero values.
+ */
+ if (peer_initiator_depth != 0)
+ t->initiator_depth = min_t(u8, t->initiator_depth,
+ peer_initiator_depth);
+ if (peer_responder_resources != 0)
+ t->responder_resources = min_t(u8, t->responder_resources,
+ peer_responder_resources);
+
ret = smb_direct_connect(t);
if (ret)
goto out_err;
@@ -2122,7 +2193,7 @@ static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
{
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST: {
- int ret = smb_direct_handle_connect_request(cm_id);
+ int ret = smb_direct_handle_connect_request(cm_id, event);
if (ret) {
pr_err("Can't create transport: %d\n", ret);
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 4337df97987d..1009cb324fd5 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -238,6 +238,7 @@ static int ksmbd_kthread_fn(void *p)
struct interface *iface = (struct interface *)p;
struct ksmbd_conn *conn;
int ret;
+ unsigned int max_ip_conns;
while (!kthread_should_stop()) {
mutex_lock(&iface->sock_release_lock);
@@ -255,34 +256,38 @@ static int ksmbd_kthread_fn(void *p)
continue;
}
+ if (!server_conf.max_ip_connections)
+ goto skip_max_ip_conns_limit;
+
/*
* Limits repeated connections from clients with the same IP.
*/
+ max_ip_conns = 0;
down_read(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list)
+ list_for_each_entry(conn, &conn_list, conns_list) {
#if IS_ENABLED(CONFIG_IPV6)
if (client_sk->sk->sk_family == AF_INET6) {
if (memcmp(&client_sk->sk->sk_v6_daddr,
- &conn->inet6_addr, 16) == 0) {
- ret = -EAGAIN;
- break;
- }
+ &conn->inet6_addr, 16) == 0)
+ max_ip_conns++;
} else if (inet_sk(client_sk->sk)->inet_daddr ==
- conn->inet_addr) {
- ret = -EAGAIN;
- break;
- }
+ conn->inet_addr)
+ max_ip_conns++;
#else
if (inet_sk(client_sk->sk)->inet_daddr ==
- conn->inet_addr) {
+ conn->inet_addr)
+ max_ip_conns++;
+#endif
+ if (server_conf.max_ip_connections <= max_ip_conns) {
ret = -EAGAIN;
break;
}
-#endif
+ }
up_read(&conn_list_lock);
if (ret == -EAGAIN)
continue;
+skip_max_ip_conns_limit:
if (server_conf.max_connections &&
atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index d5918eba27e3..53104f25de51 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -165,6 +165,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
+ squashfs_i(inode)->parent = 0;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
@@ -212,6 +213,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
+ squashfs_i(inode)->parent = 0;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
@@ -292,6 +294,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
inode->i_mode |= S_IFLNK;
squashfs_i(inode)->start = block;
squashfs_i(inode)->offset = offset;
+ squashfs_i(inode)->parent = 0;
if (type == SQUASHFS_LSYMLINK_TYPE) {
__le32 xattr;
@@ -329,6 +332,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+ squashfs_i(inode)->parent = 0;
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
@@ -353,6 +357,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+ squashfs_i(inode)->parent = 0;
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
@@ -373,6 +378,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
inode->i_mode |= S_IFSOCK;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
+ squashfs_i(inode)->parent = 0;
break;
}
case SQUASHFS_LFIFO_TYPE:
@@ -392,6 +398,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
inode->i_op = &squashfs_inode_ops;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
+ squashfs_i(inode)->parent = 0;
break;
}
default:
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index 2c82d6f2a456..8e497ac07b9a 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -16,6 +16,7 @@ struct squashfs_inode_info {
u64 xattr;
unsigned int xattr_size;
int xattr_count;
+ int parent;
union {
struct {
u64 fragment_block;
@@ -27,7 +28,6 @@ struct squashfs_inode_info {
u64 dir_idx_start;
int dir_idx_offset;
int dir_idx_cnt;
- int parent;
};
};
struct inode vfs_inode;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index f24aa98e6869..a79d73f28aa7 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -2272,6 +2272,9 @@ int udf_current_aext(struct inode *inode, struct extent_position *epos,
if (check_add_overflow(sizeof(struct allocExtDesc),
le32_to_cpu(header->lengthAllocDescs), &alen))
return -1;
+
+ if (alen > epos->bh->b_size)
+ return -1;
}
switch (iinfo->i_alloc_type) {
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 243097a3da63..8a67d4ea6e3f 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -73,7 +73,7 @@ struct acpi_table_header {
char oem_id[ACPI_OEM_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM identification */
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM table identification */
u32 oem_revision; /* OEM revision number */
- char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
+ char asl_compiler_id[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII ASL compiler vendor ID */
u32 asl_compiler_revision; /* ASL compiler version */
};
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ae2d2359b79e..8efbe8c4874e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -361,6 +361,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
__start_once = .; \
*(.data..once) \
__end_once = .; \
+ *(.data..do_once) \
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 533d6c16a491..6a2c5f2e90f9 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -18,11 +18,8 @@ struct crypto_scomp {
/**
* struct scomp_alg - synchronous compression algorithm
*
- * @alloc_ctx: Function allocates algorithm specific context
- * @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @base: Common crypto API algorithm data structure
* @streams: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with acomp
*/
@@ -34,13 +31,7 @@ struct scomp_alg {
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
- union {
- struct {
- void *(*alloc_ctx)(void);
- void (*free_ctx)(void *ctx);
- };
- struct crypto_acomp_streams streams;
- };
+ struct crypto_acomp_streams streams;
union {
struct COMP_ALG_COMMON;
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 843fb756a295..2407bfa60236 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -160,6 +160,20 @@ struct drm_panel_follower_funcs {
* Called before the panel is powered off.
*/
int (*panel_unpreparing)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_enabled:
+ *
+ * Called after the panel and the backlight have been enabled.
+ */
+ int (*panel_enabled)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_disabling:
+ *
+ * Called before the panel and the backlight are disabled.
+ */
+ int (*panel_disabling)(struct drm_panel_follower *follower);
};
struct drm_panel_follower {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 09b99d52fd36..f78145be77df 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -198,10 +198,6 @@ static inline bool blk_path_error(blk_status_t error)
return true;
}
-struct bio_issue {
- u64 value;
-};
-
typedef __u32 __bitwise blk_opf_t;
typedef unsigned int blk_qc_t;
@@ -242,7 +238,8 @@ struct bio {
* on release of the bio.
*/
struct blkcg_gq *bi_blkg;
- struct bio_issue bi_issue;
+ /* Time that this bio was issued. */
+ u64 issue_time_ns;
#ifdef CONFIG_BLK_CGROUP_IOCOST
u64 bi_iocost_cost;
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fe1797bbec42..cc221318712e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -999,6 +999,8 @@ extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);
+struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
+ struct bio_set *bs);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cc700925b802..84826dc0a326 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -285,6 +285,7 @@ struct bpf_map_owner {
bool xdp_has_frags;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
+ enum bpf_attach_type expected_attach_type;
};
struct bpf_map {
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 94defa405c85..fe9a841fdf0c 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -875,13 +875,15 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
#define verifier_bug_if(cond, env, fmt, args...) \
({ \
bool __cond = (cond); \
- if (unlikely(__cond)) { \
- BPF_WARN_ONCE(1, "verifier bug: " fmt "(" #cond ")\n", ##args); \
- bpf_log(&env->log, "verifier bug: " fmt "(" #cond ")\n", ##args); \
- } \
+ if (unlikely(__cond)) \
+ verifier_bug(env, fmt " (" #cond ")", ##args); \
(__cond); \
})
-#define verifier_bug(env, fmt, args...) verifier_bug_if(1, env, fmt, ##args)
+#define verifier_bug(env, fmt, args...) \
+ ({ \
+ BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args); \
+ bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \
+ })
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 9eda6b113f9b..f06976ffb63f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -86,7 +86,7 @@
* as to avoid issues such as the compiler inlining or eliding either a static
* kfunc, or a global kfunc in an LTO build.
*/
-#define __bpf_kfunc __used __retain noinline
+#define __bpf_kfunc __used __retain __noclone noinline
#define __bpf_kfunc_start_defs() \
__diag_push(); \
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 4ac65c68bbf4..bb49080ec8f9 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -6,6 +6,7 @@
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
+#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -480,26 +481,24 @@ static inline bool is_coresight_device(void __iomem *base)
* Returns:
*
* clk - Clock is found and enabled
- * NULL - clock is not found
+ * NULL - Clock is controlled by firmware (ACPI device only) or when managed
+ * by the AMBA bus driver instead
* ERROR - Clock is found but failed to enable
*/
static inline struct clk *coresight_get_enable_apb_pclk(struct device *dev)
{
- struct clk *pclk;
- int ret;
-
- pclk = clk_get(dev, "apb_pclk");
- if (IS_ERR(pclk)) {
- pclk = clk_get(dev, "apb");
- if (IS_ERR(pclk))
- return NULL;
- }
+ struct clk *pclk = NULL;
+
+ /* Firmware controls clocks for an ACPI device. */
+ if (has_acpi_companion(dev))
+ return NULL;
- ret = clk_prepare_enable(pclk);
- if (ret) {
- clk_put(pclk);
- return ERR_PTR(ret);
+ if (!dev_is_amba(dev)) {
+ pclk = devm_clk_get_optional_enabled(dev, "apb_pclk");
+ if (!pclk)
+ pclk = devm_clk_get_optional_enabled(dev, "apb");
}
+
return pclk;
}
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6de7c05d6bd8..99efe2b9b4ea 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -594,9 +594,9 @@ struct dma_descriptor_metadata_ops {
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * descriptor pending. To be pushed on .issue_pending() call
* @desc_free: driver's callback function to free a reusable descriptor
* after completion
- * descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_result: error result from a DMA transaction
* @callback_param: general parameter to pass to the callback routine
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2cc4f1e4ea96..c32425b5d011 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -364,6 +364,7 @@ struct hid_item {
* | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
* | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
* | @HID_QUIRK_IGNORE_SPECIAL_DRIVER
+ * | @HID_QUIRK_POWER_ON_AFTER_BACKLIGHT
* | @HID_QUIRK_FULLSPEED_INTERVAL:
* | @HID_QUIRK_NO_INIT_REPORTS:
* | @HID_QUIRK_NO_IGNORE:
@@ -391,6 +392,7 @@ struct hid_item {
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
#define HID_QUIRK_NOINVERT BIT(21)
#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22)
+#define HID_QUIRK_POWER_ON_AFTER_BACKLIGHT BIT(23)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1d6b606a81ef..890e1371f5d4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -669,6 +669,8 @@ extern int irq_chip_set_parent_state(struct irq_data *data,
extern int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
+extern void irq_chip_shutdown_parent(struct irq_data *data);
+extern unsigned int irq_chip_startup_parent(struct irq_data *data);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 785173aa0739..25921fbec685 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1604,6 +1604,7 @@ extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
#if BITS_PER_LONG < 64
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
@@ -1661,6 +1662,11 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
+
+static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+}
+
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1ae97a0b8ec7..c6794d0e24eb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -296,7 +296,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
-#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+#define VM_MERGEABLE BIT(31) /* KSM may merge identical pages */
#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index fe3d6d98f8da..673cbdf43453 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -77,7 +77,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7
-#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
+#define SDIO_DEVICE_ID_BROADCOM_43752 0xaae8
#define SDIO_VENDOR_ID_CYPRESS 0x04b4
#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d
diff --git a/include/linux/msi.h b/include/linux/msi.h
index e5e86a8529fb..3111ba95fbde 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -568,6 +568,8 @@ enum {
MSI_FLAG_PARENT_PM_DEV = (1 << 8),
/* Support for parent mask/unmask */
MSI_FLAG_PCI_MSI_MASK_PARENT = (1 << 9),
+ /* Support for parent startup/shutdown */
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT = (1 << 10),
/* Mask for the generic functionality */
MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
index 5c7c92659e73..7ca2715edccc 100644
--- a/include/linux/nfslocalio.h
+++ b/include/linux/nfslocalio.h
@@ -65,6 +65,8 @@ struct nfsd_localio_operations {
struct net *(*nfsd_file_put_local)(struct nfsd_file __rcu **);
struct nfsd_file *(*nfsd_file_get_local)(struct nfsd_file *);
struct file *(*nfsd_file_file)(struct nfsd_file *);
+ void (*nfsd_file_dio_alignment)(struct nfsd_file *,
+ u32 *, u32 *, u32 *);
} ____cacheline_aligned;
extern void nfsd_localio_ops_init(void);
diff --git a/include/linux/once.h b/include/linux/once.h
index 30346fcdc799..449a0e34ad5a 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool __section(".data..once") ___done = false; \
+ static bool __section(".data..do_once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
@@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE_SLEEPABLE(func, ...) \
({ \
bool ___ret = false; \
- static bool __section(".data..once") ___done = false; \
+ static bool __section(".data..do_once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
___ret = __do_once_sleepable_start(&___done); \
diff --git a/include/linux/phy.h b/include/linux/phy.h
index bb45787d8684..04553419adc3 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1273,9 +1273,13 @@ struct phy_driver {
#define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
-#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
-#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
-#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+#define PHY_ID_MATCH_EXTACT_MASK GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL_MASK GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR_MASK GENMASK(31, 10)
+
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_EXTACT_MASK
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_MODEL_MASK
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_VENDOR_MASK
/**
* phy_id_compare - compare @id1 with @id2 taking account of @mask
@@ -1291,6 +1295,19 @@ static inline bool phy_id_compare(u32 id1, u32 id2, u32 mask)
return !((id1 ^ id2) & mask);
}
+/**
+ * phy_id_compare_vendor - compare @id with @vendor mask
+ * @id: PHY ID
+ * @vendor_mask: PHY Vendor mask
+ *
+ * Return: true if the bits from @id match @vendor using the
+ * generic PHY Vendor mask.
+ */
+static inline bool phy_id_compare_vendor(u32 id, u32 vendor_mask)
+{
+ return phy_id_compare(id, vendor_mask, PHY_ID_MATCH_VENDOR_MASK);
+}
+
/**
* phydev_id_compare - compare @id with the PHY's Clause 22 ID
* @phydev: the PHY device
diff --git a/include/linux/power/max77705_charger.h b/include/linux/power/max77705_charger.h
index fdec9af9c541..a612795577b6 100644
--- a/include/linux/power/max77705_charger.h
+++ b/include/linux/power/max77705_charger.h
@@ -9,6 +9,8 @@
#ifndef __MAX77705_CHARGER_H
#define __MAX77705_CHARGER_H __FILE__
+#include <linux/regmap.h>
+
/* MAX77705_CHG_REG_CHG_INT */
#define MAX77705_BYP_I BIT(0)
#define MAX77705_INP_LIMIT_I BIT(1)
@@ -63,7 +65,6 @@
#define MAX77705_BUCK_SHIFT 2
#define MAX77705_BOOST_SHIFT 3
#define MAX77705_WDTEN_SHIFT 4
-#define MAX77705_MODE_MASK GENMASK(3, 0)
#define MAX77705_CHG_MASK BIT(MAX77705_CHG_SHIFT)
#define MAX77705_UNO_MASK BIT(MAX77705_UNO_SHIFT)
#define MAX77705_OTG_MASK BIT(MAX77705_OTG_SHIFT)
@@ -74,34 +75,19 @@
#define MAX77705_OTG_CTRL (MAX77705_OTG_MASK | MAX77705_BOOST_MASK)
/* MAX77705_CHG_REG_CNFG_01 */
-#define MAX77705_FCHGTIME_SHIFT 0
-#define MAX77705_FCHGTIME_MASK GENMASK(2, 0)
-#define MAX77705_CHG_RSTRT_SHIFT 4
-#define MAX77705_CHG_RSTRT_MASK GENMASK(5, 4)
#define MAX77705_FCHGTIME_DISABLE 0
#define MAX77705_CHG_RSTRT_DISABLE 0x3
-#define MAX77705_PQEN_SHIFT 7
-#define MAX77705_PQEN_MASK BIT(7)
#define MAX77705_CHG_PQEN_DISABLE 0
#define MAX77705_CHG_PQEN_ENABLE 1
/* MAX77705_CHG_REG_CNFG_02 */
-#define MAX77705_OTG_ILIM_SHIFT 6
-#define MAX77705_OTG_ILIM_MASK GENMASK(7, 6)
#define MAX77705_OTG_ILIM_500 0
#define MAX77705_OTG_ILIM_900 1
#define MAX77705_OTG_ILIM_1200 2
#define MAX77705_OTG_ILIM_1500 3
-#define MAX77705_CHG_CC GENMASK(5, 0)
/* MAX77705_CHG_REG_CNFG_03 */
-#define MAX77705_TO_ITH_SHIFT 0
-#define MAX77705_TO_ITH_MASK GENMASK(2, 0)
-#define MAX77705_TO_TIME_SHIFT 3
-#define MAX77705_TO_TIME_MASK GENMASK(5, 3)
-#define MAX77705_SYS_TRACK_DIS_SHIFT 7
-#define MAX77705_SYS_TRACK_DIS_MASK BIT(7)
#define MAX77705_TO_ITH_150MA 0
#define MAX77705_TO_TIME_30M 3
#define MAX77705_SYS_TRACK_ENABLE 0
@@ -110,15 +96,8 @@
/* MAX77705_CHG_REG_CNFG_04 */
#define MAX77705_CHG_MINVSYS_SHIFT 6
#define MAX77705_CHG_MINVSYS_MASK GENMASK(7, 6)
-#define MAX77705_CHG_PRM_SHIFT 0
-#define MAX77705_CHG_PRM_MASK GENMASK(5, 0)
-
-#define MAX77705_CHG_CV_PRM_SHIFT 0
-#define MAX77705_CHG_CV_PRM_MASK GENMASK(5, 0)
/* MAX77705_CHG_REG_CNFG_05 */
-#define MAX77705_REG_B2SOVRC_SHIFT 0
-#define MAX77705_REG_B2SOVRC_MASK GENMASK(3, 0)
#define MAX77705_B2SOVRC_DISABLE 0
#define MAX77705_B2SOVRC_4_5A 6
#define MAX77705_B2SOVRC_4_8A 8
@@ -128,9 +107,8 @@
#define MAX77705_WDTCLR_SHIFT 0
#define MAX77705_WDTCLR_MASK GENMASK(1, 0)
#define MAX77705_WDTCLR 1
-#define MAX77705_CHGPROT_MASK GENMASK(3, 2)
-#define MAX77705_CHGPROT_UNLOCKED GENMASK(3, 2)
-#define MAX77705_SLOWEST_LX_SLOPE GENMASK(6, 5)
+#define MAX77705_CHGPROT_UNLOCKED 3
+#define MAX77705_SLOWEST_LX_SLOPE 3
/* MAX77705_CHG_REG_CNFG_07 */
#define MAX77705_CHG_FMBST 4
@@ -140,36 +118,14 @@
#define MAX77705_REG_FGSRC_MASK BIT(MAX77705_REG_FGSRC_SHIFT)
/* MAX77705_CHG_REG_CNFG_08 */
-#define MAX77705_REG_FSW_SHIFT 0
-#define MAX77705_REG_FSW_MASK GENMASK(1, 0)
#define MAX77705_CHG_FSW_3MHz 0
#define MAX77705_CHG_FSW_2MHz 1
#define MAX77705_CHG_FSW_1_5MHz 2
/* MAX77705_CHG_REG_CNFG_09 */
-#define MAX77705_CHG_CHGIN_LIM_MASK GENMASK(6, 0)
-#define MAX77705_CHG_EN_MASK BIT(7)
#define MAX77705_CHG_DISABLE 0
-#define MAX77705_CHARGER_CHG_CHARGING(_reg) \
- (((_reg) & MAX77705_CHG_EN_MASK) > 1)
-
-
-/* MAX77705_CHG_REG_CNFG_10 */
-#define MAX77705_CHG_WCIN_LIM GENMASK(5, 0)
-
-/* MAX77705_CHG_REG_CNFG_11 */
-#define MAX77705_VBYPSET_SHIFT 0
-#define MAX77705_VBYPSET_MASK GENMASK(6, 0)
/* MAX77705_CHG_REG_CNFG_12 */
-#define MAX77705_CHGINSEL_SHIFT 5
-#define MAX77705_CHGINSEL_MASK BIT(MAX77705_CHGINSEL_SHIFT)
-#define MAX77705_WCINSEL_SHIFT 6
-#define MAX77705_WCINSEL_MASK BIT(MAX77705_WCINSEL_SHIFT)
-#define MAX77705_VCHGIN_REG_MASK GENMASK(4, 3)
-#define MAX77705_WCIN_REG_MASK GENMASK(2, 1)
-#define MAX77705_REG_DISKIP_SHIFT 0
-#define MAX77705_REG_DISKIP_MASK BIT(MAX77705_REG_DISKIP_SHIFT)
/* REG=4.5V, UVLO=4.7V */
#define MAX77705_VCHGIN_4_5 0
/* REG=4.5V, UVLO=4.7V */
@@ -183,9 +139,59 @@
#define MAX77705_CURRENT_CHGIN_MIN 100000
#define MAX77705_CURRENT_CHGIN_MAX 3200000
+enum max77705_field_idx {
+ MAX77705_CHGPROT,
+ MAX77705_CHG_EN,
+ MAX77705_CHG_CC_LIM,
+ MAX77705_CHG_CHGIN_LIM,
+ MAX77705_CHG_CV_PRM,
+ MAX77705_CHG_PQEN,
+ MAX77705_CHG_RSTRT,
+ MAX77705_CHG_WCIN,
+ MAX77705_FCHGTIME,
+ MAX77705_LX_SLOPE,
+ MAX77705_MODE,
+ MAX77705_OTG_ILIM,
+ MAX77705_REG_B2SOVRC,
+ MAX77705_REG_DISKIP,
+ MAX77705_REG_FSW,
+ MAX77705_SYS_TRACK,
+ MAX77705_TO,
+ MAX77705_TO_TIME,
+ MAX77705_VBYPSET,
+ MAX77705_VCHGIN,
+ MAX77705_WCIN,
+ MAX77705_N_REGMAP_FIELDS,
+};
+
+static const struct reg_field max77705_reg_field[MAX77705_N_REGMAP_FIELDS] = {
+ [MAX77705_MODE] = REG_FIELD(MAX77705_CHG_REG_CNFG_00, 0, 3),
+ [MAX77705_FCHGTIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 0, 2),
+ [MAX77705_CHG_RSTRT] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 4, 5),
+ [MAX77705_CHG_PQEN] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 7, 7),
+ [MAX77705_CHG_CC_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 0, 5),
+ [MAX77705_OTG_ILIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 6, 7),
+ [MAX77705_TO] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 0, 2),
+ [MAX77705_TO_TIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 3, 5),
+ [MAX77705_SYS_TRACK] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 7, 7),
+ [MAX77705_CHG_CV_PRM] = REG_FIELD(MAX77705_CHG_REG_CNFG_04, 0, 5),
+ [MAX77705_REG_B2SOVRC] = REG_FIELD(MAX77705_CHG_REG_CNFG_05, 0, 3),
+ [MAX77705_CHGPROT] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 2, 3),
+ [MAX77705_LX_SLOPE] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 5, 6),
+ [MAX77705_REG_FSW] = REG_FIELD(MAX77705_CHG_REG_CNFG_08, 0, 1),
+ [MAX77705_CHG_CHGIN_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 0, 6),
+ [MAX77705_CHG_EN] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 7, 7),
+ [MAX77705_CHG_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_10, 0, 5),
+ [MAX77705_VBYPSET] = REG_FIELD(MAX77705_CHG_REG_CNFG_11, 0, 6),
+ [MAX77705_REG_DISKIP] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 0, 0),
+ [MAX77705_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 1, 2),
+ [MAX77705_VCHGIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 3, 4),
+};
+
struct max77705_charger_data {
struct device *dev;
struct regmap *regmap;
+ struct regmap_field *rfield[MAX77705_N_REGMAP_FIELDS];
struct power_supply_battery_info *bat_info;
struct workqueue_struct *wqueue;
struct work_struct chgin_work;
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 5263746b63e8..a3a24e115d44 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -30,11 +30,19 @@ struct sd_flag_debug {
};
extern const struct sd_flag_debug sd_flag_debug[];
+struct sched_domain_topology_level;
+
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}
+
+static inline const
+struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)
+{
+ return cpu_smt_mask(cpu);
+}
#endif
#ifdef CONFIG_SCHED_CLUSTER
@@ -42,6 +50,12 @@ static inline int cpu_cluster_flags(void)
{
return SD_CLUSTER | SD_SHARE_LLC;
}
+
+static inline const
+struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu)
+{
+ return cpu_clustergroup_mask(cpu);
+}
#endif
#ifdef CONFIG_SCHED_MC
@@ -49,8 +63,20 @@ static inline int cpu_core_flags(void)
{
return SD_SHARE_LLC;
}
+
+static inline const
+struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu)
+{
+ return cpu_coregroup_mask(cpu);
+}
#endif
+static inline const
+struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu)
+{
+ return cpu_node_mask(cpu);
+}
+
#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
@@ -172,7 +198,7 @@ bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);
-typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef const struct cpumask *(*sched_domain_mask_f)(struct sched_domain_topology_level *tl, int cpu);
typedef int (*sched_domain_flags_f)(void);
struct sd_data {
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 33b7fda97d39..6575af39fd10 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -260,7 +260,7 @@ static inline bool topology_is_primary_thread(unsigned int cpu)
#endif
-static inline const struct cpumask *cpu_cpu_mask(int cpu)
+static inline const struct cpumask *cpu_node_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
}
diff --git a/include/net/bonding.h b/include/net/bonding.h
index e06f0d63b2c1..bd56ad976cfb 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -711,6 +711,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
void bond_work_init_all(struct bonding *bond);
+void bond_work_cancel_all(struct bonding *bond);
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
diff --git a/include/net/dst.h b/include/net/dst.h
index bab01363bb97..f8aa1239b4db 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -24,7 +24,10 @@
struct sk_buff;
struct dst_entry {
- struct net_device *dev;
+ union {
+ struct net_device *dev;
+ struct net_device __rcu *dev_rcu;
+ };
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
@@ -570,9 +573,12 @@ static inline struct net_device *dst_dev(const struct dst_entry *dst)
static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
{
- /* In the future, use rcu_dereference(dst->dev) */
- WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(dst->dev);
+ return rcu_dereference(dst->dev_rcu);
+}
+
+static inline struct net *dst_dev_net_rcu(const struct dst_entry *dst)
+{
+ return dev_net_rcu(dst_dev_rcu(dst));
}
static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
@@ -592,7 +598,7 @@ static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb)
{
- return dev_net_rcu(skb_dst_dev(skb));
+ return dev_net_rcu(skb_dst_dev_rcu(skb));
}
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
diff --git a/include/net/ip.h b/include/net/ip.h
index befcba575129..a1624e8db1ab 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -338,6 +338,19 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
+#define snmp_get_cpu_field64_batch_cnt(buff64, stats_list, cnt, \
+ mib_statistic, offset) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; i < cnt; i++) \
+ buff64[i] += snmp_get_cpu_field64( \
+ mib_statistic, \
+ c, stats_list[i].entry, \
+ offset); \
+ } \
+}
+
#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
int i, c; \
@@ -349,6 +362,17 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
+#define snmp_get_cpu_field_batch_cnt(buff, stats_list, cnt, mib_statistic) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; i < cnt; i++) \
+ buff[i] += snmp_get_cpu_field( \
+ mib_statistic, \
+ c, stats_list[i].entry); \
+ } \
+}
+
static inline void inet_get_local_port_range(const struct net *net, int *low, int *high)
{
u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);
@@ -467,12 +491,14 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
const struct rtable *rt = dst_rtable(dst);
+ const struct net_device *dev;
unsigned int mtu, res;
struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst_dev(dst));
+ dev = dst_dev_rcu(dst);
+ net = dev_net_rcu(dev);
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
!forwarding) {
@@ -486,7 +512,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
if (mtu)
goto out;
- mtu = READ_ONCE(dst_dev(dst)->mtu);
+ mtu = READ_ONCE(dev->mtu);
if (unlikely(ip_mtu_locked(dst))) {
if (rt->rt_uses_gateway && mtu > 576)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 9255f21818ee..59f48ca3abdf 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
mtu = IPV6_MIN_MTU;
rcu_read_lock();
- idev = __in6_dev_get(dst_dev(dst));
+ idev = __in6_dev_get(dst_dev_rcu(dst));
if (idev)
mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
diff --git a/include/net/route.h b/include/net/route.h
index 7ea840daa775..c916bbe25a77 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -390,7 +390,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
const struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst_dev(dst));
+ net = dst_dev_net_rcu(dst);
hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
rcu_read_unlock();
}
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ba460b6c0374..8d38565e99fa 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -203,6 +203,14 @@ static inline bool dev_is_expander(enum sas_device_type type)
type == SAS_FANOUT_EXPANDER_DEVICE;
}
+static inline bool dev_parent_is_expander(struct domain_device *dev)
+{
+ if (!dev->parent)
+ return false;
+
+ return dev_is_expander(dev->parent->dev_type);
+}
+
static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
{
INIT_WORK(&sw->work, fn);
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index b8d1e00a7982..2dfeb158e848 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -27,7 +27,8 @@
{ FL_SLEEP, "FL_SLEEP" }, \
{ FL_DOWNGRADE_PENDING, "FL_DOWNGRADE_PENDING" }, \
{ FL_UNLOCK_PENDING, "FL_UNLOCK_PENDING" }, \
- { FL_OFDLCK, "FL_OFDLCK" })
+ { FL_OFDLCK, "FL_OFDLCK" }, \
+ { FL_RECLAIM, "FL_RECLAIM"})
#define show_fl_type(val) \
__print_symbolic(val, \
diff --git a/include/trace/misc/fs.h b/include/trace/misc/fs.h
index 0406ebe2a80a..7ead1c61f0cb 100644
--- a/include/trace/misc/fs.h
+++ b/include/trace/misc/fs.h
@@ -141,3 +141,25 @@
{ ATTR_TIMES_SET, "TIMES_SET" }, \
{ ATTR_TOUCH, "TOUCH"}, \
{ ATTR_DELEG, "DELEG"})
+
+#define show_statx_mask(flags) \
+ __print_flags(flags, "|", \
+ { STATX_TYPE, "TYPE" }, \
+ { STATX_MODE, "MODE" }, \
+ { STATX_NLINK, "NLINK" }, \
+ { STATX_UID, "UID" }, \
+ { STATX_GID, "GID" }, \
+ { STATX_ATIME, "ATIME" }, \
+ { STATX_MTIME, "MTIME" }, \
+ { STATX_CTIME, "CTIME" }, \
+ { STATX_INO, "INO" }, \
+ { STATX_SIZE, "SIZE" }, \
+ { STATX_BLOCKS, "BLOCKS" }, \
+ { STATX_BASIC_STATS, "BASIC_STATS" }, \
+ { STATX_BTIME, "BTIME" }, \
+ { STATX_MNT_ID, "MNT_ID" }, \
+ { STATX_DIOALIGN, "DIOALIGN" }, \
+ { STATX_MNT_ID_UNIQUE, "MNT_ID_UNIQUE" }, \
+ { STATX_SUBVOL, "SUBVOL" }, \
+ { STATX_WRITE_ATOMIC, "WRITE_ATOMIC" }, \
+ { STATX_DIO_READ_ALIGN, "DIO_READ_ALIGN" })
diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h
index d5ee269864e0..ebd701b3c18d 100644
--- a/include/uapi/linux/hidraw.h
+++ b/include/uapi/linux/hidraw.h
@@ -48,6 +48,8 @@ struct hidraw_devinfo {
#define HIDIOCGOUTPUT(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x0C, len)
#define HIDIOCREVOKE _IOW('H', 0x0D, int) /* Revoke device access */
+#define HIDIOCTL_LAST _IOC_NR(HIDIOCREVOKE)
+
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
/* number of reports to buffer */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 1d3943777584..a3fa98540d18 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -963,6 +963,7 @@ enum ufshcd_mcq_opr {
* @ufs_rtc_update_work: A work for UFS RTC periodic update
* @pm_qos_req: PM QoS request handle
* @pm_qos_enabled: flag to check if pm qos is enabled
+ * @pm_qos_mutex: synchronizes PM QoS request and status updates
* @critical_health_count: count of critical health exceptions
* @dev_lvl_exception_count: count of device level exceptions since last reset
* @dev_lvl_exception_id: vendor specific information about the
@@ -1136,6 +1137,8 @@ struct ufs_hba {
struct delayed_work ufs_rtc_update_work;
struct pm_qos_request pm_qos_req;
bool pm_qos_enabled;
+ /* synchronizes PM QoS request and status updates */
+ struct mutex pm_qos_mutex;
int critical_health_count;
atomic_t dev_lvl_exception_count;
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
index c50d152e7b3e..9ac161866653 100644
--- a/include/vdso/gettime.h
+++ b/include/vdso/gettime.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
struct __kernel_timespec;
+struct __kernel_old_timeval;
struct timezone;
#if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
diff --git a/init/Kconfig b/init/Kconfig
index ecddb94db8dc..87c868f86a06 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -102,7 +102,7 @@ config CC_HAS_ASM_GOTO_OUTPUT
# Detect basic support
depends on $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
# Detect clang (< v17) scoped label issues
- depends on $(success,echo 'void b(void **);void* c(void);int f(void){{asm goto("jmp %l0"::::l0);return 0;l0:return 1;}void *x __attribute__((cleanup(b)))=c();{asm goto("jmp %l0"::::l1);return 2;l1:return 3;}}' | $(CC) -x c - -c -o /dev/null)
+ depends on $(success,echo 'void b(void **);void* c(void);int f(void){{asm goto(""::::l0);return 0;l0:return 1;}void *x __attribute__((cleanup(b)))=c();{asm goto(""::::l1);return 2;l1:return 3;}}' | $(CC) -x c - -c -o /dev/null)
config CC_HAS_ASM_GOTO_TIED_OUTPUT
depends on CC_HAS_ASM_GOTO_OUTPUT
@@ -1504,6 +1504,7 @@ config BOOT_CONFIG_EMBED_FILE
config INITRAMFS_PRESERVE_MTIME
bool "Preserve cpio archive mtimes in initramfs"
+ depends on BLK_DEV_INITRD
default y
help
Each entry in an initramfs cpio archive carries an mtime value. When
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index e07a94694397..3101ad8ec0cf 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -232,13 +232,14 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
if (!pid_child_should_wake(wo, p))
return 0;
+ list_del_init(&wait->entry);
+
/* cancel is in progress */
if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
return 1;
req->io_task_work.func = io_waitid_cb;
io_req_task_work_add(req);
- list_del_init(&wait->entry);
return 1;
}
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index e5ff49f3425e..643a69f9ffe2 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -1154,12 +1154,16 @@ io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
end = start + frag_iter->len;
if (offset < end) {
+ size_t count;
+
copy = end - offset;
if (copy > len)
copy = len;
off = offset - start;
+ count = desc->count;
ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
+ desc->count = count;
if (ret < 0)
goto out;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index e4568d44e827..f6dd071f5e38 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2393,6 +2393,7 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
map->owner->type = prog_type;
map->owner->jited = fp->jited;
map->owner->xdp_has_frags = aux->xdp_has_frags;
+ map->owner->expected_attach_type = fp->expected_attach_type;
map->owner->attach_func_proto = aux->attach_func_proto;
for_each_cgroup_storage_type(i) {
map->owner->storage_cookie[i] =
@@ -2404,6 +2405,10 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
ret = map->owner->type == prog_type &&
map->owner->jited == fp->jited &&
map->owner->xdp_has_frags == aux->xdp_has_frags;
+ if (ret &&
+ map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
+ map->owner->expected_attach_type != fp->expected_attach_type)
+ ret = false;
for_each_cgroup_storage_type(i) {
if (!ret)
break;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 8af62cb243d9..9c750a6a895b 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -774,11 +774,9 @@ int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
int nest_level;
- preempt_disable();
nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
this_cpu_dec(bpf_bprintf_nest_level);
- preempt_enable();
return -EBUSY;
}
*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
@@ -791,7 +789,6 @@ void bpf_put_buffers(void)
if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
return;
this_cpu_dec(bpf_bprintf_nest_level);
- preempt_enable();
}
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9fb1f957a093..ed1457c27340 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1946,9 +1946,24 @@ static int maybe_exit_scc(struct bpf_verifier_env *env, struct bpf_verifier_stat
return 0;
visit = scc_visit_lookup(env, callchain);
if (!visit) {
- verifier_bug(env, "scc exit: no visit info for call chain %s",
- format_callchain(env, callchain));
- return -EFAULT;
+ /*
+ * If path traversal stops inside an SCC, corresponding bpf_scc_visit
+ * must exist for non-speculative paths. For non-speculative paths
+ * traversal stops when:
+ * a. Verification error is found, maybe_exit_scc() is not called.
+ * b. Top level BPF_EXIT is reached. Top level BPF_EXIT is not a member
+ * of any SCC.
+ * c. A checkpoint is reached and matched. Checkpoints are created by
+ * is_state_visited(), which calls maybe_enter_scc(), which allocates
+ * bpf_scc_visit instances for checkpoints within SCCs.
+ * (c) is the only case that can reach this point.
+ */
+ if (!st->speculative) {
+ verifier_bug(env, "scc exit: no visit info for call chain %s",
+ format_callchain(env, callchain));
+ return -EFAULT;
+ }
+ return 0;
}
if (visit->entry_state != st)
return 0;
@@ -15577,7 +15592,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
/* check dest operand */
- if (opcode == BPF_NEG) {
+ if (opcode == BPF_NEG &&
+ regs[insn->dst_reg].type == SCALAR_VALUE) {
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
err = err ?: adjust_scalar_min_max_vals(env, insn,
&regs[insn->dst_reg],
@@ -15739,7 +15755,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
} else { /* all other ALU ops: and, sub, xor, add, ... */
if (BPF_SRC(insn->code) == BPF_X) {
- if (insn->imm != 0 || insn->off > 1 ||
+ if (insn->imm != 0 || (insn->off != 0 && insn->off != 1) ||
(insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
@@ -15749,7 +15765,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
} else {
- if (insn->src_reg != BPF_REG_0 || insn->off > 1 ||
+ if (insn->src_reg != BPF_REG_0 || (insn->off != 0 && insn->off != 1) ||
(insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 27adb04df675..fef93032fe7e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1716,6 +1716,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
xcpus = tmp->delmask;
if (compute_effective_exclusive_cpumask(cs, xcpus, NULL))
WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
+ new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
/*
* Enabling partition root is not allowed if its
@@ -1748,7 +1749,6 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
deleting = true;
subparts_delta++;
- new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
} else if (cmd == partcmd_disable) {
/*
* May need to add cpus back to parent's effective_cpus
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7ca1940607bd..4b97d16f731c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -121,7 +121,7 @@ struct xol_area {
static void uprobe_warn(struct task_struct *t, const char *msg)
{
- pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
+ pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
}
/*
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 1da5e9d9da71..a75df2bb9db6 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -147,7 +147,9 @@ config GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD
config IRQ_KUNIT_TEST
bool "KUnit tests for IRQ management APIs" if !KUNIT_ALL_TESTS
depends on KUNIT=y
+ depends on SPARSE_IRQ
default KUNIT_ALL_TESTS
+ select IRQ_DOMAIN
imply SMP
help
This option enables KUnit tests for the IRQ subsystem API. These are
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 0d0276378c70..3ffa0d80ddd1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1259,6 +1259,43 @@ int irq_chip_get_parent_state(struct irq_data *data,
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
+/**
+ * irq_chip_shutdown_parent - Shutdown the parent interrupt
+ * @data: Pointer to interrupt specific data
+ *
+ * Invokes the irq_shutdown() callback of the parent if available or falls
+ * back to irq_chip_disable_parent().
+ */
+void irq_chip_shutdown_parent(struct irq_data *data)
+{
+ struct irq_data *parent = data->parent_data;
+
+ if (parent->chip->irq_shutdown)
+ parent->chip->irq_shutdown(parent);
+ else
+ irq_chip_disable_parent(data);
+}
+EXPORT_SYMBOL_GPL(irq_chip_shutdown_parent);
+
+/**
+ * irq_chip_startup_parent - Startup the parent interrupt
+ * @data: Pointer to interrupt specific data
+ *
+ * Invokes the irq_startup() callback of the parent if available or falls
+ * back to irq_chip_enable_parent().
+ */
+unsigned int irq_chip_startup_parent(struct irq_data *data)
+{
+ struct irq_data *parent = data->parent_data;
+
+ if (parent->chip->irq_startup)
+ return parent->chip->irq_startup(parent);
+
+ irq_chip_enable_parent(data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_chip_startup_parent);
+
/**
* irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
* NULL)
diff --git a/kernel/irq/irq_test.c b/kernel/irq/irq_test.c
index a75abebed7f2..f71f46fdcfd5 100644
--- a/kernel/irq/irq_test.c
+++ b/kernel/irq/irq_test.c
@@ -54,6 +54,9 @@ static void irq_disable_depth_test(struct kunit *test)
desc = irq_to_desc(virq);
KUNIT_ASSERT_PTR_NE(test, desc, NULL);
+ /* On some architectures, IRQs are NOREQUEST | NOPROBE by default. */
+ irq_settings_clr_norequest(desc);
+
ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -81,6 +84,9 @@ static void irq_free_disabled_test(struct kunit *test)
desc = irq_to_desc(virq);
KUNIT_ASSERT_PTR_NE(test, desc, NULL);
+ /* On some architectures, IRQs are NOREQUEST | NOPROBE by default. */
+ irq_settings_clr_norequest(desc);
+
ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -120,6 +126,9 @@ static void irq_shutdown_depth_test(struct kunit *test)
desc = irq_to_desc(virq);
KUNIT_ASSERT_PTR_NE(test, desc, NULL);
+ /* On some architectures, IRQs are NOREQUEST | NOPROBE by default. */
+ irq_settings_clr_norequest(desc);
+
data = irq_desc_get_irq_data(desc);
KUNIT_ASSERT_PTR_NE(test, data, NULL);
@@ -169,6 +178,8 @@ static void irq_cpuhotplug_test(struct kunit *test)
kunit_skip(test, "requires more than 1 CPU for CPU hotplug");
if (!cpu_is_hotpluggable(1))
kunit_skip(test, "CPU 1 must be hotpluggable");
+ if (!cpu_online(1))
+ kunit_skip(test, "CPU 1 must be online");
cpumask_copy(&affinity.mask, cpumask_of(1));
@@ -180,6 +191,9 @@ static void irq_cpuhotplug_test(struct kunit *test)
desc = irq_to_desc(virq);
KUNIT_ASSERT_PTR_NE(test, desc, NULL);
+ /* On some architectures, IRQs are NOREQUEST | NOPROBE by default. */
+ irq_settings_clr_norequest(desc);
+
data = irq_desc_get_irq_data(desc);
KUNIT_ASSERT_PTR_NE(test, data, NULL);
@@ -196,13 +210,9 @@ static void irq_cpuhotplug_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, desc->depth, 1);
KUNIT_EXPECT_EQ(test, remove_cpu(1), 0);
- KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
- KUNIT_EXPECT_FALSE(test, irqd_is_started(data));
KUNIT_EXPECT_GE(test, desc->depth, 1);
KUNIT_EXPECT_EQ(test, add_cpu(1), 0);
- KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
- KUNIT_EXPECT_FALSE(test, irqd_is_started(data));
KUNIT_EXPECT_EQ(test, desc->depth, 1);
enable_irq(virq);
diff --git a/kernel/pid.c b/kernel/pid.c
index c45a28c16cd2..d94ce0250501 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -680,7 +680,7 @@ static int pid_table_root_permissions(struct ctl_table_header *head,
container_of(head->set, struct pid_namespace, set);
int mode = table->mode;
- if (ns_capable(pidns->user_ns, CAP_SYS_ADMIN) ||
+ if (ns_capable_noaudit(pidns->user_ns, CAP_SYS_ADMIN) ||
uid_eq(current_euid(), make_kuid(pidns->user_ns, 0)))
mode = (mode & S_IRWXU) >> 6;
else if (in_egroup_p(make_kgid(pidns->user_ns, 0)))
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 6e9fe2ce1075..e3b64a5e0ec7 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -176,10 +176,9 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
unsigned long cookie;
- preempt_disable(); // Needed for PREEMPT_LAZY
+ lockdep_assert_preemption_disabled(); // Needed for PREEMPT_LAZY
cookie = get_state_synchronize_srcu(ssp);
if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) {
- preempt_enable();
return;
}
WRITE_ONCE(ssp->srcu_idx_max, cookie);
@@ -189,7 +188,6 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
else if (list_empty(&ssp->srcu_work.entry))
list_add(&ssp->srcu_work.entry, &srcu_boot_list);
}
- preempt_enable();
}
/*
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6e2f54169e66..36d4f9f06351 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1591,7 +1591,6 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
enum numa_topology_type sched_numa_topology_type;
static int sched_domains_numa_levels;
-static int sched_domains_curr_level;
int sched_max_numa_distance;
static int *sched_domains_numa_distance;
@@ -1632,14 +1631,7 @@ sd_init(struct sched_domain_topology_level *tl,
int sd_id, sd_weight, sd_flags = 0;
struct cpumask *sd_span;
-#ifdef CONFIG_NUMA
- /*
- * Ugly hack to pass state to sd_numa_mask()...
- */
- sched_domains_curr_level = tl->numa_level;
-#endif
-
- sd_weight = cpumask_weight(tl->mask(cpu));
+ sd_weight = cpumask_weight(tl->mask(tl, cpu));
if (tl->sd_flags)
sd_flags = (*tl->sd_flags)();
@@ -1677,7 +1669,7 @@ sd_init(struct sched_domain_topology_level *tl,
};
sd_span = sched_domain_span(sd);
- cpumask_and(sd_span, cpu_map, tl->mask(cpu));
+ cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
sd_id = cpumask_first(sd_span);
sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
@@ -1737,17 +1729,17 @@ sd_init(struct sched_domain_topology_level *tl,
*/
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
- SDTL_INIT(cpu_smt_mask, cpu_smt_flags, SMT),
+ SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
#endif
#ifdef CONFIG_SCHED_CLUSTER
- SDTL_INIT(cpu_clustergroup_mask, cpu_cluster_flags, CLS),
+ SDTL_INIT(tl_cls_mask, cpu_cluster_flags, CLS),
#endif
#ifdef CONFIG_SCHED_MC
- SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
+ SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
#endif
- SDTL_INIT(cpu_cpu_mask, NULL, PKG),
+ SDTL_INIT(tl_pkg_mask, NULL, PKG),
{ NULL, },
};
@@ -1769,9 +1761,9 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
#ifdef CONFIG_NUMA
-static const struct cpumask *sd_numa_mask(int cpu)
+static const struct cpumask *sd_numa_mask(struct sched_domain_topology_level *tl, int cpu)
{
- return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
+ return sched_domains_numa_masks[tl->numa_level][cpu_to_node(cpu)];
}
static void sched_numa_warn(const char *str)
@@ -2413,7 +2405,7 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
* breaks the linking done for an earlier span.
*/
for_each_cpu(cpu, cpu_map) {
- const struct cpumask *tl_cpu_mask = tl->mask(cpu);
+ const struct cpumask *tl_cpu_mask = tl->mask(tl, cpu);
int id;
/* lowest bit set in this mask is used as a unique id */
@@ -2421,7 +2413,7 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
if (cpumask_test_cpu(id, id_seen)) {
/* First CPU has already been seen, ensure identical spans */
- if (!cpumask_equal(tl->mask(id), tl_cpu_mask))
+ if (!cpumask_equal(tl->mask(tl, id), tl_cpu_mask))
return false;
} else {
/* First CPU hasn't been seen before, ensure it's a completely new span */
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 41aa761c7738..3bbfba30a777 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1139,7 +1139,7 @@ static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_kn
static bool should_sleep_killable(struct seccomp_filter *match,
struct seccomp_knotif *n)
{
- return match->wait_killable_recv && n->state == SECCOMP_NOTIFY_SENT;
+ return match->wait_killable_recv && n->state >= SECCOMP_NOTIFY_SENT;
}
static int seccomp_do_user_notification(int this_syscall,
@@ -1186,13 +1186,11 @@ static int seccomp_do_user_notification(int this_syscall,
if (err != 0) {
/*
- * Check to see if the notifcation got picked up and
- * whether we should switch to wait killable.
+ * Check to see whether we should switch to wait
+ * killable. Only return the interrupted error if not.
*/
- if (!wait_killable && should_sleep_killable(match, &n))
- continue;
-
- goto interrupted;
+ if (!(!wait_killable && should_sleep_killable(match, &n)))
+ goto interrupted;
}
addfd = list_first_entry_or_null(&n.addfd,
diff --git a/kernel/smp.c b/kernel/smp.c
index 56f83aa58ec8..02f52291fae4 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -884,16 +884,15 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
- * (atomically) until function has completed on other CPUs. If
- * %SCF_RUN_LOCAL is set, the function will also be run locally
- * if the local CPU is set in the @cpumask.
- *
- * If @wait is true, then returns once @func has returned.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler. Preemption
* must be disabled when calling this function.
+ *
+ * @func is not called on the local CPU even if @mask contains it. Consider
+ * using on_each_cpu_cond_mask() instead if this is not desirable.
*/
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index f3e831f62906..a59bc75ab7c5 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -633,7 +633,7 @@ void tick_offline_cpu(unsigned int cpu)
raw_spin_lock(&clockevents_lock);
tick_broadcast_offline(cpu);
- tick_shutdown(cpu);
+ tick_shutdown();
/*
* Unregister the clock event devices which were
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 9a3859443c04..7e33d3f2e889 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -411,24 +411,18 @@ int tick_cpu_dying(unsigned int dying_cpu)
}
/*
- * Shutdown an event device on a given cpu:
+ * Shutdown an event device on the outgoing CPU:
*
- * This is called on a life CPU, when a CPU is dead. So we cannot
- * access the hardware device itself.
- * We just set the mode and remove it from the lists.
+ * Called by the dying CPU during teardown, with clockevents_lock held
+ * and interrupts disabled.
*/
-void tick_shutdown(unsigned int cpu)
+void tick_shutdown(void)
{
- struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
struct clock_event_device *dev = td->evtdev;
td->mode = TICKDEV_MODE_PERIODIC;
if (dev) {
- /*
- * Prevent that the clock events layer tries to call
- * the set mode function!
- */
- clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
clockevents_exchange_device(dev, NULL);
dev->event_handler = clockevents_handle_noop;
td->evtdev = NULL;
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index faac36de35b9..4e4f7bbe2a64 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -26,7 +26,7 @@ extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);
extern void tick_check_new_device(struct clock_event_device *dev);
extern void tick_offline_cpu(unsigned int cpu);
-extern void tick_shutdown(unsigned int cpu);
+extern void tick_shutdown(void);
extern void tick_suspend(void);
extern void tick_resume(void);
extern bool tick_check_replacement(struct clock_event_device *curdev,
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3ae52978cae6..606007c387c5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2728,20 +2728,25 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
struct pt_regs *regs;
int err;
+ /*
+ * graph tracer framework ensures we won't migrate, so there is no need
+ * to use migrate_disable for bpf_prog_run again. The check here is just for
+ * __this_cpu_inc_return.
+ */
+ cant_sleep();
+
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
bpf_prog_inc_misses_counter(link->link.prog);
err = 1;
goto out;
}
- migrate_disable();
rcu_read_lock();
regs = ftrace_partial_regs(fregs, bpf_kprobe_multi_pt_regs_ptr());
old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
err = bpf_prog_run(link->link.prog, regs);
bpf_reset_run_ctx(old_run_ctx);
rcu_read_unlock();
- migrate_enable();
out:
__this_cpu_dec(bpf_prog_active);
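The hunk above relies on the caller's non-preemptible context (asserted with cant_sleep()) and keeps the per-CPU bpf_prog_active counter purely as a recursion guard. A rough userspace analogue of that guard, using thread-local storage in place of a per-CPU variable and hypothetical names throughout:

#include <stdio.h>

static _Thread_local int prog_active;
static int misses;

static void prog_body(void);

static void run_prog(void)
{
	/* Anything nested on top of an active run is counted as a miss. */
	if (++prog_active != 1) {
		misses++;
		goto out;
	}
	prog_body();
out:
	prog_active--;
}

static void prog_body(void)
{
	printf("program runs\n");
	run_prog();		/* nested attempt is skipped and counted */
}

int main(void)
{
	run_prog();
	printf("misses: %d\n", misses);	/* prints 1 */
	return 0;
}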
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b3c94fbaf002..eb256378e65b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4791,12 +4791,6 @@ int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
return single_release(inode, filp);
}
-static int tracing_mark_open(struct inode *inode, struct file *filp)
-{
- stream_open(inode, filp);
- return tracing_open_generic_tr(inode, filp);
-}
-
static int tracing_release(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -7163,7 +7157,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
#define TRACE_MARKER_MAX_SIZE 4096
-static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user *ubuf,
+static ssize_t write_marker_to_buffer(struct trace_array *tr, const char *buf,
size_t cnt, unsigned long ip)
{
struct ring_buffer_event *event;
@@ -7173,20 +7167,11 @@ static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user
int meta_size;
ssize_t written;
size_t size;
- int len;
-
-/* Used in tracing_mark_raw_write() as well */
-#define FAULTED_STR "<faulted>"
-#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */
again:
size = cnt + meta_size;
- /* If less than "<faulted>", then make sure we can still add that */
- if (cnt < FAULTED_SIZE)
- size += FAULTED_SIZE - cnt;
-
buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
tracing_gen_ctx());
@@ -7196,9 +7181,6 @@ static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user
* make it smaller and try again.
*/
if (size > ring_buffer_max_event_size(buffer)) {
- /* cnt < FAULTED size should never be bigger than max */
- if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
- return -EBADF;
cnt = ring_buffer_max_event_size(buffer) - meta_size;
/* The above should only happen once */
if (WARN_ON_ONCE(cnt + meta_size == size))
@@ -7212,14 +7194,8 @@ static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user
entry = ring_buffer_event_data(event);
entry->ip = ip;
-
- len = copy_from_user_nofault(&entry->buf, ubuf, cnt);
- if (len) {
- memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
- cnt = FAULTED_SIZE;
- written = -EFAULT;
- } else
- written = cnt;
+ memcpy(&entry->buf, buf, cnt);
+ written = cnt;
if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
/* do not add \n before testing triggers, but add \0 */
@@ -7243,6 +7219,169 @@ static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user
return written;
}
+struct trace_user_buf {
+ char *buf;
+};
+
+struct trace_user_buf_info {
+ struct trace_user_buf __percpu *tbuf;
+ int ref;
+};
+
+
+static DEFINE_MUTEX(trace_user_buffer_mutex);
+static struct trace_user_buf_info *trace_user_buffer;
+
+static void trace_user_fault_buffer_free(struct trace_user_buf_info *tinfo)
+{
+ char *buf;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ buf = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
+ kfree(buf);
+ }
+ free_percpu(tinfo->tbuf);
+ kfree(tinfo);
+}
+
+static int trace_user_fault_buffer_enable(void)
+{
+ struct trace_user_buf_info *tinfo;
+ char *buf;
+ int cpu;
+
+ guard(mutex)(&trace_user_buffer_mutex);
+
+ if (trace_user_buffer) {
+ trace_user_buffer->ref++;
+ return 0;
+ }
+
+ tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+ if (!tinfo)
+ return -ENOMEM;
+
+ tinfo->tbuf = alloc_percpu(struct trace_user_buf);
+ if (!tinfo->tbuf) {
+ kfree(tinfo);
+ return -ENOMEM;
+ }
+
+ tinfo->ref = 1;
+
+ /* Clear each buffer in case of error */
+ for_each_possible_cpu(cpu) {
+ per_cpu_ptr(tinfo->tbuf, cpu)->buf = NULL;
+ }
+
+ for_each_possible_cpu(cpu) {
+ buf = kmalloc_node(TRACE_MARKER_MAX_SIZE, GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!buf) {
+ trace_user_fault_buffer_free(tinfo);
+ return -ENOMEM;
+ }
+ per_cpu_ptr(tinfo->tbuf, cpu)->buf = buf;
+ }
+
+ trace_user_buffer = tinfo;
+
+ return 0;
+}
+
+static void trace_user_fault_buffer_disable(void)
+{
+ struct trace_user_buf_info *tinfo;
+
+ guard(mutex)(&trace_user_buffer_mutex);
+
+ tinfo = trace_user_buffer;
+
+ if (WARN_ON_ONCE(!tinfo))
+ return;
+
+ if (--tinfo->ref)
+ return;
+
+ trace_user_fault_buffer_free(tinfo);
+ trace_user_buffer = NULL;
+}
+
+/* Must be called with preemption disabled */
+static char *trace_user_fault_read(struct trace_user_buf_info *tinfo,
+ const char __user *ptr, size_t size,
+ size_t *read_size)
+{
+ int cpu = smp_processor_id();
+ char *buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
+ unsigned int cnt;
+ int trys = 0;
+ int ret;
+
+ if (size > TRACE_MARKER_MAX_SIZE)
+ size = TRACE_MARKER_MAX_SIZE;
+ *read_size = 0;
+
+ /*
+ * This acts similar to a seqcount. The per CPU context switches are
+ * recorded, migration is disabled and preemption is enabled. The
+ * read of the user space memory is copied into the per CPU buffer.
+ * Preemption is disabled again, and if the per CPU context switches count
+ * is still the same, it means the buffer has not been corrupted.
+ * If the count is different, it is assumed the buffer is corrupted
+ * and reading must be tried again.
+ */
+
+ do {
+ /*
+ * If for some reason, copy_from_user() always causes a context
+ * switch, this would then cause an infinite loop.
+ * If this task is preempted by another user space task, it
+ * will cause this task to try again. But just in case something
+ * changes where the copying from user space causes another task
+ * to run, prevent this from going into an infinite loop.
+ * 100 tries should be plenty.
+ */
+ if (WARN_ONCE(trys++ > 100, "Error: Too many tries to read user space"))
+ return NULL;
+
+ /* Read the current CPU context switch counter */
+ cnt = nr_context_switches_cpu(cpu);
+
+ /*
+ * Preemption is going to be enabled, but this task must
+ * remain on this CPU.
+ */
+ migrate_disable();
+
+ /*
+ * Now preemption is being enabled and another task can come in
+ * and use the same buffer and corrupt our data.
+ */
+ preempt_enable_notrace();
+
+ ret = __copy_from_user(buffer, ptr, size);
+
+ preempt_disable_notrace();
+ migrate_enable();
+
+ /* if it faulted, no need to test if the buffer was corrupted */
+ if (ret)
+ return NULL;
+
+ /*
+ * Preemption is disabled again, now check the per CPU context
+ * switch counter. If it doesn't match, then another user space
+ * process may have scheduled in and corrupted our buffer. In that
+ * case the copying must be retried.
+ */
+ } while (nr_context_switches_cpu(cpu) != cnt);
+
+ *read_size = size;
+ return buffer;
+}
+
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
@@ -7250,6 +7389,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
struct trace_array *tr = filp->private_data;
ssize_t written = -ENODEV;
unsigned long ip;
+ size_t size;
+ char *buf;
if (tracing_disabled)
return -EINVAL;
@@ -7263,6 +7404,16 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (cnt > TRACE_MARKER_MAX_SIZE)
cnt = TRACE_MARKER_MAX_SIZE;
+ /* Must have preemption disabled while having access to the buffer */
+ guard(preempt_notrace)();
+
+ buf = trace_user_fault_read(trace_user_buffer, ubuf, cnt, &size);
+ if (!buf)
+ return -EFAULT;
+
+ if (cnt > size)
+ cnt = size;
+
/* The selftests expect this function to be the IP address */
ip = _THIS_IP_;
@@ -7270,32 +7421,28 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (tr == &global_trace) {
guard(rcu)();
list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
- written = write_marker_to_buffer(tr, ubuf, cnt, ip);
+ written = write_marker_to_buffer(tr, buf, cnt, ip);
if (written < 0)
break;
}
} else {
- written = write_marker_to_buffer(tr, ubuf, cnt, ip);
+ written = write_marker_to_buffer(tr, buf, cnt, ip);
}
return written;
}
static ssize_t write_raw_marker_to_buffer(struct trace_array *tr,
- const char __user *ubuf, size_t cnt)
+ const char *buf, size_t cnt)
{
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct raw_data_entry *entry;
ssize_t written;
- int size;
- int len;
-
-#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+ size_t size;
- size = sizeof(*entry) + cnt;
- if (cnt < FAULT_SIZE_ID)
- size += FAULT_SIZE_ID - cnt;
+ /* cnt includes both the entry->id and the data behind it. */
+ size = struct_size(entry, buf, cnt - sizeof(entry->id));
buffer = tr->array_buffer.buffer;
@@ -7309,14 +7456,11 @@ static ssize_t write_raw_marker_to_buffer(struct trace_array *tr,
return -EBADF;
entry = ring_buffer_event_data(event);
-
- len = copy_from_user_nofault(&entry->id, ubuf, cnt);
- if (len) {
- entry->id = -1;
- memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
- written = -EFAULT;
- } else
- written = cnt;
+ unsafe_memcpy(&entry->id, buf, cnt,
+ "id and content already reserved on ring buffer"
+ "'buf' includes the 'id' and the data."
+ "'entry' was allocated with cnt from 'id'.");
+ written = cnt;
__buffer_unlock_commit(buffer, event);
@@ -7329,8 +7473,8 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
{
struct trace_array *tr = filp->private_data;
ssize_t written = -ENODEV;
-
-#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+ size_t size;
+ char *buf;
if (tracing_disabled)
return -EINVAL;
@@ -7342,21 +7486,53 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
if (cnt < sizeof(unsigned int))
return -EINVAL;
+ /* Must have preemption disabled while having access to the buffer */
+ guard(preempt_notrace)();
+
+ buf = trace_user_fault_read(trace_user_buffer, ubuf, cnt, &size);
+ if (!buf)
+ return -EFAULT;
+
+ /* raw write is all or nothing */
+ if (cnt > size)
+ return -EINVAL;
+
/* The global trace_marker_raw can go to multiple instances */
if (tr == &global_trace) {
guard(rcu)();
list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
- written = write_raw_marker_to_buffer(tr, ubuf, cnt);
+ written = write_raw_marker_to_buffer(tr, buf, cnt);
if (written < 0)
break;
}
} else {
- written = write_raw_marker_to_buffer(tr, ubuf, cnt);
+ written = write_raw_marker_to_buffer(tr, buf, cnt);
}
return written;
}
+static int tracing_mark_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+
+ ret = trace_user_fault_buffer_enable();
+ if (ret < 0)
+ return ret;
+
+ stream_open(inode, filp);
+ ret = tracing_open_generic_tr(inode, filp);
+ if (ret < 0)
+ trace_user_fault_buffer_disable();
+ return ret;
+}
+
+static int tracing_mark_release(struct inode *inode, struct file *file)
+{
+ trace_user_fault_buffer_disable();
+ return tracing_release_generic_tr(inode, file);
+}
+
static int tracing_clock_show(struct seq_file *m, void *v)
{
struct trace_array *tr = m->private;
@@ -7764,13 +7940,13 @@ static const struct file_operations tracing_free_buffer_fops = {
static const struct file_operations tracing_mark_fops = {
.open = tracing_mark_open,
.write = tracing_mark_write,
- .release = tracing_release_generic_tr,
+ .release = tracing_mark_release,
};
static const struct file_operations tracing_mark_raw_fops = {
.open = tracing_mark_open,
.write = tracing_mark_raw_write,
- .release = tracing_release_generic_tr,
+ .release = tracing_mark_release,
};
static const struct file_operations trace_clock_fops = {
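The new trace_user_fault_read() copies from user space into a per-CPU buffer while preemptible, then checks that the CPU's context-switch count did not move and retries otherwise. A minimal single-threaded sketch of that retry loop, with a fake counter standing in for nr_context_switches_cpu() and all names hypothetical:

#include <stdio.h>
#include <string.h>

static unsigned long fake_ctx_switches;	/* stands in for nr_context_switches_cpu() */
static char percpu_buf[4096];

static int copy_with_retry(const char *src, size_t len)
{
	int tries = 0;

	do {
		if (++tries > 100)
			return -1;	/* give up, as the kernel code does */

		unsigned long gen = fake_ctx_switches;

		/* Window where another task could reuse the buffer. */
		memcpy(percpu_buf, src, len);

		/* Success only if the generation did not move while copying. */
		if (gen == fake_ctx_switches)
			return 0;
	} while (1);
}

int main(void)
{
	if (!copy_with_retry("hello", 6))
		printf("copied: %s\n", percpu_buf);
	return 0;
}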
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 9f3e9537417d..e00da4182deb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1629,11 +1629,10 @@ static void *s_start(struct seq_file *m, loff_t *pos)
loff_t l;
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ mutex_lock(&event_mutex);
if (!iter)
return NULL;
- mutex_lock(&event_mutex);
-
iter->type = SET_EVENT_FILE;
iter->file = list_entry(&tr->events, struct trace_event_file, list);
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
index b36ade43d4b3..ad9d6347b5fa 100644
--- a/kernel/trace/trace_fprobe.c
+++ b/kernel/trace/trace_fprobe.c
@@ -522,13 +522,14 @@ static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
void *entry_data)
{
struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
+ unsigned int flags = trace_probe_load_flag(&tf->tp);
int ret = 0;
- if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
+ if (flags & TP_FLAG_TRACE)
fentry_trace_func(tf, entry_ip, fregs);
#ifdef CONFIG_PERF_EVENTS
- if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
+ if (flags & TP_FLAG_PROFILE)
ret = fentry_perf_func(tf, entry_ip, fregs);
#endif
return ret;
@@ -540,11 +541,12 @@ static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
void *entry_data)
{
struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
+ unsigned int flags = trace_probe_load_flag(&tf->tp);
- if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
+ if (flags & TP_FLAG_TRACE)
fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data);
#ifdef CONFIG_PERF_EVENTS
- if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
+ if (flags & TP_FLAG_PROFILE)
fexit_perf_func(tf, entry_ip, ret_ip, fregs, entry_data);
#endif
}
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5496758b6c76..4c45c49b06c8 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -184,7 +184,7 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
unsigned long flags;
unsigned int trace_ctx;
u64 *calltime;
- int ret;
+ int ret = 0;
if (ftrace_graph_ignore_func(gops, trace))
return 0;
@@ -202,13 +202,11 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
return 0;
calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
- if (!calltime)
- return 0;
-
- *calltime = trace_clock_local();
-
- trace_ctx = tracing_gen_ctx_flags(flags);
- ret = __trace_graph_entry(tr, trace, trace_ctx);
+ if (calltime) {
+ *calltime = trace_clock_local();
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
+ }
local_dec(&data->disabled);
return ret;
@@ -233,11 +231,10 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
rettime = trace_clock_local();
calltime = fgraph_retrieve_data(gops->idx, &size);
- if (!calltime)
- return;
-
- trace_ctx = tracing_gen_ctx_flags(flags);
- __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
+ if (calltime) {
+ trace_ctx = tracing_gen_ctx_flags(flags);
+ __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
+ }
local_dec(&data->disabled);
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index fa60362a3f31..ee8171b19bee 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1815,14 +1815,15 @@ static int kprobe_register(struct trace_event_call *event,
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
+ unsigned int flags = trace_probe_load_flag(&tk->tp);
int ret = 0;
raw_cpu_inc(*tk->nhit);
- if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
+ if (flags & TP_FLAG_TRACE)
kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
- if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
+ if (flags & TP_FLAG_PROFILE)
ret = kprobe_perf_func(tk, regs);
#endif
return ret;
@@ -1834,6 +1835,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct kretprobe *rp = get_kretprobe(ri);
struct trace_kprobe *tk;
+ unsigned int flags;
/*
* There is a small chance that get_kretprobe(ri) returns NULL when
@@ -1846,10 +1848,11 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
tk = container_of(rp, struct trace_kprobe, rp);
raw_cpu_inc(*tk->nhit);
- if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
+ flags = trace_probe_load_flag(&tk->tp);
+ if (flags & TP_FLAG_TRACE)
kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
- if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
+ if (flags & TP_FLAG_PROFILE)
kretprobe_perf_func(tk, ri, regs);
#endif
return 0; /* We don't tweak kernel, so just return 0 */
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 842383fbc03b..08b5bda24da2 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -271,16 +271,21 @@ struct event_file_link {
struct list_head list;
};
+static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
+{
+ return smp_load_acquire(&tp->event->flags);
+}
+
static inline bool trace_probe_test_flag(struct trace_probe *tp,
unsigned int flag)
{
- return !!(tp->event->flags & flag);
+ return !!(trace_probe_load_flag(tp) & flag);
}
static inline void trace_probe_set_flag(struct trace_probe *tp,
unsigned int flag)
{
- tp->event->flags |= flag;
+ smp_store_release(&tp->event->flags, tp->event->flags | flag);
}
static inline void trace_probe_clear_flag(struct trace_probe *tp,
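trace_probe_load_flag() gives the dispatchers a single acquire-load snapshot of the flags, paired with the release store in trace_probe_set_flag(), so both TP_FLAG_TRACE and TP_FLAG_PROFILE are tested against one consistent value. A userspace sketch of the same pattern with C11 atomics (flag values made up for the example, single updater assumed as on the kernel side):

#include <stdatomic.h>
#include <stdio.h>

#define TP_FLAG_TRACE	(1u << 0)
#define TP_FLAG_PROFILE	(1u << 1)

static _Atomic unsigned int probe_flags;

static void probe_set_flag(unsigned int flag)
{
	unsigned int old = atomic_load_explicit(&probe_flags, memory_order_relaxed);

	/* Publish the new flag with release semantics. */
	atomic_store_explicit(&probe_flags, old | flag, memory_order_release);
}

static void dispatcher(void)
{
	/* One acquire load, reused for every test in this invocation. */
	unsigned int flags = atomic_load_explicit(&probe_flags, memory_order_acquire);

	if (flags & TP_FLAG_TRACE)
		printf("trace handler\n");
	if (flags & TP_FLAG_PROFILE)
		printf("perf handler\n");
}

int main(void)
{
	probe_set_flag(TP_FLAG_TRACE);
	dispatcher();
	return 0;
}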
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index bf1cb80742ae..e3f2e4f56faa 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -138,12 +138,10 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
return 0;
calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
- if (!calltime)
- return 0;
-
- *calltime = trace_clock_local();
-
- ret = __trace_graph_entry(tr, trace, trace_ctx);
+ if (calltime) {
+ *calltime = trace_clock_local();
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
+ }
local_dec(&data->disabled);
preempt_enable_notrace();
@@ -169,12 +167,10 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
rettime = trace_clock_local();
calltime = fgraph_retrieve_data(gops->idx, &size);
- if (!calltime)
- return;
+ if (calltime)
+ __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
- __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
local_dec(&data->disabled);
-
preempt_enable_notrace();
return;
}
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 8b0bcc0d8f41..430d09c49462 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1547,6 +1547,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
struct trace_uprobe *tu;
struct uprobe_dispatch_data udd;
struct uprobe_cpu_buffer *ucb = NULL;
+ unsigned int flags;
int ret = 0;
tu = container_of(con, struct trace_uprobe, consumer);
@@ -1561,11 +1562,12 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
if (WARN_ON_ONCE(!uprobe_cpu_buffer))
return 0;
- if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+ flags = trace_probe_load_flag(&tu->tp);
+ if (flags & TP_FLAG_TRACE)
ret |= uprobe_trace_func(tu, regs, &ucb);
#ifdef CONFIG_PERF_EVENTS
- if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+ if (flags & TP_FLAG_PROFILE)
ret |= uprobe_perf_func(tu, regs, &ucb);
#endif
uprobe_buffer_put(ucb);
@@ -1579,6 +1581,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
struct trace_uprobe *tu;
struct uprobe_dispatch_data udd;
struct uprobe_cpu_buffer *ucb = NULL;
+ unsigned int flags;
tu = container_of(con, struct trace_uprobe, consumer);
@@ -1590,11 +1593,12 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
if (WARN_ON_ONCE(!uprobe_cpu_buffer))
return 0;
- if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+ flags = trace_probe_load_flag(&tu->tp);
+ if (flags & TP_FLAG_TRACE)
uretprobe_trace_func(tu, func, regs, &ucb);
#ifdef CONFIG_PERF_EVENTS
- if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+ if (flags & TP_FLAG_PROFILE)
uretprobe_perf_func(tu, func, regs, &ucb);
#endif
uprobe_buffer_put(ucb);
diff --git a/lib/raid6/recov_rvv.c b/lib/raid6/recov_rvv.c
index 5d54c4b437df..5f779719c3d3 100644
--- a/lib/raid6/recov_rvv.c
+++ b/lib/raid6/recov_rvv.c
@@ -4,9 +4,7 @@
* Author: Chunyan Zhang <zhangchunyan@...as.ac.cn>
*/
-#include <asm/simd.h>
#include <asm/vector.h>
-#include <crypto/internal/simd.h>
#include <linux/raid/pq.h>
static int rvv_has_vector(void)
diff --git a/lib/raid6/rvv.c b/lib/raid6/rvv.c
index 7d82efa5b14f..b193ea176d5d 100644
--- a/lib/raid6/rvv.c
+++ b/lib/raid6/rvv.c
@@ -9,11 +9,8 @@
* Copyright 2002-2004 H. Peter Anvin
*/
-#include <asm/simd.h>
#include <asm/vector.h>
-#include <crypto/internal/simd.h>
#include <linux/raid/pq.h>
-#include <linux/types.h>
#include "rvv.h"
#define NSIZE (riscv_v_vsize / 32) /* NSIZE = vlenb */
diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c
index 3693c6caf2c4..a565c30c71a0 100644
--- a/lib/vdso/datastore.c
+++ b/lib/vdso/datastore.c
@@ -11,14 +11,14 @@
/*
* The vDSO data page.
*/
-#ifdef CONFIG_HAVE_GENERIC_VDSO
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static union {
struct vdso_time_data data;
u8 page[PAGE_SIZE];
} vdso_time_data_store __page_aligned_data;
struct vdso_time_data *vdso_k_time_data = &vdso_time_data_store.data;
static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
-#endif /* CONFIG_HAVE_GENERIC_VDSO */
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
#ifdef CONFIG_VDSO_GETRANDOM
static union {
@@ -46,7 +46,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
switch (vmf->pgoff) {
case VDSO_TIME_PAGE_OFFSET:
- if (!IS_ENABLED(CONFIG_HAVE_GENERIC_VDSO))
+ if (!IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY))
return VM_FAULT_SIGBUS;
pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
if (timens_page) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6cfe0b43ab8f..8f19d0f293e0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7203,6 +7203,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
psize);
}
spin_unlock(ptl);
+
+ cond_resched();
}
/*
* Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8dd7fbed5a94..46713b9ece06 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5024,6 +5024,19 @@ void mem_cgroup_sk_free(struct sock *sk)
css_put(&sk->sk_memcg->css);
}
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+ if (sk->sk_memcg == newsk->sk_memcg)
+ return;
+
+ mem_cgroup_sk_free(newsk);
+
+ if (sk->sk_memcg)
+ css_get(&sk->sk_memcg->css);
+
+ newsk->sk_memcg = sk->sk_memcg;
+}
+
/**
* mem_cgroup_charge_skmem - charge socket memory
* @memcg: memcg to charge
diff --git a/mm/slub.c b/mm/slub.c
index d257141896c9..264fc76455d7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -7731,10 +7731,7 @@ static int cmp_loc_by_count(const void *a, const void *b, const void *data)
struct location *loc1 = (struct location *)a;
struct location *loc2 = (struct location *)b;
- if (loc1->count > loc2->count)
- return -1;
- else
- return 1;
+ return cmp_int(loc2->count, loc1->count);
}
static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
index 6b694f117aef..468f7e8f0277 100644
--- a/net/9p/trans_usbg.c
+++ b/net/9p/trans_usbg.c
@@ -231,6 +231,8 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
struct f_usb9pfs *usb9pfs = ep->driver_data;
struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
struct p9_req_t *p9_rx_req;
+ unsigned int req_size = req->actual;
+ int status = REQ_STATUS_RCVD;
if (req->status) {
dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
@@ -242,11 +244,19 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (!p9_rx_req)
return;
- memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
+ if (req_size > p9_rx_req->rc.capacity) {
+ dev_err(&cdev->gadget->dev,
+ "%s received data size %u exceeds buffer capacity %zu\n",
+ ep->name, req_size, p9_rx_req->rc.capacity);
+ req_size = 0;
+ status = REQ_STATUS_ERROR;
+ }
+
+ memcpy(p9_rx_req->rc.sdata, req->buf, req_size);
- p9_rx_req->rc.size = req->actual;
+ p9_rx_req->rc.size = req_size;
- p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD);
+ p9_client_cb(usb9pfs->client, p9_rx_req, status);
p9_req_put(usb9pfs->client, p9_rx_req);
complete(&usb9pfs->received);
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 7a7d49890858..eefdb6134ca5 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -1325,7 +1325,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_ext_adv_params cp;
struct hci_rp_le_set_ext_adv_params rp;
- bool connectable;
+ bool connectable, require_privacy;
u32 flags;
bdaddr_t random_addr;
u8 own_addr_type;
@@ -1363,10 +1363,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
return -EPERM;
/* Set require_privacy to true only when non-connectable
- * advertising is used. In that case it is fine to use a
- * non-resolvable private address.
+ * advertising is used and it is not periodic.
+ * In that case it is fine to use a non-resolvable private address.
*/
- err = hci_get_random_address(hdev, !connectable,
+ require_privacy = !connectable && !(adv && adv->periodic);
+
+ err = hci_get_random_address(hdev, require_privacy,
adv_use_rpa(hdev, flags), adv,
&own_addr_type, &random_addr);
if (err < 0)
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 5ce823ca3aaf..88602f19deca 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -111,6 +111,8 @@ static void iso_conn_free(struct kref *ref)
/* Ensure no more work items will run since hci_conn has been dropped */
disable_delayed_work_sync(&conn->timeout_work);
+ kfree_skb(conn->rx_skb);
+
kfree(conn);
}
@@ -750,6 +752,13 @@ static void iso_sock_kill(struct sock *sk)
BT_DBG("sk %p state %d", sk, sk->sk_state);
+ /* Sock is dead, so set conn->sk to NULL to avoid possible UAF */
+ if (iso_pi(sk)->conn) {
+ iso_conn_lock(iso_pi(sk)->conn);
+ iso_pi(sk)->conn->sk = NULL;
+ iso_conn_unlock(iso_pi(sk)->conn);
+ }
+
/* Kill poor orphan */
bt_sock_unlink(&iso_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
@@ -2407,7 +2416,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
skb->len);
conn->rx_len -= skb->len;
- return;
+ break;
case ISO_END:
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 225140fcb3d6..a3d16eece0d2 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -4542,13 +4542,11 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
return -ENOMEM;
#ifdef CONFIG_BT_FEATURE_DEBUG
- if (!hdev) {
- flags = bt_dbg_get() ? BIT(0) : 0;
+ flags = bt_dbg_get() ? BIT(0) : 0;
- memcpy(rp->features[idx].uuid, debug_uuid, 16);
- rp->features[idx].flags = cpu_to_le32(flags);
- idx++;
- }
+ memcpy(rp->features[idx].uuid, debug_uuid, 16);
+ rp->features[idx].flags = cpu_to_le32(flags);
+ idx++;
#endif
if (hdev && hci_dev_le_state_simultaneous(hdev)) {
diff --git a/net/core/dst.c b/net/core/dst.c
index e2de8b68c41d..e9d35f49c9e7 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -150,7 +150,7 @@ void dst_dev_put(struct dst_entry *dst)
dst->ops->ifdown(dst, dev);
WRITE_ONCE(dst->input, dst_discard);
WRITE_ONCE(dst->output, dst_discard_out);
- WRITE_ONCE(dst->dev, blackhole_netdev);
+ rcu_assign_pointer(dst->dev_rcu, blackhole_netdev);
netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
GFP_ATOMIC);
}
diff --git a/net/core/filter.c b/net/core/filter.c
index da391e2b0788..2d326d35c387 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -9284,13 +9284,17 @@ static bool sock_addr_is_valid_access(int off, int size,
return false;
info->reg_type = PTR_TO_SOCKET;
break;
- default:
- if (type == BPF_READ) {
- if (size != size_default)
- return false;
- } else {
+ case bpf_ctx_range(struct bpf_sock_addr, user_family):
+ case bpf_ctx_range(struct bpf_sock_addr, family):
+ case bpf_ctx_range(struct bpf_sock_addr, type):
+ case bpf_ctx_range(struct bpf_sock_addr, protocol):
+ if (type != BPF_READ)
return false;
- }
+ if (size != size_default)
+ return false;
+ break;
+ default:
+ return false;
}
return true;
diff --git a/net/core/sock.c b/net/core/sock.c
index 158bddd23134..e21348ead7e7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2584,7 +2584,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
-static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
+static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev)
{
bool is_ipv6 = false;
u32 max_size;
@@ -2594,8 +2594,8 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
!ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
#endif
/* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
- max_size = is_ipv6 ? READ_ONCE(dst_dev(dst)->gso_max_size) :
- READ_ONCE(dst_dev(dst)->gso_ipv4_max_size);
+ max_size = is_ipv6 ? READ_ONCE(dev->gso_max_size) :
+ READ_ONCE(dev->gso_ipv4_max_size);
if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
max_size = GSO_LEGACY_MAX_SIZE;
@@ -2604,9 +2604,12 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
+ const struct net_device *dev;
u32 max_segs = 1;
- sk->sk_route_caps = dst_dev(dst)->features;
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
+ sk->sk_route_caps = dev->features;
if (sk_is_tcp(sk)) {
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2622,13 +2625,14 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
- sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
+ sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dev);
/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
- max_segs = max_t(u32, READ_ONCE(dst_dev(dst)->gso_max_segs), 1);
+ max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1);
}
}
sk->sk_gso_max_segs = max_segs;
sk_dst_set(sk, dst);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
diff --git a/net/ethtool/tsconfig.c b/net/ethtool/tsconfig.c
index 2be356bdfe87..169b413b31fc 100644
--- a/net/ethtool/tsconfig.c
+++ b/net/ethtool/tsconfig.c
@@ -423,13 +423,11 @@ static int ethnl_set_tsconfig(struct ethnl_req_info *req_base,
return ret;
}
- if (hwprov_mod || config_mod) {
- ret = tsconfig_send_reply(dev, info);
- if (ret && ret != -EOPNOTSUPP) {
- NL_SET_ERR_MSG(info->extack,
- "error while reading the new configuration set");
- return ret;
- }
+ ret = tsconfig_send_reply(dev, info);
+ if (ret && ret != -EOPNOTSUPP) {
+ NL_SET_ERR_MSG(info->extack,
+ "error while reading the new configuration set");
+ return ret;
}
/* tsconfig has no notification */
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c48c572f024d..1be0d91620a3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -318,17 +318,17 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
return true;
/* No rate limit on loopback */
- dev = dst_dev(dst);
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
if (dev && (dev->flags & IFF_LOOPBACK))
goto out;
- rcu_read_lock();
peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr,
l3mdev_master_ifindex_rcu(dev));
rc = inet_peer_xrlim_allow(peer,
READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
- rcu_read_unlock();
out:
+ rcu_read_unlock();
if (!rc)
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
else
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b2584cce90ae..f7012479713b 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -476,14 +476,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
- struct net_device *dev = skb->dev ? : skb_dst_dev(skb);
- int vif = l3mdev_master_ifindex_rcu(dev);
+ struct net_device *dev;
struct ipq *qp;
+ int vif;
__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
/* Lookup (or create) queue header */
rcu_read_lock();
+ dev = skb->dev ? : skb_dst_dev_rcu(skb);
+ vif = l3mdev_master_ifindex_rcu(dev);
qp = ip_find(net, ip_hdr(skb), user, vif);
if (qp) {
int ret, refs = 0;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index e86a8a862c41..8c568fbddb5f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1904,7 +1904,7 @@ static int ipmr_prepare_xmit(struct net *net, struct mr_table *mrt,
return -1;
}
- encap += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+ encap += LL_RESERVED_SPACE(dst_dev_rcu(&rt->dst)) + rt->dst.header_len;
if (skb_cow(skb, encap)) {
ip_rt_put(rt);
@@ -1957,7 +1957,7 @@ static void ipmr_queue_fwd_xmit(struct net *net, struct mr_table *mrt,
* result in receiving multiple packets.
*/
NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
- net, NULL, skb, skb->dev, rt->dst.dev,
+ net, NULL, skb, skb->dev, dst_dev_rcu(&rt->dst),
ipmr_forward_finish);
return;
@@ -2301,7 +2301,7 @@ int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
guard(rcu)();
- dev = rt->dst.dev;
+ dev = dst_dev_rcu(&rt->dst);
if (IPCB(skb)->flags & IPSKB_FORWARDED)
goto mc_output;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 031df4c19fcc..d2c3480df8f7 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -77,6 +77,7 @@ static inline struct hlist_head *ping_hashslot(struct ping_table *table,
int ping_get_port(struct sock *sk, unsigned short ident)
{
+ struct net *net = sock_net(sk);
struct inet_sock *isk, *isk2;
struct hlist_head *hlist;
struct sock *sk2 = NULL;
@@ -90,9 +91,10 @@ int ping_get_port(struct sock *sk, unsigned short ident)
for (i = 0; i < (1L << 16); i++, result++) {
if (!result)
result++; /* avoid zero */
- hlist = ping_hashslot(&ping_table, sock_net(sk),
- result);
+ hlist = ping_hashslot(&ping_table, net, result);
sk_for_each(sk2, hlist) {
+ if (!net_eq(sock_net(sk2), net))
+ continue;
isk2 = inet_sk(sk2);
if (isk2->inet_num == result)
@@ -108,8 +110,10 @@ int ping_get_port(struct sock *sk, unsigned short ident)
if (i >= (1L << 16))
goto fail;
} else {
- hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
+ hlist = ping_hashslot(&ping_table, net, ident);
sk_for_each(sk2, hlist) {
+ if (!net_eq(sock_net(sk2), net))
+ continue;
isk2 = inet_sk(sk2);
/* BUG? Why is this reuse and not reuseaddr? ping.c
@@ -129,7 +133,7 @@ int ping_get_port(struct sock *sk, unsigned short ident)
pr_debug("was not hashed\n");
sk_add_node_rcu(sk, hlist);
sock_set_flag(sk, SOCK_RCU_FREE);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
}
spin_unlock(&ping_table.lock);
return 0;
@@ -188,6 +192,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
}
sk_for_each_rcu(sk, hslot) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
isk = inet_sk(sk);
pr_debug("iterate\n");
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index baa43e5966b1..5582ccd673ee 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -413,11 +413,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
const void *daddr)
{
const struct rtable *rt = container_of(dst, struct rtable, dst);
- struct net_device *dev = dst_dev(dst);
+ struct net_device *dev;
struct neighbour *n;
rcu_read_lock();
-
+ dev = dst_dev_rcu(dst);
if (likely(rt->rt_gw_family == AF_INET)) {
n = ip_neigh_gw4(dev, rt->rt_gw4);
} else if (rt->rt_gw_family == AF_INET6) {
@@ -1026,7 +1026,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
return;
rcu_read_lock();
- net = dev_net_rcu(dst_dev(dst));
+ net = dst_dev_net_rcu(dst);
if (mtu < net->ipv4.ip_rt_min_pmtu) {
lock = true;
mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
@@ -1326,7 +1326,7 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst_dev(dst));
+ net = dst_dev_net_rcu(dst);
advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
net->ipv4.ip_rt_min_advmss);
rcu_read_unlock();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ad76556800f2..89040007c7b7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3099,8 +3099,8 @@ bool tcp_check_oom(const struct sock *sk, int shift)
void __tcp_close(struct sock *sk, long timeout)
{
+ bool data_was_unread = false;
struct sk_buff *skb;
- int data_was_unread = 0;
int state;
WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
@@ -3119,11 +3119,12 @@ void __tcp_close(struct sock *sk, long timeout)
* reader process may not have drained the data yet!
*/
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- len--;
- data_was_unread += len;
+ end_seq--;
+ if (after(end_seq, tcp_sk(sk)->copied_seq))
+ data_was_unread = true;
__kfree_skb(skb);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 71b76e98371a..64f93668a845 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4890,12 +4890,23 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
/* Check if this incoming skb can be added to socket receive queues
* while satisfying sk->sk_rcvbuf limit.
+ *
+ * In theory we should use skb->truesize, but this can cause problems
+ * when applications use too small SO_RCVBUF values.
+ * When LRO / hw gro is used, the socket might have a high tp->scaling_ratio,
+ * allowing RWIN to be close to available space.
+ * Whenever the receive queue gets full, we can receive a small packet
+ * filling RWIN, but with a high skb->truesize, because most NICs use a 4K page
+ * plus sk_buff metadata even when receiving less than 1500 bytes of payload.
+ *
+ * Note that we use skb->len to decide to accept or drop this packet,
+ * but sk->sk_rmem_alloc is the sum of all skb->truesize.
*/
static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
{
- unsigned int new_mem = atomic_read(&sk->sk_rmem_alloc) + skb->truesize;
+ unsigned int rmem = atomic_read(&sk->sk_rmem_alloc);
- return new_mem <= sk->sk_rcvbuf;
+ return rmem + skb->len <= sk->sk_rcvbuf;
}
static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
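After this change tcp_can_ingest() admits packets based on skb->len rather than skb->truesize, so a small payload backed by a full 4K page is not refused while the advertised window still has room. A toy illustration of the difference, with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

struct toy_sk  { unsigned int rmem_alloc, rcvbuf; };
struct toy_skb { unsigned int len, truesize; };

static bool can_ingest(const struct toy_sk *sk, const struct toy_skb *skb)
{
	/* New test: charge only the payload length against sk_rcvbuf. */
	return sk->rmem_alloc + skb->len <= sk->rcvbuf;
}

int main(void)
{
	struct toy_sk sk = { .rmem_alloc = 63000, .rcvbuf = 65536 };
	struct toy_skb skb = { .len = 1200, .truesize = 4096 + 256 };

	/* Old truesize-based test would drop this packet; the new one accepts it. */
	printf("truesize test: %d\n", sk.rmem_alloc + skb.truesize <= sk.rcvbuf);
	printf("len test:      %d\n", can_ingest(&sk, &skb));
	return 0;
}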
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 03c068ea27b6..10e86f1008e9 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -170,7 +170,7 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
struct net *net;
spin_lock_bh(&tcp_metrics_lock);
- net = dev_net_rcu(dst_dev(dst));
+ net = dst_dev_net_rcu(dst);
/* While waiting for the spin-lock the cache might have been populated
* with this entry and so we have to check again.
@@ -273,7 +273,7 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
return NULL;
}
- net = dev_net_rcu(dst_dev(dst));
+ net = dst_dev_net_rcu(dst);
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
@@ -318,7 +318,7 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
else
return NULL;
- net = dev_net_rcu(dst_dev(dst));
+ net = dst_dev_net_rcu(dst);
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f8a8e46286b8..52599584422b 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -104,7 +104,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
rcu_read_lock();
rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
if (rt) {
- dev = dst_dev(&rt->dst);
+ dev = dst_dev_rcu(&rt->dst);
netdev_hold(dev, &dev_tracker, GFP_ATOMIC);
ip6_rt_put(rt);
} else if (ishost) {
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 44550957fd4e..56c974cf75d1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -209,7 +209,8 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
* this lookup should be more aggressive (not longer than timeout).
*/
dst = ip6_route_output(net, sk, fl6);
- dev = dst_dev(dst);
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
if (dst->error) {
IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTNOROUTES);
@@ -224,14 +225,12 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- rcu_read_lock();
peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr);
res = inet_peer_xrlim_allow(peer, tmo);
- rcu_read_unlock();
}
+ rcu_read_unlock();
if (!res)
- __ICMP6_INC_STATS(net, ip6_dst_idev(dst),
- ICMP6_MIB_RATELIMITHOST);
+ __ICMP6_INC_STATS(net, NULL, ICMP6_MIB_RATELIMITHOST);
else
icmp_global_consume(net);
dst_release(dst);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 1e1410237b6e..9d64c13bab5e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -60,7 +60,7 @@
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct net_device *dev = dst_dev(dst);
+ struct net_device *dev = dst_dev_rcu(dst);
struct inet6_dev *idev = ip6_dst_idev(dst);
unsigned int hh_len = LL_RESERVED_SPACE(dev);
const struct in6_addr *daddr, *nexthop;
@@ -70,15 +70,12 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
/* Be paranoid, rather than too clever. */
if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
- /* Make sure idev stays alive */
- rcu_read_lock();
+ /* idev stays alive because we hold rcu_read_lock(). */
skb = skb_expand_head(skb, hh_len);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
- rcu_read_unlock();
return -ENOMEM;
}
- rcu_read_unlock();
}
hdr = ipv6_hdr(skb);
@@ -123,7 +120,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
- rcu_read_lock();
nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
@@ -131,7 +127,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
if (IS_ERR(neigh)) {
- rcu_read_unlock();
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
return -EINVAL;
@@ -139,7 +134,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
}
sock_confirm_neigh(skb, neigh);
ret = neigh_output(neigh, skb, false);
- rcu_read_unlock();
return ret;
}
@@ -233,22 +227,29 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct net_device *dev = dst_dev(dst), *indev = skb->dev;
- struct inet6_dev *idev = ip6_dst_idev(dst);
+ struct net_device *dev, *indev = skb->dev;
+ struct inet6_dev *idev;
+ int ret;
skb->protocol = htons(ETH_P_IPV6);
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
+ idev = ip6_dst_idev(dst);
skb->dev = dev;
if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
return 0;
}
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, indev, dev,
- ip6_finish_output,
- !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+ ret = NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+ net, sk, skb, indev, dev,
+ ip6_finish_output,
+ !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(ip6_output);
@@ -268,35 +269,36 @@ bool ip6_autoflowlabel(struct net *net, const struct sock *sk)
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
__u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
{
- struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
- struct net_device *dev = dst_dev(dst);
struct inet6_dev *idev = ip6_dst_idev(dst);
struct hop_jumbo_hdr *hop_jumbo;
int hoplen = sizeof(*hop_jumbo);
+ struct net *net = sock_net(sk);
unsigned int head_room;
+ struct net_device *dev;
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
- int hlimit = -1;
+ int ret, hlimit = -1;
u32 mtu;
+ rcu_read_lock();
+
+ dev = dst_dev_rcu(dst);
head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
if (opt)
head_room += opt->opt_nflen + opt->opt_flen;
if (unlikely(head_room > skb_headroom(skb))) {
- /* Make sure idev stays alive */
- rcu_read_lock();
+ /* idev stays alive while we hold rcu_read_lock(). */
skb = skb_expand_head(skb, head_room);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
- rcu_read_unlock();
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto unlock;
}
- rcu_read_unlock();
}
if (opt) {
@@ -358,17 +360,21 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
* skb to its handler for processing
*/
skb = l3mdev_ip6_out((struct sock *)sk, skb);
- if (unlikely(!skb))
- return 0;
+ if (unlikely(!skb)) {
+ ret = 0;
+ goto unlock;
+ }
/* hooks should never assume socket lock is held.
* we promote our socket to non const
*/
- return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
- net, (struct sock *)sk, skb, NULL, dev,
- dst_output);
+ ret = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+ net, (struct sock *)sk, skb, NULL, dev,
+ dst_output);
+ goto unlock;
}
+ ret = -EMSGSIZE;
skb->dev = dev;
/* ipv6_local_error() does not require socket lock,
* we promote our socket to non const
@@ -377,7 +383,9 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
IP6_INC_STATS(net, idev, IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
- return -EMSGSIZE;
+unlock:
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(ip6_xmit);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 36ca27496b3c..016b572e7d6f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -169,6 +169,29 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
return iv > 0 ? iv : 1;
}
+static struct net_device *ip6_mc_find_dev(struct net *net,
+ const struct in6_addr *group,
+ int ifindex)
+{
+ struct net_device *dev = NULL;
+ struct rt6_info *rt;
+
+ if (ifindex == 0) {
+ rcu_read_lock();
+ rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
+ if (rt) {
+ dev = dst_dev_rcu(&rt->dst);
+ dev_hold(dev);
+ ip6_rt_put(rt);
+ }
+ rcu_read_unlock();
+ } else {
+ dev = dev_get_by_index(net, ifindex);
+ }
+
+ return dev;
+}
+
/*
* socket join on multicast group
*/
@@ -191,28 +214,13 @@ static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
}
mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
-
if (!mc_lst)
return -ENOMEM;
mc_lst->next = NULL;
mc_lst->addr = *addr;
- if (ifindex == 0) {
- struct rt6_info *rt;
-
- rcu_read_lock();
- rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
- if (rt) {
- dev = dst_dev(&rt->dst);
- dev_hold(dev);
- ip6_rt_put(rt);
- }
- rcu_read_unlock();
- } else {
- dev = dev_get_by_index(net, ifindex);
- }
-
+ dev = ip6_mc_find_dev(net, addr, ifindex);
if (!dev) {
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
@@ -302,27 +310,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);
-static struct inet6_dev *ip6_mc_find_dev(struct net *net,
- const struct in6_addr *group,
- int ifindex)
+static struct inet6_dev *ip6_mc_find_idev(struct net *net,
+ const struct in6_addr *group,
+ int ifindex)
{
- struct net_device *dev = NULL;
+ struct net_device *dev;
struct inet6_dev *idev;
- if (ifindex == 0) {
- struct rt6_info *rt;
-
- rcu_read_lock();
- rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
- if (rt) {
- dev = dst_dev(&rt->dst);
- dev_hold(dev);
- ip6_rt_put(rt);
- }
- rcu_read_unlock();
- } else {
- dev = dev_get_by_index(net, ifindex);
- }
+ dev = ip6_mc_find_dev(net, group, ifindex);
if (!dev)
return NULL;
@@ -374,7 +369,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
if (!ipv6_addr_is_multicast(group))
return -EINVAL;
- idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface);
+ idev = ip6_mc_find_idev(net, group, pgsr->gsr_interface);
if (!idev)
return -ENODEV;
@@ -509,7 +504,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
gsf->gf_fmode != MCAST_EXCLUDE)
return -EINVAL;
- idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
+ idev = ip6_mc_find_idev(net, group, gsf->gf_interface);
if (!idev)
return -ENODEV;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7d5abb3158ec..d6bb1e2f6192 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -505,7 +505,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
- dev = dst_dev(dst);
+ dev = dst_dev_rcu(dst);
idev = __in6_dev_get(dev);
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index d21fe27fe21e..1c9b283a4132 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -104,18 +104,20 @@ EXPORT_SYMBOL(ip6_find_1stfragopt);
int ip6_dst_hoplimit(struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+
+ rcu_read_lock();
if (hoplimit == 0) {
- struct net_device *dev = dst_dev(dst);
+ struct net_device *dev = dst_dev_rcu(dst);
struct inet6_dev *idev;
- rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
hoplimit = READ_ONCE(idev->cnf.hop_limit);
else
hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit);
- rcu_read_unlock();
}
+ rcu_read_unlock();
+
return hoplimit;
}
EXPORT_SYMBOL(ip6_dst_hoplimit);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 752327b10dde..eb268b070025 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -85,7 +85,6 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_ITEM("Ip6OutTransmits", IPSTATS_MIB_OUTPKTS),
- SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp6_icmp6_list[] = {
@@ -95,8 +94,8 @@ static const struct snmp_mib snmp6_icmp6_list[] = {
SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
SNMP_MIB_ITEM("Icmp6InCsumErrors", ICMP6_MIB_CSUMERRORS),
+/* ICMP6_MIB_RATELIMITHOST needs to be last, see snmp6_dev_seq_show(). */
SNMP_MIB_ITEM("Icmp6OutRateLimitHost", ICMP6_MIB_RATELIMITHOST),
- SNMP_MIB_SENTINEL
};
/* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */
@@ -129,7 +128,6 @@ static const struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI),
SNMP_MIB_ITEM("Udp6MemErrors", UDP_MIB_MEMERRORS),
- SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp6_udplite6_list[] = {
@@ -141,7 +139,6 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("UdpLite6InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("UdpLite6MemErrors", UDP_MIB_MEMERRORS),
- SNMP_MIB_SENTINEL
};
static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
@@ -182,35 +179,37 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
*/
static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
atomic_long_t *smib,
- const struct snmp_mib *itemlist)
+ const struct snmp_mib *itemlist,
+ int cnt)
{
unsigned long buff[SNMP_MIB_MAX];
int i;
if (pcpumib) {
- memset(buff, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+ memset(buff, 0, sizeof(unsigned long) * cnt);
- snmp_get_cpu_field_batch(buff, itemlist, pcpumib);
- for (i = 0; itemlist[i].name; i++)
+ snmp_get_cpu_field_batch_cnt(buff, itemlist, cnt, pcpumib);
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%lu\n",
itemlist[i].name, buff[i]);
} else {
- for (i = 0; itemlist[i].name; i++)
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
atomic_long_read(smib + itemlist[i].entry));
}
}
static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
- const struct snmp_mib *itemlist, size_t syncpoff)
+ const struct snmp_mib *itemlist,
+ int cnt, size_t syncpoff)
{
u64 buff64[SNMP_MIB_MAX];
int i;
- memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
+ memset(buff64, 0, sizeof(u64) * cnt);
- snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
- for (i = 0; itemlist[i].name; i++)
+ snmp_get_cpu_field64_batch_cnt(buff64, itemlist, cnt, mib, syncpoff);
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name, buff64[i]);
}
@@ -219,14 +218,19 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
struct net *net = (struct net *)seq->private;
snmp6_seq_show_item64(seq, net->mib.ipv6_statistics,
- snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
+ snmp6_ipstats_list,
+ ARRAY_SIZE(snmp6_ipstats_list),
+ offsetof(struct ipstats_mib, syncp));
snmp6_seq_show_item(seq, net->mib.icmpv6_statistics,
- NULL, snmp6_icmp6_list);
+ NULL, snmp6_icmp6_list,
+ ARRAY_SIZE(snmp6_icmp6_list));
snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
snmp6_seq_show_item(seq, net->mib.udp_stats_in6,
- NULL, snmp6_udp6_list);
+ NULL, snmp6_udp6_list,
+ ARRAY_SIZE(snmp6_udp6_list));
snmp6_seq_show_item(seq, net->mib.udplite_stats_in6,
- NULL, snmp6_udplite6_list);
+ NULL, snmp6_udplite6_list,
+ ARRAY_SIZE(snmp6_udplite6_list));
return 0;
}
@@ -236,9 +240,14 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
snmp6_seq_show_item64(seq, idev->stats.ipv6,
- snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
+ snmp6_ipstats_list,
+ ARRAY_SIZE(snmp6_ipstats_list),
+ offsetof(struct ipstats_mib, syncp));
+
+ /* Per idev icmp stats do not have ICMP6_MIB_RATELIMITHOST */
snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
- snmp6_icmp6_list);
+ snmp6_icmp6_list, ARRAY_SIZE(snmp6_icmp6_list) - 1);
+
snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
return 0;
}
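
A minimal, stand-alone C sketch of the counted-iteration pattern the snmp6 change above adopts: the caller passes an explicit ARRAY_SIZE() count instead of relying on a sentinel entry, which also lets the per-device path stop one entry short of the trailing rate-limit counter. The list contents and names below are illustrative, not the kernel tables.

/*
 * Counted iteration over a MIB-style item list; the caller that does
 * not track the last counter simply passes ARRAY_SIZE(...) - 1,
 * mirroring the snmp6_dev_seq_show() call above.
 */
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct snmp_item {
	const char *name;
	unsigned long value;
};

static const struct snmp_item icmp6_list[] = {
	{ "Icmp6InMsgs",		10 },
	{ "Icmp6OutMsgs",		 7 },
	{ "Icmp6OutRateLimitHost",	 2 },	/* must stay last */
};

static void show_items(const struct snmp_item *items, int cnt)
{
	for (int i = 0; i < cnt; i++)
		printf("%-32s\t%lu\n", items[i].name, items[i].value);
}

int main(void)
{
	/* global view: every counter */
	show_items(icmp6_list, ARRAY_SIZE(icmp6_list));
	/* per-device view: the rate-limit entry is not maintained, skip it */
	show_items(icmp6_list, ARRAY_SIZE(icmp6_list) - 1);
	return 0;
}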
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3299cfa12e21..3371f16b7a3e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2943,7 +2943,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
if (res.f6i->nh) {
struct fib6_nh_match_arg arg = {
- .dev = dst_dev(dst),
+ .dev = dst_dev_rcu(dst),
.gw = &rt6->rt6i_gateway,
};
@@ -3238,7 +3238,6 @@ EXPORT_SYMBOL_GPL(ip6_sk_redirect);
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
- struct net_device *dev = dst_dev(dst);
unsigned int mtu = dst_mtu(dst);
struct net *net;
@@ -3246,7 +3245,7 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
rcu_read_lock();
- net = dev_net_rcu(dev);
+ net = dst_dev_net_rcu(dst);
if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
@@ -4301,7 +4300,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
if (res.f6i->nh) {
struct fib6_nh_match_arg arg = {
- .dev = dst_dev(dst),
+ .dev = dst_dev_rcu(dst),
.gw = &rt->rt6i_gateway,
};
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2ed07fa121ab..7609c7c31df7 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3001,6 +3001,9 @@ static int ieee80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *req)
{
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
+ struct ieee80211_channel *chan;
+ int radio_idx;
sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev);
@@ -3028,10 +3031,20 @@ static int ieee80211_scan(struct wiphy *wiphy,
* the frames sent while scanning on other channel will be
* lost)
*/
- if (ieee80211_num_beaconing_links(sdata) &&
- (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
- !(req->flags & NL80211_SCAN_FLAG_AP)))
- return -EOPNOTSUPP;
+ for_each_link_data(sdata, link) {
+ /* if the link is not beaconing, ignore it */
+ if (!sdata_dereference(link->u.ap.beacon, sdata))
+ continue;
+
+ chan = link->conf->chanreq.oper.chan;
+ radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+
+ if (ieee80211_is_radio_idx_in_scan_req(wiphy, req,
+ radio_idx) &&
+ (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
+ !(req->flags & NL80211_SCAN_FLAG_AP)))
+ return -EOPNOTSUPP;
+ }
break;
case NL80211_IFTYPE_NAN:
default:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 3ae6104e5cb2..78f862f79aa8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1164,9 +1164,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE)))
return -EINVAL;
- if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR)))
- return -EINVAL;
-
if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC)))
return -EINVAL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 4d4ff4d4917a..59baca24aa6b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -5230,12 +5230,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
rx.sdata = prev_sta->sdata;
+ if (!status->link_valid && prev_sta->sta.mlo) {
+ struct link_sta_info *link_sta;
+
+ link_sta = link_sta_info_get_bss(rx.sdata,
+ hdr->addr2);
+ if (!link_sta)
+ continue;
+
+ link_id = link_sta->link_id;
+ }
+
if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
goto out;
- if (!status->link_valid && prev_sta->sta.mlo)
- continue;
-
ieee80211_prepare_and_rx_handle(&rx, skb, false);
prev_sta = sta;
@@ -5243,10 +5251,18 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (prev_sta) {
rx.sdata = prev_sta->sdata;
- if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
- goto out;
+ if (!status->link_valid && prev_sta->sta.mlo) {
+ struct link_sta_info *link_sta;
- if (!status->link_valid && prev_sta->sta.mlo)
+ link_sta = link_sta_info_get_bss(rx.sdata,
+ hdr->addr2);
+ if (!link_sta)
+ goto out;
+
+ link_id = link_sta->link_id;
+ }
+
+ if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
goto out;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8c550aab9bdc..ebcec5241a94 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -3206,16 +3206,20 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
struct link_sta_info *link_sta;
ether_addr_copy(sinfo->mld_addr, sta->addr);
+
+ /* assign valid links first for iteration */
+ sinfo->valid_links = sta->sta.valid_links;
+
for_each_valid_link(sinfo, link_id) {
link_sta = wiphy_dereference(sta->local->hw.wiphy,
sta->link[link_id]);
link = wiphy_dereference(sdata->local->hw.wiphy,
sdata->link[link_id]);
- if (!link_sta || !sinfo->links[link_id] || !link)
+ if (!link_sta || !sinfo->links[link_id] || !link) {
+ sinfo->valid_links &= ~BIT(link_id);
continue;
-
- sinfo->valid_links = sta->sta.valid_links;
+ }
sta_set_link_sinfo(sta, sinfo->links[link_id],
link, tidstats);
}
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index fed40dae5583..e8ffa62ec183 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -501,10 +501,15 @@ void mptcp_active_enable(struct sock *sk)
struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
if (atomic_read(&pernet->active_disable_times)) {
- struct dst_entry *dst = sk_dst_get(sk);
+ struct net_device *dev;
+ struct dst_entry *dst;
- if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (dev && (dev->flags & IFF_LOOPBACK))
atomic_set(&pernet->active_disable_times, 0);
+ rcu_read_unlock();
}
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index f31a3a79531a..e8325890a322 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1721,19 +1721,14 @@ static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
/* only the additional subflows created by kworkers have to be modified */
if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
cgroup_id(sock_cgroup_ptr(child_skcd))) {
-#ifdef CONFIG_MEMCG
- struct mem_cgroup *memcg = parent->sk_memcg;
-
- mem_cgroup_sk_free(child);
- if (memcg && css_tryget(&memcg->css))
- child->sk_memcg = memcg;
-#endif /* CONFIG_MEMCG */
-
cgroup_sk_free(child_skcd);
*child_skcd = *parent_skcd;
cgroup_sk_clone(child_skcd);
}
#endif /* CONFIG_SOCK_CGROUP_DATA */
+
+ if (mem_cgroup_sockets_enabled)
+ mem_cgroup_sk_inherit(parent, child);
}
static void mptcp_subflow_ops_override(struct sock *ssk)
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 5251524b96af..5e4453e9ef8e 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -63,7 +63,7 @@ struct hbucket {
: jhash_size((htable_bits) - HTABLE_REGION_BITS))
#define ahash_sizeof_regions(htable_bits) \
(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
-#define ahash_region(n, htable_bits) \
+#define ahash_region(n) \
((n) / jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_start(h, htable_bits) \
((htable_bits) < HTABLE_REGION_BITS ? 0 \
@@ -702,7 +702,7 @@ mtype_resize(struct ip_set *set, bool retried)
#endif
key = HKEY(data, h->initval, htable_bits);
m = __ipset_dereference(hbucket(t, key));
- nr = ahash_region(key, htable_bits);
+ nr = ahash_region(key);
if (!m) {
m = kzalloc(sizeof(*m) +
AHASH_INIT_SIZE * dsize,
@@ -852,7 +852,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
- r = ahash_region(key, t->htable_bits);
+ r = ahash_region(key);
atomic_inc(&t->uref);
elements = t->hregion[r].elements;
maxelem = t->maxelem;
@@ -1050,7 +1050,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
- r = ahash_region(key, t->htable_bits);
+ r = ahash_region(key);
atomic_inc(&t->uref);
rcu_read_unlock_bh();
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 965f3c8e5089..37ebb0cb62b8 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -885,7 +885,7 @@ static void ip_vs_conn_expire(struct timer_list *t)
* conntrack cleanup for the net.
*/
smp_rmb();
- if (ipvs->enable)
+ if (READ_ONCE(ipvs->enable))
ip_vs_conn_drop_conntrack(cp);
}
@@ -1439,7 +1439,7 @@ void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
cond_resched_rcu();
/* netns clean up started, abort delayed work */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
break;
}
rcu_read_unlock();
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index c7a8a08b7308..5ea7ab8bf4dc 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1353,9 +1353,6 @@ ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *stat
if (unlikely(!skb_dst(skb)))
return NF_ACCEPT;
- if (!ipvs->enable)
- return NF_ACCEPT;
-
ip_vs_fill_iph_skb(af, skb, false, &iph);
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
@@ -1940,7 +1937,7 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
return NF_ACCEPT;
}
/* ipvs enabled in this netns ? */
- if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
+ if (unlikely(sysctl_backup_only(ipvs)))
return NF_ACCEPT;
ip_vs_fill_iph_skb(af, skb, false, &iph);
@@ -2108,7 +2105,7 @@ ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
int r;
/* ipvs enabled in this netns ? */
- if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
+ if (unlikely(sysctl_backup_only(ipvs)))
return NF_ACCEPT;
if (state->pf == NFPROTO_IPV4) {
@@ -2295,7 +2292,7 @@ static int __net_init __ip_vs_init(struct net *net)
return -ENOMEM;
/* Hold the beast until a service is registered */
- ipvs->enable = 0;
+ WRITE_ONCE(ipvs->enable, 0);
ipvs->net = net;
/* Counters used for creating unique names */
ipvs->gen = atomic_read(&ipvs_netns_cnt);
@@ -2367,7 +2364,7 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
ipvs = net_ipvs(net);
ip_vs_unregister_hooks(ipvs, AF_INET);
ip_vs_unregister_hooks(ipvs, AF_INET6);
- ipvs->enable = 0; /* Disable packet reception */
+ WRITE_ONCE(ipvs->enable, 0); /* Disable packet reception */
smp_wmb();
ip_vs_sync_net_cleanup(ipvs);
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 6a6fc4478533..4c8fa22be88a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -256,7 +256,7 @@ static void est_reload_work_handler(struct work_struct *work)
struct ip_vs_est_kt_data *kd = ipvs->est_kt_arr[id];
/* netns clean up started, abort delayed work */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
goto unlock;
if (!kd)
continue;
@@ -1483,9 +1483,9 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
*svc_p = svc;
- if (!ipvs->enable) {
+ if (!READ_ONCE(ipvs->enable)) {
/* Now there is a service - full throttle */
- ipvs->enable = 1;
+ WRITE_ONCE(ipvs->enable, 1);
/* Start estimation for first time */
ip_vs_est_reload_start(ipvs);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 15049b826732..93a925f1ed9b 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -231,7 +231,7 @@ static int ip_vs_estimation_kthread(void *data)
void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
{
/* Ignore reloads before first service is added */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
return;
ip_vs_est_stopped_recalc(ipvs);
/* Bump the kthread configuration genid */
@@ -306,7 +306,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
int i;
if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
- ipvs->enable && ipvs->est_max_threads)
+ READ_ONCE(ipvs->enable) && ipvs->est_max_threads)
return -EINVAL;
mutex_lock(&ipvs->est_mutex);
@@ -343,7 +343,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
}
/* Start kthread tasks only when services are present */
- if (ipvs->enable && !ip_vs_est_stopped(ipvs)) {
+ if (READ_ONCE(ipvs->enable) && !ip_vs_est_stopped(ipvs)) {
ret = ip_vs_est_kthread_start(ipvs, kd);
if (ret < 0)
goto out;
@@ -486,7 +486,7 @@ int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
struct ip_vs_estimator *est = &stats->est;
int ret;
- if (!ipvs->est_max_threads && ipvs->enable)
+ if (!ipvs->est_max_threads && READ_ONCE(ipvs->enable))
ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
est->ktid = -1;
@@ -663,7 +663,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
/* Wait for cpufreq frequency transition */
wait_event_idle_timeout(wq, kthread_should_stop(),
HZ / 50);
- if (!ipvs->enable || kthread_should_stop())
+ if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
goto stop;
}
@@ -681,7 +681,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
rcu_read_unlock();
local_bh_enable();
- if (!ipvs->enable || kthread_should_stop())
+ if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
goto stop;
cond_resched();
@@ -757,7 +757,7 @@ static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
mutex_lock(&ipvs->est_mutex);
for (id = 1; id < ipvs->est_kt_count; id++) {
/* netns clean up started, abort */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
goto unlock2;
kd = ipvs->est_kt_arr[id];
if (!kd)
@@ -787,7 +787,7 @@ static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
id = ipvs->est_kt_count;
next_kt:
- if (!ipvs->enable || kthread_should_stop())
+ if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
goto unlock;
id--;
if (id < 0)
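
A rough user-space analogue of the ipvs->enable conversion in the IPVS hunks above: one path sets or clears the flag with WRITE_ONCE() while workers poll it with READ_ONCE() and no lock, so the marked accesses keep the compiler from tearing or caching the value. Relaxed C11 atomics stand in for those macros here; the thread setup is illustrative only.

/*
 * Build with: cc -pthread example.c
 * A control thread clears a flag that a worker polls without locking.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int enable;	/* kernel side: plain int + READ_ONCE/WRITE_ONCE */

static void *worker(void *arg)
{
	(void)arg;
	/* abort the loop as soon as the control path clears the flag */
	while (atomic_load_explicit(&enable, memory_order_relaxed))
		usleep(1000);
	puts("worker: enable cleared, stopping");
	return NULL;
}

int main(void)
{
	pthread_t tid;

	atomic_store_explicit(&enable, 1, memory_order_relaxed);
	pthread_create(&tid, NULL, worker, NULL);
	usleep(10000);
	/* "netns cleanup started": disable and let the worker wind down */
	atomic_store_explicit(&enable, 0, memory_order_relaxed);
	pthread_join(tid, NULL);
	return 0;
}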
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index d8a284999544..206c6700e200 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -53,6 +53,7 @@ enum {
IP_VS_FTP_EPSV,
};
+static bool exiting_module;
/*
* List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
* First port is set to the default port.
@@ -605,7 +606,7 @@ static void __ip_vs_ftp_exit(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
- if (!ipvs)
+ if (!ipvs || !exiting_module)
return;
unregister_ip_vs_app(ipvs, &ip_vs_ftp);
@@ -627,6 +628,7 @@ static int __init ip_vs_ftp_init(void)
*/
static void __exit ip_vs_ftp_exit(void)
{
+ exiting_module = true;
unregister_pernet_subsys(&ip_vs_ftp_ops);
/* rcu_barrier() is called by netns */
}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 1f14ef0436c6..708b79380f04 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -317,6 +317,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
smp_acquire__after_ctrl_dep();
if (nf_ct_should_gc(ct)) {
+ struct ct_iter_state *st = s->private;
+
+ st->skip_elems--;
nf_ct_kill(ct);
goto release;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e598a2a252b0..811d02b4c4f7 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -376,6 +376,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
const struct nfnetlink_subsystem *ss;
const struct nfnl_callback *nc;
struct netlink_ext_ack extack;
+ struct nlmsghdr *onlh = nlh;
LIST_HEAD(err_list);
u32 status;
int err;
@@ -386,6 +387,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
status = 0;
replay_abort:
skb = netlink_skb_clone(oskb, GFP_KERNEL);
+ nlh = onlh;
if (!skb)
return netlink_ack(oskb, nlh, -ENOMEM, NULL);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index a818eff27e6b..418b84e2b260 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -27,11 +27,16 @@
/* Handle NCI Notification packets */
-static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_core_reset_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
/* Handle NCI 2.x core reset notification */
- const struct nci_core_reset_ntf *ntf = (void *)skb->data;
+ const struct nci_core_reset_ntf *ntf;
+
+ if (skb->len < sizeof(struct nci_core_reset_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_core_reset_ntf *)skb->data;
ndev->nci_ver = ntf->nci_ver;
pr_debug("nci_ver 0x%x, config_status 0x%x\n",
@@ -42,15 +47,22 @@ static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
__le32_to_cpu(ntf->manufact_specific_info);
nci_req_complete(ndev, NCI_STATUS_OK);
+
+ return 0;
}
-static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+static int nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
{
- struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+ struct nci_core_conn_credit_ntf *ntf;
struct nci_conn_info *conn_info;
int i;
+ if (skb->len < sizeof(struct nci_core_conn_credit_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_core_conn_credit_ntf *)skb->data;
+
pr_debug("num_entries %d\n", ntf->num_entries);
if (ntf->num_entries > NCI_MAX_NUM_CONN)
@@ -68,7 +80,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
conn_info = nci_get_conn_info_by_conn_id(ndev,
ntf->conn_entries[i].conn_id);
if (!conn_info)
- return;
+ return 0;
atomic_add(ntf->conn_entries[i].credits,
&conn_info->credits_cnt);
@@ -77,12 +89,19 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
/* trigger the next tx */
if (!skb_queue_empty(&ndev->tx_q))
queue_work(ndev->tx_wq, &ndev->tx_work);
+
+ return 0;
}
-static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
- __u8 status = skb->data[0];
+ __u8 status;
+
+ if (skb->len < 1)
+ return -EINVAL;
+
+ status = skb->data[0];
pr_debug("status 0x%x\n", status);
@@ -91,12 +110,19 @@ static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
(the state remains the same) */
nci_req_complete(ndev, status);
}
+
+ return 0;
}
-static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+static int nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
{
- struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
+ struct nci_core_intf_error_ntf *ntf;
+
+ if (skb->len < sizeof(struct nci_core_intf_error_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_core_intf_error_ntf *)skb->data;
ntf->conn_id = nci_conn_id(&ntf->conn_id);
@@ -105,6 +131,8 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
+
+ return 0;
}
static const __u8 *
@@ -329,13 +357,18 @@ void nci_clear_target_list(struct nci_dev *ndev)
ndev->n_targets = 0;
}
-static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
struct nci_rf_discover_ntf ntf;
- const __u8 *data = skb->data;
+ const __u8 *data;
bool add_target = true;
+ if (skb->len < sizeof(struct nci_rf_discover_ntf))
+ return -EINVAL;
+
+ data = skb->data;
+
ntf.rf_discovery_id = *data++;
ntf.rf_protocol = *data++;
ntf.rf_tech_and_mode = *data++;
@@ -390,6 +423,8 @@ static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
nfc_targets_found(ndev->nfc_dev, ndev->targets,
ndev->n_targets);
}
+
+ return 0;
}
static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
@@ -553,14 +588,19 @@ static int nci_store_ats_nfc_iso_dep(struct nci_dev *ndev,
return NCI_STATUS_OK;
}
-static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
struct nci_conn_info *conn_info;
struct nci_rf_intf_activated_ntf ntf;
- const __u8 *data = skb->data;
+ const __u8 *data;
int err = NCI_STATUS_OK;
+ if (skb->len < sizeof(struct nci_rf_intf_activated_ntf))
+ return -EINVAL;
+
+ data = skb->data;
+
ntf.rf_discovery_id = *data++;
ntf.rf_interface = *data++;
ntf.rf_protocol = *data++;
@@ -667,7 +707,7 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
if (err == NCI_STATUS_OK) {
conn_info = ndev->rf_conn_info;
if (!conn_info)
- return;
+ return 0;
conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
conn_info->initial_num_credits = ntf.initial_num_credits;
@@ -721,19 +761,26 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
pr_err("error when signaling tm activation\n");
}
}
+
+ return 0;
}
-static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
const struct nci_conn_info *conn_info;
- const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data;
+ const struct nci_rf_deactivate_ntf *ntf;
+
+ if (skb->len < sizeof(struct nci_rf_deactivate_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_rf_deactivate_ntf *)skb->data;
pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
conn_info = ndev->rf_conn_info;
if (!conn_info)
- return;
+ return 0;
/* drop tx data queue */
skb_queue_purge(&ndev->tx_q);
@@ -765,14 +812,20 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
}
nci_req_complete(ndev, NCI_STATUS_OK);
+
+ return 0;
}
-static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
u8 status = NCI_STATUS_OK;
- const struct nci_nfcee_discover_ntf *nfcee_ntf =
- (struct nci_nfcee_discover_ntf *)skb->data;
+ const struct nci_nfcee_discover_ntf *nfcee_ntf;
+
+ if (skb->len < sizeof(struct nci_nfcee_discover_ntf))
+ return -EINVAL;
+
+ nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data;
/* NFCForum NCI 9.2.1 HCI Network Specific Handling
* If the NFCC supports the HCI Network, it SHALL return one,
@@ -783,6 +836,8 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
ndev->cur_params.id = nfcee_ntf->nfcee_id;
nci_req_complete(ndev, status);
+
+ return 0;
}
void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -809,35 +864,43 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
switch (ntf_opcode) {
case NCI_OP_CORE_RESET_NTF:
- nci_core_reset_ntf_packet(ndev, skb);
+ if (nci_core_reset_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_CORE_CONN_CREDITS_NTF:
- nci_core_conn_credits_ntf_packet(ndev, skb);
+ if (nci_core_conn_credits_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_CORE_GENERIC_ERROR_NTF:
- nci_core_generic_error_ntf_packet(ndev, skb);
+ if (nci_core_generic_error_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_CORE_INTF_ERROR_NTF:
- nci_core_conn_intf_error_ntf_packet(ndev, skb);
+ if (nci_core_conn_intf_error_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_DISCOVER_NTF:
- nci_rf_discover_ntf_packet(ndev, skb);
+ if (nci_rf_discover_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_INTF_ACTIVATED_NTF:
- nci_rf_intf_activated_ntf_packet(ndev, skb);
+ if (nci_rf_intf_activated_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_DEACTIVATE_NTF:
- nci_rf_deactivate_ntf_packet(ndev, skb);
+ if (nci_rf_deactivate_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_NFCEE_DISCOVER_NTF:
- nci_nfcee_discover_ntf_packet(ndev, skb);
+ if (nci_nfcee_discover_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_NFCEE_ACTION_NTF:
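
A stand-alone sketch of the validation pattern the NCI notification handlers above switch to: check the packet length against the fixed-size header before touching it, and return an error so the dispatcher can skip further processing of a truncated notification. The struct layout and bytes below are illustrative only.

/*
 * Reject short buffers before interpreting them as a fixed-size header.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reset_ntf {
	uint8_t nci_ver;
	uint8_t config_status;
};

static int parse_reset_ntf(const uint8_t *data, size_t len, struct reset_ntf *out)
{
	if (len < sizeof(*out))
		return -1;			/* truncated packet: do not read past it */

	memcpy(out, data, sizeof(*out));	/* safe: length already checked */
	return 0;
}

int main(void)
{
	const uint8_t short_pkt[1] = { 0x20 };
	const uint8_t good_pkt[2]  = { 0x20, 0x01 };
	struct reset_ntf ntf;
	int rc;

	rc = parse_reset_ntf(short_pkt, sizeof(short_pkt), &ntf);
	printf("short packet: %d\n", rc);

	rc = parse_reset_ntf(good_pkt, sizeof(good_pkt), &ntf);
	printf("good packet:  %d (ver 0x%x)\n", rc, ntf.nci_ver);
	return 0;
}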
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 08be56dfb3f2..09745baa1017 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -509,10 +509,10 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
}
/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */
-static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
+static int smc_clc_prfx_set4_rcu(struct net_device *dev, __be32 ipv4,
struct smc_clc_msg_proposal_prefix *prop)
{
- struct in_device *in_dev = __in_dev_get_rcu(dst->dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
const struct in_ifaddr *ifa;
if (!in_dev)
@@ -530,12 +530,12 @@ static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
}
/* fill CLC proposal msg with ipv6 prefixes from device */
-static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
+static int smc_clc_prfx_set6_rcu(struct net_device *dev,
struct smc_clc_msg_proposal_prefix *prop,
struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
+ struct inet6_dev *in6_dev = __in6_dev_get(dev);
struct inet6_ifaddr *ifa;
int cnt = 0;
@@ -564,41 +564,44 @@ static int smc_clc_prfx_set(struct socket *clcsock,
struct smc_clc_msg_proposal_prefix *prop,
struct smc_clc_ipv6_prefix *ipv6_prfx)
{
- struct dst_entry *dst = sk_dst_get(clcsock->sk);
struct sockaddr_storage addrs;
struct sockaddr_in6 *addr6;
struct sockaddr_in *addr;
+ struct net_device *dev;
+ struct dst_entry *dst;
int rc = -ENOENT;
- if (!dst) {
- rc = -ENOTCONN;
- goto out;
- }
- if (!dst->dev) {
- rc = -ENODEV;
- goto out_rel;
- }
/* get address to which the internal TCP socket is bound */
if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
- goto out_rel;
+ goto out;
+
/* analyze IP specific data of net_device belonging to TCP socket */
addr6 = (struct sockaddr_in6 *)&addrs;
+
rcu_read_lock();
+
+ dst = __sk_dst_get(clcsock->sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!dev) {
+ rc = -ENODEV;
+ goto out_unlock;
+ }
+
if (addrs.ss_family == PF_INET) {
/* IPv4 */
addr = (struct sockaddr_in *)&addrs;
- rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
+ rc = smc_clc_prfx_set4_rcu(dev, addr->sin_addr.s_addr, prop);
} else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
/* mapped IPv4 address - peer is IPv4 only */
- rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
+ rc = smc_clc_prfx_set4_rcu(dev, addr6->sin6_addr.s6_addr32[3],
prop);
} else {
/* IPv6 */
- rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
+ rc = smc_clc_prfx_set6_rcu(dev, prop, ipv6_prfx);
}
+
+out_unlock:
rcu_read_unlock();
-out_rel:
- dst_release(dst);
out:
return rc;
}
@@ -654,26 +657,26 @@ static int smc_clc_prfx_match6_rcu(struct net_device *dev,
int smc_clc_prfx_match(struct socket *clcsock,
struct smc_clc_msg_proposal_prefix *prop)
{
- struct dst_entry *dst = sk_dst_get(clcsock->sk);
+ struct net_device *dev;
+ struct dst_entry *dst;
int rc;
- if (!dst) {
- rc = -ENOTCONN;
- goto out;
- }
- if (!dst->dev) {
+ rcu_read_lock();
+
+ dst = __sk_dst_get(clcsock->sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!dev) {
rc = -ENODEV;
- goto out_rel;
+ goto out;
}
- rcu_read_lock();
+
if (!prop->ipv6_prefixes_cnt)
- rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
+ rc = smc_clc_prfx_match4_rcu(dev, prop);
else
- rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
- rcu_read_unlock();
-out_rel:
- dst_release(dst);
+ rc = smc_clc_prfx_match6_rcu(dev, prop);
out:
+ rcu_read_unlock();
+
return rc;
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 262746e304dd..2a559a98541c 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1883,35 +1883,32 @@ static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
- struct dst_entry *dst = sk_dst_get(clcsock->sk);
struct netdev_nested_priv priv;
struct net_device *ndev;
+ struct dst_entry *dst;
int rc = 0;
ini->vlan_id = 0;
- if (!dst) {
- rc = -ENOTCONN;
- goto out;
- }
- if (!dst->dev) {
+
+ rcu_read_lock();
+
+ dst = __sk_dst_get(clcsock->sk);
+ ndev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!ndev) {
rc = -ENODEV;
- goto out_rel;
+ goto out;
}
- ndev = dst->dev;
if (is_vlan_dev(ndev)) {
ini->vlan_id = vlan_dev_vlan_id(ndev);
- goto out_rel;
+ goto out;
}
priv.data = (void *)&ini->vlan_id;
- rtnl_lock();
- netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
- rtnl_unlock();
-
-out_rel:
- dst_release(dst);
+ netdev_walk_all_lower_dev_rcu(ndev, smc_vlan_by_tcpsk_walk, &priv);
out:
+ rcu_read_unlock();
+
return rc;
}
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 76ad29e31d60..db3043b1e3fd 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -1126,37 +1126,38 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
*/
void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini)
{
- struct dst_entry *dst = sk_dst_get(sk);
-
- if (!dst)
- goto out;
- if (!dst->dev)
- goto out_rel;
+ struct net_device *dev;
+ struct dst_entry *dst;
- smc_pnet_find_roce_by_pnetid(dst->dev, ini);
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ dev_hold(dev);
+ rcu_read_unlock();
-out_rel:
- dst_release(dst);
-out:
- return;
+ if (dev) {
+ smc_pnet_find_roce_by_pnetid(dev, ini);
+ dev_put(dev);
+ }
}
void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini)
{
- struct dst_entry *dst = sk_dst_get(sk);
+ struct net_device *dev;
+ struct dst_entry *dst;
ini->ism_dev[0] = NULL;
- if (!dst)
- goto out;
- if (!dst->dev)
- goto out_rel;
- smc_pnet_find_ism_by_pnetid(dst->dev, ini);
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ dev_hold(dev);
+ rcu_read_unlock();
-out_rel:
- dst_release(dst);
-out:
- return;
+ if (dev) {
+ smc_pnet_find_ism_by_pnetid(dev, ini);
+ dev_put(dev);
+ }
}
/* Lookup and apply a pnet table entry to the given ib device.
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e82212f6b562..a8ec30759a18 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -724,7 +724,7 @@ svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
- if (flavor != RPC_AUTH_GSS) {
+ if (flavor != RPC_AUTH_GSS || checksum.len < XDR_UNIT) {
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index f672a62a9a52..a82fdcf19969 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -123,17 +123,19 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
- struct dst_entry *dst = sk_dst_get(sk);
- struct net_device *netdev = NULL;
+ struct net_device *dev, *lowest_dev = NULL;
+ struct dst_entry *dst;
- if (likely(dst)) {
- netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
- dev_hold(netdev);
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (likely(dev)) {
+ lowest_dev = netdev_sk_get_lowest_dev(dev, sk);
+ dev_hold(lowest_dev);
}
+ rcu_read_unlock();
- dst_release(dst);
-
- return netdev;
+ return lowest_dev;
}
static void destroy_record(struct tls_record_info *record)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 240c68baa3d1..341dbf642181 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2992,7 +2992,7 @@ bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
u32 freq, width;
freq = ieee80211_chandef_to_khz(chandef);
- width = cfg80211_chandef_get_width(chandef);
+ width = MHZ_TO_KHZ(cfg80211_chandef_get_width(chandef));
if (!ieee80211_radio_freq_range_valid(radio, freq, width))
return false;
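
A small self-contained illustration of the unit mismatch fixed in cfg80211_radio_chandef_valid() above: the range check takes both frequency and width in kHz, so a width obtained in MHz must be scaled before the comparison. The range_valid() helper and the numbers are made up for the example.

/*
 * An 80 MHz channel at 5180 MHz does not fit a 5170-5330 MHz radio
 * range; passing the width unscaled (80 "kHz") wrongly accepts it.
 */
#include <stdbool.h>
#include <stdio.h>

#define MHZ_TO_KHZ(mhz)	((mhz) * 1000)

static bool range_valid(unsigned int start_khz, unsigned int end_khz,
			unsigned int freq_khz, unsigned int width_khz)
{
	/* the whole channel must lie inside the radio's frequency range */
	return freq_khz - width_khz / 2 >= start_khz &&
	       freq_khz + width_khz / 2 <= end_khz;
}

int main(void)
{
	unsigned int freq_khz = 5180000;
	unsigned int width_mhz = 80;

	/* bug: width left in MHz, channel wrongly reported as valid (1) */
	printf("unscaled: %d\n", range_valid(5170000, 5330000, freq_khz, width_mhz));
	/* fix: scale to kHz first, channel correctly rejected (0) */
	printf("scaled:   %d\n",
	       range_valid(5170000, 5330000, freq_khz, MHZ_TO_KHZ(width_mhz)));
	return 0;
}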
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 84d60635e8a9..7c47875cd2ae 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -99,3 +99,4 @@ const xa_mark_t RUST_CONST_HELPER_XA_PRESENT = XA_PRESENT;
const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC;
const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1;
+const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE = VM_MERGEABLE;
diff --git a/rust/kernel/cpumask.rs b/rust/kernel/cpumask.rs
index 3fcbff438670..05e1c882404e 100644
--- a/rust/kernel/cpumask.rs
+++ b/rust/kernel/cpumask.rs
@@ -212,6 +212,7 @@ pub fn copy(&self, dstp: &mut Self) {
/// }
/// assert_eq!(mask2.weight(), count);
/// ```
+#[repr(transparent)]
pub struct CpumaskVar {
#[cfg(CONFIG_CPUMASK_OFFSTACK)]
ptr: NonNull<Cpumask>,
diff --git a/scripts/misc-check b/scripts/misc-check
index 84f08da17b2c..40e5a4b01ff4 100755
--- a/scripts/misc-check
+++ b/scripts/misc-check
@@ -45,7 +45,7 @@ check_tracked_ignored_files () {
# does not automatically fix it.
check_missing_include_linux_export_h () {
- git -C "${srctree:-.}" grep --files-with-matches -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_GPL_FOR_MODULES)\(.*\)' \
+ git -C "${srctree:-.}" grep --files-with-matches -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_FOR_MODULES)\(.*\)' \
-- '*.[ch]' :^tools/ :^include/linux/export.h |
xargs -r git -C "${srctree:-.}" grep --files-without-match '#include[[:space:]]*<linux/export\.h>' |
xargs -r printf "%s: warning: EXPORT_SYMBOL() is used, but #include <linux/export.h> is missing\n" >&2
@@ -58,7 +58,7 @@ check_unnecessary_include_linux_export_h () {
git -C "${srctree:-.}" grep --files-with-matches '#include[[:space:]]*<linux/export\.h>' \
-- '*.[c]' :^tools/ |
- xargs -r git -C "${srctree:-.}" grep --files-without-match -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_GPL_FOR_MODULES)\(.*\)' |
+ xargs -r git -C "${srctree:-.}" grep --files-without-match -E 'EXPORT_SYMBOL((_NS)?(_GPL)?|_FOR_MODULES)\(.*\)' |
xargs -r printf "%s: warning: EXPORT_SYMBOL() is not used, but #include <linux/export.h> is present\n" >&2
}
diff --git a/security/Kconfig b/security/Kconfig
index 4816fc74f81e..285f284dfcac 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -269,6 +269,7 @@ endchoice
config LSM
string "Ordered list of enabled LSMs"
+ depends on SECURITY
default "landlock,lockdown,yama,loadpin,safesetid,smack,selinux,tomoyo,apparmor,ipe,bpf" if DEFAULT_SECURITY_SMACK
default "landlock,lockdown,yama,loadpin,safesetid,apparmor,selinux,smack,tomoyo,ipe,bpf" if DEFAULT_SECURITY_APPARMOR
default "landlock,lockdown,yama,loadpin,safesetid,tomoyo,ipe,bpf" if DEFAULT_SECURITY_TOMOYO
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 1eab940fa2e5..68bee40c9ada 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -84,19 +84,24 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
}
/* define group lock helpers */
-#define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
+#define DEFINE_PCM_GROUP_LOCK(action, bh_lock, bh_unlock, mutex_action) \
static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
{ \
- if (nonatomic) \
+ if (nonatomic) { \
mutex_ ## mutex_action(&group->mutex); \
- else \
- spin_ ## action(&group->lock); \
-}
-
-DEFINE_PCM_GROUP_LOCK(lock, lock);
-DEFINE_PCM_GROUP_LOCK(unlock, unlock);
-DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
-DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
+ } else { \
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_lock) \
+ local_bh_disable(); \
+ spin_ ## action(&group->lock); \
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_unlock) \
+ local_bh_enable(); \
+ } \
+}
+
+DEFINE_PCM_GROUP_LOCK(lock, false, false, lock);
+DEFINE_PCM_GROUP_LOCK(unlock, false, false, unlock);
+DEFINE_PCM_GROUP_LOCK(lock_irq, true, false, lock);
+DEFINE_PCM_GROUP_LOCK(unlock_irq, false, true, unlock);
/**
* snd_pcm_stream_lock - Lock the PCM stream
diff --git a/sound/hda/codecs/hdmi/hdmi.c b/sound/hda/codecs/hdmi/hdmi.c
index 44576b30f699..774969dbfde4 100644
--- a/sound/hda/codecs/hdmi/hdmi.c
+++ b/sound/hda/codecs/hdmi/hdmi.c
@@ -1583,6 +1583,7 @@ static const struct snd_pci_quirk force_connect_list[] = {
SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1),
SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1),
SND_PCI_QUIRK(0x103c, 0x845a, "HP EliteDesk 800 G4 DM 65W", 1),
+ SND_PCI_QUIRK(0x103c, 0x83f3, "HP ProDesk 400", 1),
SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index f267437c9698..07ea76efa5de 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -6487,6 +6487,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x89da, "HP Spectre x360 14t-ea100", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
SND_PCI_QUIRK(0x103c, 0x89e7, "HP Elite x2 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 9d95ecb299ae..a99acd1125e7 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -316,7 +316,7 @@ static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
/* low-level dsp access */
int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
- u16 ret;
+ int ret;
mutex_lock(&chip->msg_lock);
@@ -330,10 +330,10 @@ int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
- u16 ret = 0;
u32 freq_raw = 0;
u32 freq = 0;
u32 frequency = 0;
+ int ret;
mutex_lock(&chip->msg_lock);
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 1bb7e1dc7e6b..e92939068bf7 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -5831,6 +5831,13 @@ static const struct snd_soc_component_driver wcd934x_component_drv = {
.endianness = 1,
};
+static void wcd934x_put_device_action(void *data)
+{
+ struct device *dev = data;
+
+ put_device(dev);
+}
+
static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
{
struct device *dev = &wcd->sdev->dev;
@@ -5847,11 +5854,13 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
return dev_err_probe(dev, -EINVAL, "Unable to get SLIM Interface device\n");
slim_get_logical_addr(wcd->sidev);
- wcd->if_regmap = regmap_init_slimbus(wcd->sidev,
+ wcd->if_regmap = devm_regmap_init_slimbus(wcd->sidev,
&wcd934x_ifc_regmap_config);
- if (IS_ERR(wcd->if_regmap))
+ if (IS_ERR(wcd->if_regmap)) {
+ put_device(&wcd->sidev->dev);
return dev_err_probe(dev, PTR_ERR(wcd->if_regmap),
"Failed to allocate ifc register map\n");
+ }
of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate",
&wcd->dmic_sample_rate);
@@ -5893,6 +5902,10 @@ static int wcd934x_codec_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, wcd934x_put_device_action, &wcd->sidev->dev);
+ if (ret)
+ return ret;
+
/* set default rate 9P6MHz */
regmap_update_bits(wcd->regmap, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK,
diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c
index 3b0a8cc314e0..de2dff3c56d3 100644
--- a/sound/soc/codecs/wcd937x.c
+++ b/sound/soc/codecs/wcd937x.c
@@ -2046,9 +2046,9 @@ static const struct snd_kcontrol_new wcd937x_snd_controls[] = {
SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
wcd937x_rx_hph_mode_get, wcd937x_rx_hph_mode_put),
- SOC_SINGLE_EXT("HPHL_COMP Switch", SND_SOC_NOPM, 0, 1, 0,
+ SOC_SINGLE_EXT("HPHL_COMP Switch", WCD937X_COMP_L, 0, 1, 0,
wcd937x_get_compander, wcd937x_set_compander),
- SOC_SINGLE_EXT("HPHR_COMP Switch", SND_SOC_NOPM, 1, 1, 0,
+ SOC_SINGLE_EXT("HPHR_COMP Switch", WCD937X_COMP_R, 1, 1, 0,
wcd937x_get_compander, wcd937x_set_compander),
SOC_SINGLE_TLV("HPHL Volume", WCD937X_HPH_L_EN, 0, 20, 1, line_gain),
diff --git a/sound/soc/codecs/wcd937x.h b/sound/soc/codecs/wcd937x.h
index 3ab21bb5846e..d20886a2803a 100644
--- a/sound/soc/codecs/wcd937x.h
+++ b/sound/soc/codecs/wcd937x.h
@@ -552,21 +552,21 @@ int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
struct device *wcd937x_sdw_device_get(struct device_node *np);
#else
-int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd,
+static inline int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd,
struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
return -EOPNOTSUPP;
}
-int wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd,
+static inline int wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd,
struct snd_soc_dai *dai,
void *stream, int direction)
{
return -EOPNOTSUPP;
}
-int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
+static inline int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 62594e7966ab..b384d38654e6 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -47,7 +47,8 @@ enum {
BYT_CHT_ES8316_INTMIC_IN2_MAP,
};
-#define BYT_CHT_ES8316_MAP(quirk) ((quirk) & GENMASK(3, 0))
+#define BYT_CHT_ES8316_MAP_MASK GENMASK(3, 0)
+#define BYT_CHT_ES8316_MAP(quirk) ((quirk) & BYT_CHT_ES8316_MAP_MASK)
#define BYT_CHT_ES8316_SSP0 BIT(16)
#define BYT_CHT_ES8316_MONO_SPEAKER BIT(17)
#define BYT_CHT_ES8316_JD_INVERTED BIT(18)
@@ -60,10 +61,23 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
static void log_quirks(struct device *dev)
{
- if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN1_MAP)
+ int map;
+
+ map = BYT_CHT_ES8316_MAP(quirk);
+ switch (map) {
+ case BYT_CHT_ES8316_INTMIC_IN1_MAP:
dev_info(dev, "quirk IN1_MAP enabled");
- if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN2_MAP)
+ break;
+ case BYT_CHT_ES8316_INTMIC_IN2_MAP:
dev_info(dev, "quirk IN2_MAP enabled");
+ break;
+ default:
+ dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to INTMIC_IN1_MAP\n", map);
+ quirk &= ~BYT_CHT_ES8316_MAP_MASK;
+ quirk |= BYT_CHT_ES8316_INTMIC_IN1_MAP;
+ break;
+ }
+
if (quirk & BYT_CHT_ES8316_SSP0)
dev_info(dev, "quirk SSP0 enabled");
if (quirk & BYT_CHT_ES8316_MONO_SPEAKER)
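
A stand-alone sketch of the fallback added to log_quirks() above, and repeated for the rt5640/rt5651 boards below: mask out the input-map bits of the module-parameter quirk and, if they do not name a known map, warn once and rewrite them to a safe default. GENMASK() is redefined locally and the values are illustrative.

/*
 * Validate a bitfield taken from an untrusted module parameter and
 * fall back to a known-good mapping instead of leaving it unusable.
 */
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define MAP_MASK	GENMASK(3, 0)
#define INTMIC_IN1_MAP	0
#define INTMIC_IN2_MAP	1

static unsigned int quirk = 0xf;	/* pretend a bogus value was passed */

int main(void)
{
	unsigned int map = quirk & MAP_MASK;

	switch (map) {
	case INTMIC_IN1_MAP:
	case INTMIC_IN2_MAP:
		printf("map 0x%x is valid\n", map);
		break;
	default:
		printf("invalid map 0x%x, defaulting to IN1\n", map);
		quirk &= ~MAP_MASK;
		quirk |= INTMIC_IN1_MAP;
		break;
	}
	printf("effective quirk: 0x%x\n", quirk);
	return 0;
}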
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 0f3b8f44e701..bc846558480e 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -68,7 +68,8 @@ enum {
BYT_RT5640_OVCD_SF_1P5 = (RT5640_OVCD_SF_1P5 << 13),
};
-#define BYT_RT5640_MAP(quirk) ((quirk) & GENMASK(3, 0))
+#define BYT_RT5640_MAP_MASK GENMASK(3, 0)
+#define BYT_RT5640_MAP(quirk) ((quirk) & BYT_RT5640_MAP_MASK)
#define BYT_RT5640_JDSRC(quirk) (((quirk) & GENMASK(7, 4)) >> 4)
#define BYT_RT5640_OVCD_TH(quirk) (((quirk) & GENMASK(12, 8)) >> 8)
#define BYT_RT5640_OVCD_SF(quirk) (((quirk) & GENMASK(14, 13)) >> 13)
@@ -140,7 +141,9 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk NO_INTERNAL_MIC_MAP enabled\n");
break;
default:
- dev_err(dev, "quirk map 0x%x is not supported, microphone input will not work\n", map);
+ dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC1_MAP\n", map);
+ byt_rt5640_quirk &= ~BYT_RT5640_MAP_MASK;
+ byt_rt5640_quirk |= BYT_RT5640_DMIC1_MAP;
break;
}
if (byt_rt5640_quirk & BYT_RT5640_HSMIC2_ON_IN1)
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 67c62844ca2a..604a35d380e9 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -58,7 +58,8 @@ enum {
BYT_RT5651_OVCD_SF_1P5 = (RT5651_OVCD_SF_1P5 << 13),
};
-#define BYT_RT5651_MAP(quirk) ((quirk) & GENMASK(3, 0))
+#define BYT_RT5651_MAP_MASK GENMASK(3, 0)
+#define BYT_RT5651_MAP(quirk) ((quirk) & BYT_RT5651_MAP_MASK)
#define BYT_RT5651_JDSRC(quirk) (((quirk) & GENMASK(7, 4)) >> 4)
#define BYT_RT5651_OVCD_TH(quirk) (((quirk) & GENMASK(12, 8)) >> 8)
#define BYT_RT5651_OVCD_SF(quirk) (((quirk) & GENMASK(14, 13)) >> 13)
@@ -100,14 +101,29 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
static void log_quirks(struct device *dev)
{
- if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_DMIC_MAP)
+ int map;
+
+ map = BYT_RT5651_MAP(byt_rt5651_quirk);
+ switch (map) {
+ case BYT_RT5651_DMIC_MAP:
dev_info(dev, "quirk DMIC_MAP enabled");
- if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_MAP)
+ break;
+ case BYT_RT5651_IN1_MAP:
dev_info(dev, "quirk IN1_MAP enabled");
- if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP)
+ break;
+ case BYT_RT5651_IN2_MAP:
dev_info(dev, "quirk IN2_MAP enabled");
- if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_IN2_MAP)
+ break;
+ case BYT_RT5651_IN1_IN2_MAP:
dev_info(dev, "quirk IN1_IN2_MAP enabled");
+ break;
+ default:
+ dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC_MAP\n", map);
+ byt_rt5651_quirk &= ~BYT_RT5651_MAP_MASK;
+ byt_rt5651_quirk |= BYT_RT5651_DMIC_MAP;
+ break;
+ }
+
if (BYT_RT5651_JDSRC(byt_rt5651_quirk)) {
dev_info(dev, "quirk realtek,jack-detect-source %ld\n",
BYT_RT5651_JDSRC(byt_rt5651_quirk));
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 28f03a5f29f7..c013e31d098e 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -841,7 +841,7 @@ static int create_sdw_dailink(struct snd_soc_card *card,
(*codec_conf)++;
}
- if (sof_end->include_sidecar) {
+ if (sof_end->include_sidecar && sof_end->codec_info->add_sidecar) {
ret = sof_end->codec_info->add_sidecar(card, dai_links, codec_conf);
if (ret)
return ret;
diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c
index 288ccd7f8866..6847ae4acbd1 100644
--- a/sound/soc/qcom/sc8280xp.c
+++ b/sound/soc/qcom/sc8280xp.c
@@ -191,8 +191,8 @@ static const struct of_device_id snd_sc8280xp_dt_match[] = {
{.compatible = "qcom,qcm6490-idp-sndcard", "qcm6490"},
{.compatible = "qcom,qcs6490-rb3gen2-sndcard", "qcs6490"},
{.compatible = "qcom,qcs8275-sndcard", "qcs8300"},
- {.compatible = "qcom,qcs9075-sndcard", "qcs9075"},
- {.compatible = "qcom,qcs9100-sndcard", "qcs9100"},
+ {.compatible = "qcom,qcs9075-sndcard", "sa8775p"},
+ {.compatible = "qcom,qcs9100-sndcard", "sa8775p"},
{.compatible = "qcom,sc8280xp-sndcard", "sc8280xp"},
{.compatible = "qcom,sm8450-sndcard", "sm8450"},
{.compatible = "qcom,sm8550-sndcard", "sm8550"},
diff --git a/sound/soc/sof/intel/hda-sdw-bpt.c b/sound/soc/sof/intel/hda-sdw-bpt.c
index 1327f1cad0bc..ff5abccf0d88 100644
--- a/sound/soc/sof/intel/hda-sdw-bpt.c
+++ b/sound/soc/sof/intel/hda-sdw-bpt.c
@@ -150,7 +150,7 @@ static int hda_sdw_bpt_dma_deprepare(struct device *dev, struct hdac_ext_stream
u32 mask;
int ret;
- ret = hda_cl_cleanup(sdev->dev, dmab_bdl, true, sdw_bpt_stream);
+ ret = hda_cl_cleanup(sdev->dev, dmab_bdl, false, sdw_bpt_stream);
if (ret < 0) {
dev_err(sdev->dev, "%s: SDW BPT DMA cleanup failed\n",
__func__);
diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
index 473d416bc910..f449362a2905 100644
--- a/sound/soc/sof/ipc3-topology.c
+++ b/sound/soc/sof/ipc3-topology.c
@@ -2473,11 +2473,6 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
if (ret < 0)
return ret;
- /* free all the scheduler widgets now */
- ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
- if (ret < 0)
- return ret;
-
/*
* Tear down all pipelines associated with PCMs that did not get suspended
* and unset the prepare flag so that they can be set up again during resume.
@@ -2493,6 +2488,11 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
}
}
+ /* free all the scheduler widgets now. This will also power down the secondary cores */
+ ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
+ if (ret < 0)
+ return ret;
+
list_for_each_entry(sroute, &sdev->route_list, list)
sroute->setup = false;
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index 374dc10d10fd..37d72a50c127 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -19,12 +19,14 @@
* struct sof_ipc4_timestamp_info - IPC4 timestamp info
* @host_copier: the host copier of the pcm stream
* @dai_copier: the dai copier of the pcm stream
- * @stream_start_offset: reported by fw in memory window (converted to frames)
- * @stream_end_offset: reported by fw in memory window (converted to frames)
+ * @stream_start_offset: reported by fw in memory window (converted to
+ * frames at host_copier sampling rate)
+ * @stream_end_offset: reported by fw in memory window (converted to
+ * frames at host_copier sampling rate)
* @llp_offset: llp offset in memory window
- * @boundary: wrap boundary should be used for the LLP frame counter
* @delay: Calculated and stored in pointer callback. The stored value is
- * returned in the delay callback.
+ * returned in the delay callback. Expressed in frames at host copier
+ * sampling rate.
*/
struct sof_ipc4_timestamp_info {
struct sof_ipc4_copier *host_copier;
@@ -33,7 +35,6 @@ struct sof_ipc4_timestamp_info {
u64 stream_end_offset;
u32 llp_offset;
- u64 boundary;
snd_pcm_sframes_t delay;
};
@@ -48,6 +49,16 @@ struct sof_ipc4_pcm_stream_priv {
bool chain_dma_allocated;
};
+/*
+ * Modulus to use to compare host and link position counters. The sampling
+ * rates may be different, so the raw hardware counters will wrap
+ * around at different times. To calculate differences, use
+ * DELAY_BOUNDARY as a common modulus. This value must be smaller than
+ * the wrap-around point of any hardware counter, and larger than any
+ * valid delay measurement.
+ */
+#define DELAY_BOUNDARY U32_MAX
+
static inline struct sof_ipc4_timestamp_info *
sof_ipc4_sps_to_time_info(struct snd_sof_pcm_stream *sps)
{
@@ -639,14 +650,14 @@ static int ipc4_ssp_dai_config_pcm_params_match(struct snd_sof_dev *sdev,
if (params_rate(params) == le32_to_cpu(hw_config->fsync_rate) &&
params_width(params) == le32_to_cpu(hw_config->tdm_slot_width) &&
- params_channels(params) == le32_to_cpu(hw_config->tdm_slots)) {
+ params_channels(params) <= le32_to_cpu(hw_config->tdm_slots)) {
current_config = le32_to_cpu(hw_config->id);
partial_match = false;
/* best match found */
break;
} else if (current_config < 0 &&
params_rate(params) == le32_to_cpu(hw_config->fsync_rate) &&
- params_channels(params) == le32_to_cpu(hw_config->tdm_slots)) {
+ params_channels(params) <= le32_to_cpu(hw_config->tdm_slots)) {
current_config = le32_to_cpu(hw_config->id);
partial_match = true;
/* keep looking for better match */
@@ -993,6 +1004,35 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
return 0;
}
+static u64 sof_ipc4_frames_dai_to_host(struct sof_ipc4_timestamp_info *time_info, u64 value)
+{
+ u64 dai_rate, host_rate;
+
+ if (!time_info->dai_copier || !time_info->host_copier)
+ return value;
+
+ /*
+ * copiers do not change sampling rate, so we can use the
+ * out_format independently of stream direction
+ */
+ dai_rate = time_info->dai_copier->data.out_format.sampling_frequency;
+ host_rate = time_info->host_copier->data.out_format.sampling_frequency;
+
+ if (!dai_rate || !host_rate || dai_rate == host_rate)
+ return value;
+
+ /* take care not to overflow u64, rates can be up to 768000 */
+ if (value > U32_MAX) {
+ value = div64_u64(value, dai_rate);
+ value *= host_rate;
+ } else {
+ value *= host_rate;
+ value = div64_u64(value, dai_rate);
+ }
+
+ return value;
+}
+
static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct snd_sof_pcm_stream *sps,
@@ -1012,7 +1052,7 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
return -EINVAL;
} else if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_CHAIN_DMA_NODE_ID) {
/*
- * While the firmware does not supports time_info reporting for
+ * While the firmware does not support time_info reporting for
* streams using ChainDMA, it is granted that ChainDMA can only
* be used on Host+Link pairs where the link position is
* accessible from the host side.
@@ -1020,10 +1060,16 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
* Enable delay calculation in case of ChainDMA via host
* accessible registers.
*
- * The ChainDMA uses 2x 1ms ping-pong buffer, dai side starts
- * when 1ms data is available
+ * The ChainDMA prefills the link DMA with a preamble
+ * of zero samples. Set the stream start offset based
+ * on the size of the preamble (the driver-provided fifo
+ * size multiplied by 2.5). We add 1ms of margin as the
+ * FW will align the buffer size to a DMA hardware
+ * alignment that is not known to the host.
*/
- time_info->stream_start_offset = substream->runtime->rate / MSEC_PER_SEC;
+ int pre_ms = SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS * 5 / 2 + 1;
+
+ time_info->stream_start_offset = pre_ms * substream->runtime->rate / MSEC_PER_SEC;
goto out;
}
@@ -1043,14 +1089,13 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
time_info->stream_end_offset = ppl_reg.stream_end_offset;
do_div(time_info->stream_end_offset, dai_sample_size);
+ /* convert to host frame time */
+ time_info->stream_start_offset =
+ sof_ipc4_frames_dai_to_host(time_info, time_info->stream_start_offset);
+ time_info->stream_end_offset =
+ sof_ipc4_frames_dai_to_host(time_info, time_info->stream_end_offset);
+
out:
- /*
- * Calculate the wrap boundary need to be used for delay calculation
- * The host counter is in bytes, it will wrap earlier than the frames
- * based link counter.
- */
- time_info->boundary = div64_u64(~((u64)0),
- frames_to_bytes(substream->runtime, 1));
/* Initialize the delay value to 0 (no delay) */
time_info->delay = 0;
@@ -1093,6 +1138,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
/* For delay calculation we need the host counter */
host_cnt = snd_sof_pcm_get_host_byte_counter(sdev, component, substream);
+
+ /* Store the original value to host_ptr */
host_ptr = host_cnt;
/* convert the host_cnt to frames */
@@ -1111,6 +1158,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp));
dai_cnt = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
}
+
+ dai_cnt = sof_ipc4_frames_dai_to_host(time_info, dai_cnt);
dai_cnt += time_info->stream_end_offset;
/* In two cases dai dma counter is not accurate
@@ -1144,8 +1193,9 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
dai_cnt -= time_info->stream_start_offset;
}
- /* Wrap the dai counter at the boundary where the host counter wraps */
- div64_u64_rem(dai_cnt, time_info->boundary, &dai_cnt);
+ /* Convert to a common base before comparisons */
+ dai_cnt &= DELAY_BOUNDARY;
+ host_cnt &= DELAY_BOUNDARY;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
head_cnt = host_cnt;
@@ -1155,14 +1205,11 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
tail_cnt = host_cnt;
}
- if (head_cnt < tail_cnt) {
- time_info->delay = time_info->boundary - tail_cnt + head_cnt;
- goto out;
- }
-
- time_info->delay = head_cnt - tail_cnt;
+ if (unlikely(head_cnt < tail_cnt))
+ time_info->delay = DELAY_BOUNDARY - tail_cnt + head_cnt;
+ else
+ time_info->delay = head_cnt - tail_cnt;
-out:
/*
* Convert the host byte counter to PCM pointer which wraps in buffer
* and it is in frames
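The ipc4-pcm.c change above scales the DAI-side frame counter to the host copier's sampling rate before comparing it against the host counter, and masks both with DELAY_BOUNDARY so they share a common modulus. For reference, a minimal standalone sketch of the overflow-safe scaling (scale_frames is a hypothetical helper name for illustration, not kernel code):

/*
 * Standalone sketch: scale a frame counter from one sampling rate to
 * another without overflowing 64-bit arithmetic, using the same
 * order-of-operations trick as the driver change above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_frames(uint64_t value, uint64_t from_rate, uint64_t to_rate)
{
	if (!from_rate || !to_rate || from_rate == to_rate)
		return value;

	/*
	 * Rates can be up to 768000, so value * to_rate can overflow a
	 * u64 once value exceeds U32_MAX; divide first in that case
	 * (losing a little precision), multiply first otherwise.
	 */
	if (value > UINT32_MAX)
		return (value / from_rate) * to_rate;

	return (value * to_rate) / from_rate;
}

int main(void)
{
	/* e.g. one second captured at 16 kHz, expressed at 48 kHz: prints 48000 */
	printf("%llu\n", (unsigned long long)scale_frames(16000, 16000, 48000));
	return 0;
}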
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index 591ee30551ba..c93db452bbc0 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -33,7 +33,6 @@ MODULE_PARM_DESC(ipc4_ignore_cpc,
#define SOF_IPC4_GAIN_PARAM_ID 0
#define SOF_IPC4_TPLG_ABI_SIZE 6
-#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2
static DEFINE_IDA(alh_group_ida);
static DEFINE_IDA(pipeline_ida);
diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
index 14ba58d2be03..659e1ae0a85f 100644
--- a/sound/soc/sof/ipc4-topology.h
+++ b/sound/soc/sof/ipc4-topology.h
@@ -247,6 +247,8 @@ struct sof_ipc4_dma_stream_ch_map {
#define SOF_IPC4_DMA_METHOD_HDA 1
#define SOF_IPC4_DMA_METHOD_GPDMA 2 /* defined for consistency but not used */
+#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2
+
/**
* struct sof_ipc4_dma_config: DMA configuration
* @dma_method: HDAudio or GPDMA
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index c199ade200c2..d2f5aa085f8e 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -116,6 +116,7 @@
#include "sched.h"
#include "signal.h"
#include "unistd.h"
+#include "stdbool.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
index ba950f0e7338..2c1ad23b9b5c 100644
--- a/tools/include/nolibc/std.h
+++ b/tools/include/nolibc/std.h
@@ -29,6 +29,6 @@ typedef unsigned long nlink_t;
typedef signed long off_t;
typedef signed long blksize_t;
typedef signed long blkcnt_t;
-typedef __kernel_old_time_t time_t;
+typedef __kernel_time_t time_t;
#endif /* _NOLIBC_STD_H */
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index 295e71d34aba..90aadad31f6c 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -238,6 +238,19 @@ static __attribute__((unused))
int sys_dup2(int old, int new)
{
#if defined(__NR_dup3)
+ int ret, nr_fcntl;
+
+#ifdef __NR_fcntl64
+ nr_fcntl = __NR_fcntl64;
+#else
+ nr_fcntl = __NR_fcntl;
+#endif
+
+ if (old == new) {
+ ret = my_syscall2(nr_fcntl, old, F_GETFD);
+ return ret < 0 ? ret : old;
+ }
+
return my_syscall3(__NR_dup3, old, new, 0);
#elif defined(__NR_dup2)
return my_syscall2(__NR_dup2, old, new);
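The nolibc change above works because dup3() rejects old == new with EINVAL, while dup2() must only validate old and return it. A hedged userspace sketch of the same emulation (assumes glibc on Linux; dup2_via_dup3 is an illustrative name, not a nolibc symbol):

/* Emulate dup2(old, new) on top of dup3(), which unlike dup2()
 * fails with EINVAL when old == new.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int dup2_via_dup3(int old, int new)
{
	if (old == new) {
		/* fcntl() returns -1 and sets errno (e.g. EBADF) if old is invalid */
		return fcntl(old, F_GETFD) < 0 ? -1 : old;
	}
	return dup3(old, new, 0);
}

int main(void)
{
	/* duplicating stdout onto itself must succeed and return 1 */
	return dup2_via_dup3(1, 1) == 1 ? 0 : 1;
}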
diff --git a/tools/include/nolibc/time.h b/tools/include/nolibc/time.h
index d02bc44d2643..e9c1b976791a 100644
--- a/tools/include/nolibc/time.h
+++ b/tools/include/nolibc/time.h
@@ -133,7 +133,8 @@ static __attribute__((unused))
int clock_nanosleep(clockid_t clockid, int flags, const struct timespec *rqtp,
struct timespec *rmtp)
{
- return __sysret(sys_clock_nanosleep(clockid, flags, rqtp, rmtp));
+ /* Directly return a positive error number */
+ return -sys_clock_nanosleep(clockid, flags, rqtp, rmtp);
}
static __inline__
@@ -145,7 +146,7 @@ double difftime(time_t time1, time_t time2)
static __inline__
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
{
- return clock_nanosleep(CLOCK_REALTIME, 0, rqtp, rmtp);
+ return __sysret(sys_clock_nanosleep(CLOCK_REALTIME, 0, rqtp, rmtp));
}
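The time.h change mirrors POSIX: clock_nanosleep() returns the error number directly and does not touch errno, while nanosleep() returns -1 and sets errno. A small illustrative program showing how callers check each (assumes a standard libc, not nolibc itself):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 };
	int err;

	/* clock_nanosleep() reports failure as a positive error number */
	err = clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
	if (err)
		fprintf(stderr, "clock_nanosleep: %s\n", strerror(err));

	/* nanosleep() reports failure as -1 with errno set */
	if (nanosleep(&ts, NULL) < 0)
		fprintf(stderr, "nanosleep: %s\n", strerror(errno));

	return 0;
}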
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 8f5a81b672e1..8f9261279b92 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1013,35 +1013,33 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
const struct btf_member *kern_data_member;
struct btf *btf = NULL;
__s32 kern_vtype_id, kern_type_id;
- char tname[256];
+ char tname[192], stname[256];
__u32 i;
snprintf(tname, sizeof(tname), "%.*s",
(int)bpf_core_essential_name_len(tname_raw), tname_raw);
- kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
- &btf, mod_btf);
- if (kern_type_id < 0) {
- pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
- tname);
- return kern_type_id;
- }
- kern_type = btf__type_by_id(btf, kern_type_id);
+ snprintf(stname, sizeof(stname), "%s%s", STRUCT_OPS_VALUE_PREFIX, tname);
- /* Find the corresponding "map_value" type that will be used
- * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
- * find "struct bpf_struct_ops_tcp_congestion_ops" from the
- * btf_vmlinux.
+ /* Look for the corresponding "map_value" type that will be used
+ * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, figure out the btf
+ * and the mod_btf.
+ * For example, find "struct bpf_struct_ops_tcp_congestion_ops".
*/
- kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
- tname, BTF_KIND_STRUCT);
+ kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf);
if (kern_vtype_id < 0) {
- pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
- STRUCT_OPS_VALUE_PREFIX, tname);
+ pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname);
return kern_vtype_id;
}
kern_vtype = btf__type_by_id(btf, kern_vtype_id);
+ kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ if (kern_type_id < 0) {
+ pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname);
+ return kern_type_id;
+ }
+ kern_type = btf__type_by_id(btf, kern_type_id);
+
/* Find "struct tcp_congestion_ops" from
* struct bpf_struct_ops_tcp_congestion_ops {
* [ ... ]
@@ -1054,8 +1052,8 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
break;
}
if (i == btf_vlen(kern_vtype)) {
- pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
- tname, STRUCT_OPS_VALUE_PREFIX, tname);
+ pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
+ tname, stname);
return -EINVAL;
}
@@ -5093,6 +5091,16 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
return false;
}
+ /*
+ * bpf_get_map_info_by_fd() for DEVMAP will always return flags with
+ * BPF_F_RDONLY_PROG set, but it generally is not set at map creation time.
+ * Thus, ignore the BPF_F_RDONLY_PROG flag in the flags returned from
+ * bpf_get_map_info_by_fd() when checking for compatibility with an
+ * existing DEVMAP.
+ */
+ if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
+ map_info.map_flags &= ~BPF_F_RDONLY_PROG;
+
return (map_info.type == map->def.type &&
map_info.key_size == map->def.key_size &&
map_info.value_size == map->def.value_size &&
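A hedged sketch of the flag normalization added above: when probing an existing DEVMAP for reuse, the kernel reports BPF_F_RDONLY_PROG as set regardless of the creation flags, so it is masked out before comparing (assumes libbpf >= 1.2 for bpf_map_get_info_by_fd(); devmap_flags_match is an illustrative helper, not a libbpf API):

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <string.h>

int devmap_flags_match(int map_fd, __u32 wanted_flags)
{
	struct bpf_map_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (bpf_map_get_info_by_fd(map_fd, &info, &len))
		return -1;

	/* DEVMAPs always report BPF_F_RDONLY_PROG; ignore it for the comparison */
	if (info.type == BPF_MAP_TYPE_DEVMAP || info.type == BPF_MAP_TYPE_DEVMAP_HASH)
		info.map_flags &= ~BPF_F_RDONLY_PROG;

	return info.map_flags == wanted_flags;
}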
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 455a957cb702..2b86e21190d3 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -252,7 +252,7 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
* @return 0, on success; negative error code, otherwise, error code is
* stored in errno
*/
-int bpf_object__prepare(struct bpf_object *obj);
+LIBBPF_API int bpf_object__prepare(struct bpf_object *obj);
/**
* @brief **bpf_object__load()** loads BPF object into kernel.
diff --git a/tools/net/ynl/pyynl/lib/ynl.py b/tools/net/ynl/pyynl/lib/ynl.py
index 8244a5f440b2..15ddb0b1adb6 100644
--- a/tools/net/ynl/pyynl/lib/ynl.py
+++ b/tools/net/ynl/pyynl/lib/ynl.py
@@ -746,7 +746,7 @@ class YnlFamily(SpecFamily):
subdict = self._decode(NlAttrs(attr.raw, offset), msg_format.attr_set)
decoded.update(subdict)
else:
- raise Exception(f"Unknown attribute-set '{attr_space}' when decoding '{attr_spec.name}'")
+ raise Exception(f"Unknown attribute-set '{msg_format.attr_set}' when decoding '{attr_spec.name}'")
return decoded
def _decode(self, attrs, space, outer_attrs = None):
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 9741e7503591..de93067a5da3 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -995,7 +995,7 @@ static acpi_status osl_list_customized_tables(char *directory)
{
void *table_dir;
u32 instance;
- char temp_name[ACPI_NAMESEG_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
char *filename;
acpi_status status = AE_OK;
@@ -1312,7 +1312,7 @@ osl_get_customized_table(char *pathname,
{
void *table_dir;
u32 current_instance = 0;
- char temp_name[ACPI_NAMESEG_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
char table_filename[PATH_MAX];
char *filename;
acpi_status status;
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
index 68a064ce598c..8e3b6be53839 100644
--- a/tools/testing/nvdimm/test/ndtest.c
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -850,11 +850,22 @@ static int ndtest_probe(struct platform_device *pdev)
p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->dcr_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->label_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
-
+ if (!p->dimm_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
rc = ndtest_nvdimm_init(p);
if (rc)
goto err;
diff --git a/tools/testing/selftests/arm64/abi/tpidr2.c b/tools/testing/selftests/arm64/abi/tpidr2.c
index f58a9f89b952..4c89ab0f1010 100644
--- a/tools/testing/selftests/arm64/abi/tpidr2.c
+++ b/tools/testing/selftests/arm64/abi/tpidr2.c
@@ -227,10 +227,10 @@ int main(int argc, char **argv)
ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
if (ret >= 0) {
ksft_test_result(default_value(), "default_value\n");
- ksft_test_result(write_read, "write_read\n");
- ksft_test_result(write_sleep_read, "write_sleep_read\n");
- ksft_test_result(write_fork_read, "write_fork_read\n");
- ksft_test_result(write_clone_read, "write_clone_read\n");
+ ksft_test_result(write_read(), "write_read\n");
+ ksft_test_result(write_sleep_read(), "write_sleep_read\n");
+ ksft_test_result(write_fork_read(), "write_fork_read\n");
+ ksft_test_result(write_clone_read(), "write_clone_read\n");
} else {
ksft_print_msg("SME support not present\n");
diff --git a/tools/testing/selftests/arm64/gcs/basic-gcs.c b/tools/testing/selftests/arm64/gcs/basic-gcs.c
index 54f9c888249d..100d2a983155 100644
--- a/tools/testing/selftests/arm64/gcs/basic-gcs.c
+++ b/tools/testing/selftests/arm64/gcs/basic-gcs.c
@@ -410,7 +410,7 @@ int main(void)
}
/* One last test: disable GCS, we can do this one time */
- my_syscall5(__NR_prctl, PR_SET_SHADOW_STACK_STATUS, 0, 0, 0, 0);
+ ret = my_syscall5(__NR_prctl, PR_SET_SHADOW_STACK_STATUS, 0, 0, 0, 0);
if (ret != 0)
ksft_print_msg("Failed to disable GCS: %d\n", ret);
diff --git a/tools/testing/selftests/arm64/pauth/exec_target.c b/tools/testing/selftests/arm64/pauth/exec_target.c
index 4435600ca400..e597861b26d6 100644
--- a/tools/testing/selftests/arm64/pauth/exec_target.c
+++ b/tools/testing/selftests/arm64/pauth/exec_target.c
@@ -13,7 +13,12 @@ int main(void)
unsigned long hwcaps;
size_t val;
- fread(&val, sizeof(size_t), 1, stdin);
+ size_t size = fread(&val, sizeof(size_t), 1, stdin);
+
+ if (size != 1) {
+ fprintf(stderr, "Could not read input from stdin\n");
+ return EXIT_FAILURE;
+ }
/* don't try to execute illegal (unimplemented) instructions) caller
* should have checked this and keep worker simple
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 4863106034df..fd6b370c8169 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -137,7 +137,7 @@ TEST_GEN_PROGS_EXTENDED = \
xdping \
xskxceiver
-TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
+TEST_GEN_FILES += $(TEST_KMODS) liburandom_read.so urandom_read sign-file uprobe_multi
ifneq ($(V),1)
submake_extras := feature_display=0
@@ -398,7 +398,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif
-# vmlinux.h is first dumped to a temprorary file and then compared to
+# vmlinux.h is first dumped to a temporary file and then compared to
# the previous version. This helps to avoid unnecessary re-builds of
# $(TRUNNER_BPF_OBJS)
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index ddd73d06a1eb..3ecc226ea7b2 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -499,7 +499,7 @@ extern const struct bench bench_rename_rawtp;
extern const struct bench bench_rename_fentry;
extern const struct bench bench_rename_fexit;
-/* pure counting benchmarks to establish theoretical lmits */
+/* pure counting benchmarks to establish theoretical limits */
extern const struct bench bench_trig_usermode_count;
extern const struct bench bench_trig_syscall_count;
extern const struct bench bench_trig_kernel_count;
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 82903585c870..10cba526d3e6 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -63,7 +63,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
- * do, it should be 8 regardless of host architecture, becaues BPF
+ * do, it should be 8 regardless of host architecture, because BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c
index 241b2c8c6e0f..c534b4d5f9da 100644
--- a/tools/testing/selftests/bpf/prog_tests/fd_array.c
+++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c
@@ -293,7 +293,7 @@ static int get_btf_id_by_fd(int btf_fd, __u32 *id)
* 1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1
* 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
* 3) Close the btf_fd, now refcnt=1
- * Wait and check that BTF stil exists.
+ * Wait and check that BTF still exists.
*/
static void check_fd_array_cnt__referenced_btfs(void)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index e19ef509ebf8..171706e78da8 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -422,220 +422,6 @@ static void test_unique_match(void)
kprobe_multi__destroy(skel);
}
-static size_t symbol_hash(long key, void *ctx __maybe_unused)
-{
- return str_hash((const char *) key);
-}
-
-static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
-{
- return strcmp((const char *) key1, (const char *) key2) == 0;
-}
-
-static bool is_invalid_entry(char *buf, bool kernel)
-{
- if (kernel && strchr(buf, '['))
- return true;
- if (!kernel && !strchr(buf, '['))
- return true;
- return false;
-}
-
-static bool skip_entry(char *name)
-{
- /*
- * We attach to almost all kernel functions and some of them
- * will cause 'suspicious RCU usage' when fprobe is attached
- * to them. Filter out the current culprits - arch_cpu_idle
- * default_idle and rcu_* functions.
- */
- if (!strcmp(name, "arch_cpu_idle"))
- return true;
- if (!strcmp(name, "default_idle"))
- return true;
- if (!strncmp(name, "rcu_", 4))
- return true;
- if (!strcmp(name, "bpf_dispatcher_xdp_func"))
- return true;
- if (!strncmp(name, "__ftrace_invalid_address__",
- sizeof("__ftrace_invalid_address__") - 1))
- return true;
- return false;
-}
-
-/* Do comparision by ignoring '.llvm.<hash>' suffixes. */
-static int compare_name(const char *name1, const char *name2)
-{
- const char *res1, *res2;
- int len1, len2;
-
- res1 = strstr(name1, ".llvm.");
- res2 = strstr(name2, ".llvm.");
- len1 = res1 ? res1 - name1 : strlen(name1);
- len2 = res2 ? res2 - name2 : strlen(name2);
-
- if (len1 == len2)
- return strncmp(name1, name2, len1);
- if (len1 < len2)
- return strncmp(name1, name2, len1) <= 0 ? -1 : 1;
- return strncmp(name1, name2, len2) >= 0 ? 1 : -1;
-}
-
-static int load_kallsyms_compare(const void *p1, const void *p2)
-{
- return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
-}
-
-static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
-{
- return compare_name(p1, p2->name);
-}
-
-static int get_syms(char ***symsp, size_t *cntp, bool kernel)
-{
- size_t cap = 0, cnt = 0;
- char *name = NULL, *ksym_name, **syms = NULL;
- struct hashmap *map;
- struct ksyms *ksyms;
- struct ksym *ks;
- char buf[256];
- FILE *f;
- int err = 0;
-
- ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
- if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_custom_local"))
- return -EINVAL;
-
- /*
- * The available_filter_functions contains many duplicates,
- * but other than that all symbols are usable in kprobe multi
- * interface.
- * Filtering out duplicates by using hashmap__add, which won't
- * add existing entry.
- */
-
- if (access("/sys/kernel/tracing/trace", F_OK) == 0)
- f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
- else
- f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
-
- if (!f)
- return -EINVAL;
-
- map = hashmap__new(symbol_hash, symbol_equal, NULL);
- if (IS_ERR(map)) {
- err = libbpf_get_error(map);
- goto error;
- }
-
- while (fgets(buf, sizeof(buf), f)) {
- if (is_invalid_entry(buf, kernel))
- continue;
-
- free(name);
- if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
- continue;
- if (skip_entry(name))
- continue;
-
- ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
- if (!ks) {
- err = -EINVAL;
- goto error;
- }
-
- ksym_name = ks->name;
- err = hashmap__add(map, ksym_name, 0);
- if (err == -EEXIST) {
- err = 0;
- continue;
- }
- if (err)
- goto error;
-
- err = libbpf_ensure_mem((void **) &syms, &cap,
- sizeof(*syms), cnt + 1);
- if (err)
- goto error;
-
- syms[cnt++] = ksym_name;
- }
-
- *symsp = syms;
- *cntp = cnt;
-
-error:
- free(name);
- fclose(f);
- hashmap__free(map);
- if (err)
- free(syms);
- return err;
-}
-
-static int get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
-{
- unsigned long *addr, *addrs, *tmp_addrs;
- int err = 0, max_cnt, inc_cnt;
- char *name = NULL;
- size_t cnt = 0;
- char buf[256];
- FILE *f;
-
- if (access("/sys/kernel/tracing/trace", F_OK) == 0)
- f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
- else
- f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");
-
- if (!f)
- return -ENOENT;
-
- /* In my local setup, the number of entries is 50k+ so Let us initially
- * allocate space to hold 64k entries. If 64k is not enough, incrementally
- * increase 1k each time.
- */
- max_cnt = 65536;
- inc_cnt = 1024;
- addrs = malloc(max_cnt * sizeof(long));
- if (addrs == NULL) {
- err = -ENOMEM;
- goto error;
- }
-
- while (fgets(buf, sizeof(buf), f)) {
- if (is_invalid_entry(buf, kernel))
- continue;
-
- free(name);
- if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
- continue;
- if (skip_entry(name))
- continue;
-
- if (cnt == max_cnt) {
- max_cnt += inc_cnt;
- tmp_addrs = realloc(addrs, max_cnt);
- if (!tmp_addrs) {
- err = -ENOMEM;
- goto error;
- }
- addrs = tmp_addrs;
- }
-
- addrs[cnt++] = (unsigned long)addr;
- }
-
- *addrsp = addrs;
- *cntp = cnt;
-
-error:
- free(name);
- fclose(f);
- if (err)
- free(addrs);
- return err;
-}
-
static void do_bench_test(struct kprobe_multi_empty *skel, struct bpf_kprobe_multi_opts *opts)
{
long attach_start_ns, attach_end_ns;
@@ -670,7 +456,7 @@ static void test_kprobe_multi_bench_attach(bool kernel)
char **syms = NULL;
size_t cnt = 0;
- if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
+ if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms"))
return;
skel = kprobe_multi_empty__open_and_load();
@@ -696,13 +482,13 @@ static void test_kprobe_multi_bench_attach_addr(bool kernel)
size_t cnt = 0;
int err;
- err = get_addrs(&addrs, &cnt, kernel);
+ err = bpf_get_addrs(&addrs, &cnt, kernel);
if (err == -ENOENT) {
test__skip();
return;
}
- if (!ASSERT_OK(err, "get_addrs"))
+ if (!ASSERT_OK(err, "bpf_get_addrs"))
return;
skel = kprobe_multi_empty__open_and_load();
diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c
index 6d391d95f96e..70fa7ae93173 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c
@@ -90,7 +90,7 @@ void test_module_attach(void)
test_module_attach__detach(skel);
- /* attach fentry/fexit and make sure it get's module reference */
+ /* attach fentry/fexit and make sure it gets module reference */
link = bpf_program__attach(skel->progs.handle_fentry);
if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index e261b0e872db..d93a0c7b1786 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -623,7 +623,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
*newx = range(t, x.a, x.b);
*newy = range(t, y.a + 1, y.b);
} else if (x.a == x.b && x.b == y.b) {
- /* X is a constant matching rigth side of Y */
+ /* X is a constant matching right side of Y */
*newx = range(t, x.a, x.b);
*newy = range(t, y.a, y.b - 1);
} else if (y.a == y.b && x.a == y.a) {
@@ -631,7 +631,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
*newx = range(t, x.a + 1, x.b);
*newy = range(t, y.a, y.b);
} else if (y.a == y.b && x.b == y.b) {
- /* Y is a constant matching rigth side of X */
+ /* Y is a constant matching right side of X */
*newx = range(t, x.a, x.b - 1);
*newy = range(t, y.a, y.b);
} else {
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
index b7ba5cd47d96..271b5cc9fc01 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -39,7 +39,7 @@ void test_stacktrace_build_id(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index 0832fd787457..b277dddd5af7 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -66,7 +66,7 @@ void test_stacktrace_build_id_nmi(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
index df59e4ae2951..84a7e405e912 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -50,7 +50,7 @@ void test_stacktrace_map(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
index c6ef06f55cdb..e0cb4697b4b3 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -46,7 +46,7 @@ void test_stacktrace_map_raw_tp(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
index 1932b1e0685c..dc2ccf6a14d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
@@ -40,7 +40,7 @@ void test_stacktrace_map_skip(void)
skel->bss->control = 1;
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
diff --git a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
index 1654a530aa3d..4e51785e7606 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
@@ -101,7 +101,7 @@ static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
tp->snd_cwnd = pkts_in_flight + sndcnt;
}
-/* Decide wheather to run the increase function of congestion control. */
+/* Decide whether to run the increase function of congestion control. */
static bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
if (tcp_sk(sk)->reordering > TCP_REORDERING)
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 7cd73e75f52a..32c511bcd60b 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
-/* WARNING: This implemenation is not necessarily the same
+/* WARNING: This implementation is not necessarily the same
* as the tcp_dctcp.c. The purpose is mainly for testing
* the kernel BPF logic.
*/
diff --git a/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
index 544e5ac90461..d09bbd8ae8a8 100644
--- a/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
+++ b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
@@ -12,7 +12,7 @@
SEC("freplace/connect_v4_prog")
int new_connect_v4_prog(struct bpf_sock_addr *ctx)
{
- // return value thats in invalid range
+ // return value that's in invalid range
return 255;
}
diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c
index f41257eadbb2..b381ac0c736c 100644
--- a/tools/testing/selftests/bpf/progs/iters_state_safety.c
+++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c
@@ -345,7 +345,7 @@ int __naked read_from_iter_slot_fail(void)
"r3 = 1000;"
"call %[bpf_iter_num_new];"
- /* attemp to leak bpf_iter_num state */
+ /* attempt to leak bpf_iter_num state */
"r7 = *(u64 *)(r6 + 0);"
"r8 = *(u64 *)(r6 + 8);"
diff --git a/tools/testing/selftests/bpf/progs/rbtree_search.c b/tools/testing/selftests/bpf/progs/rbtree_search.c
index 098ef970fac1..b05565d1db0d 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_search.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_search.c
@@ -183,7 +183,7 @@ long test_##op##_spinlock_##dolock(void *ctx) \
}
/*
- * Use a spearate MSG macro instead of passing to TEST_XXX(..., MSG)
+ * Use a separate MSG macro instead of passing to TEST_XXX(..., MSG)
* to ensure the message itself is not in the bpf prog lineinfo
* which the verifier includes in its log.
* Otherwise, the test_loader will incorrectly match the prog lineinfo
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
index 36386b3c23a1..2b98b7710816 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
@@ -9,7 +9,7 @@ void bpf_task_release(struct task_struct *p) __ksym;
/* This test struct_ops BPF programs returning referenced kptr. The verifier should
* allow a referenced kptr or a NULL pointer to be returned. A referenced kptr to task
- * here is acquried automatically as the task argument is tagged with "__ref".
+ * here is acquired automatically as the task argument is tagged with "__ref".
*/
SEC("struct_ops/test_return_ref_kptr")
struct task_struct *BPF_PROG(kptr_return, int dummy,
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
index 76dcb6089d7f..9c0a65466356 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
@@ -9,7 +9,7 @@ __attribute__((nomerge)) extern void bpf_task_release(struct task_struct *p) __k
/* This is a test BPF program that uses struct_ops to access a referenced
* kptr argument. This is a test for the verifier to ensure that it
- * 1) recongnizes the task as a referenced object (i.e., ref_obj_id > 0), and
+ * 1) recognizes the task as a referenced object (i.e., ref_obj_id > 0), and
* 2) the same reference can be acquired from multiple paths as long as it
* has not been released.
*/
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index f344c6835e84..823169fb6e4c 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -129,7 +129,7 @@ typedef uint8_t *net_ptr __attribute__((align_value(8)));
typedef struct buf {
struct __sk_buff *skb;
net_ptr head;
- /* NB: tail musn't have alignment other than 1, otherwise
+ /* NB: tail mustn't have alignment other than 1, otherwise
* LLVM will go and eliminate code, e.g. when checking packet lengths.
*/
uint8_t *const tail;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
index d0f7670351e5..dfd4a2710391 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -494,7 +494,7 @@ static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_header
*offset += sizeof(*next_hop);
- /* Skip the remainig next hops (may be zero). */
+ /* Skip the remaining next hops (may be zero). */
return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index 540181c115a8..ef00d38b0a8d 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -23,7 +23,6 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(max_entries, 2);
__type(key, int);
__type(value, __u32);
} perf_event_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/uretprobe_stack.c b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
index 9fdcf396b8f4..a2951e2f1711 100644
--- a/tools/testing/selftests/bpf/progs/uretprobe_stack.c
+++ b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
@@ -26,8 +26,8 @@ int usdt_len;
SEC("uprobe//proc/self/exe:target_1")
int BPF_UPROBE(uprobe_1)
{
- /* target_1 is recursive wit depth of 2, so we capture two separate
- * stack traces, depending on which occurence it is
+ /* target_1 is recursive with depth of 2, so we capture two separate
+ * stack traces, depending on which occurrence it is
*/
static bool recur = false;
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index 7c5e5e6d10eb..dba3ca728f6e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -349,7 +349,7 @@ __naked void precision_two_ids(void)
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
-/* check thar r0 and r6 have different IDs after 'if',
+/* check that r0 and r6 have different IDs after 'if',
* collect_linked_regs() can't tie more than 6 registers for a single insn.
*/
__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c
index 1d36d01b746e..f345466bca68 100644
--- a/tools/testing/selftests/bpf/progs/verifier_var_off.c
+++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c
@@ -114,8 +114,8 @@ __naked void stack_write_priv_vs_unpriv(void)
}
/* Similar to the previous test, but this time also perform a read from the
- * address written to with a variable offset. The read is allowed, showing that,
- * after a variable-offset write, a priviledged program can read the slots that
+ * address written to with a variable offset. The read is allowed, showing that,
+ * after a variable-offset write, a privileged program can read the slots that
* were in the range of that write (even if the verifier doesn't actually know if
* the slot being read was really written to or not.
*
@@ -157,7 +157,7 @@ __naked void stack_write_followed_by_read(void)
SEC("socket")
__description("variable-offset stack write clobbers spilled regs")
__failure
-/* In the priviledged case, dereferencing a spilled-and-then-filled
+/* In the privileged case, dereferencing a spilled-and-then-filled
* register is rejected because the previous variable offset stack
* write might have overwritten the spilled pointer (i.e. we lose track
* of the spilled register when we analyze the write).
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index fd2da2234cc9..76568db7a664 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -1372,7 +1372,7 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
} else
fprintf(stderr, "unknown test\n");
out:
- /* Detatch and zero all the maps */
+ /* Detach and zero all the maps */
bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS);
for (i = 0; i < ARRAY_SIZE(links); i++) {
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 595194453ff8..35b4893ccdf8 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -15,20 +15,18 @@
#include <bpf/libbpf.h>
#include <sys/ioctl.h>
#include <linux/rtnetlink.h>
-#include <signal.h>
#include <linux/perf_event.h>
-#include <linux/err.h>
-#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "test_tcpnotify.h"
-#include "trace_helpers.h"
#include "testing_helpers.h"
#define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L)
pthread_t tid;
+static bool exit_thread;
+
int rx_callbacks;
static void dummyfn(void *ctx, int cpu, void *data, __u32 size)
@@ -45,7 +43,7 @@ void tcp_notifier_poller(struct perf_buffer *pb)
{
int err;
- while (1) {
+ while (!exit_thread) {
err = perf_buffer__poll(pb, 100);
if (err < 0 && err != -EINTR) {
printf("failed perf_buffer__poll: %d\n", err);
@@ -78,15 +76,10 @@ int main(int argc, char **argv)
int error = EXIT_FAILURE;
struct bpf_object *obj;
char test_script[80];
- cpu_set_t cpuset;
__u32 key = 0;
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
- CPU_ZERO(&cpuset);
- CPU_SET(0, &cpuset);
- pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
-
cg_fd = cgroup_setup_and_join(cg_path);
if (cg_fd < 0)
goto err;
@@ -151,6 +144,13 @@ int main(int argc, char **argv)
sleep(10);
+ exit_thread = true;
+ int ret = pthread_join(tid, NULL);
+ if (ret) {
+ printf("FAILED: pthread_join\n");
+ goto err;
+ }
+
if (verify_result(&g)) {
printf("FAILED: Wrong stats Expected %d calls, got %d\n",
g.ncalls, rx_callbacks);
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 81943c6254e6..03f223333aa4 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -17,6 +17,7 @@
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
+#include "bpf/hashmap.h"
#include "bpf/libbpf_internal.h"
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
@@ -519,3 +520,216 @@ void read_trace_pipe(void)
{
read_trace_pipe_iter(trace_pipe_cb, NULL, 0);
}
+
+static size_t symbol_hash(long key, void *ctx __maybe_unused)
+{
+ return str_hash((const char *) key);
+}
+
+static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return strcmp((const char *) key1, (const char *) key2) == 0;
+}
+
+static bool is_invalid_entry(char *buf, bool kernel)
+{
+ if (kernel && strchr(buf, '['))
+ return true;
+ if (!kernel && !strchr(buf, '['))
+ return true;
+ return false;
+}
+
+static bool skip_entry(char *name)
+{
+ /*
+ * We attach to almost all kernel functions and some of them
+ * will cause 'suspicious RCU usage' when fprobe is attached
+ * to them. Filter out the current culprits - arch_cpu_idle
+ * default_idle and rcu_* functions.
+ */
+ if (!strcmp(name, "arch_cpu_idle"))
+ return true;
+ if (!strcmp(name, "default_idle"))
+ return true;
+ if (!strncmp(name, "rcu_", 4))
+ return true;
+ if (!strcmp(name, "bpf_dispatcher_xdp_func"))
+ return true;
+ if (!strncmp(name, "__ftrace_invalid_address__",
+ sizeof("__ftrace_invalid_address__") - 1))
+ return true;
+ return false;
+}
+
+/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
+static int compare_name(const char *name1, const char *name2)
+{
+ const char *res1, *res2;
+ int len1, len2;
+
+ res1 = strstr(name1, ".llvm.");
+ res2 = strstr(name2, ".llvm.");
+ len1 = res1 ? res1 - name1 : strlen(name1);
+ len2 = res2 ? res2 - name2 : strlen(name2);
+
+ if (len1 == len2)
+ return strncmp(name1, name2, len1);
+ if (len1 < len2)
+ return strncmp(name1, name2, len1) <= 0 ? -1 : 1;
+ return strncmp(name1, name2, len2) >= 0 ? 1 : -1;
+}
+
+static int load_kallsyms_compare(const void *p1, const void *p2)
+{
+ return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
+}
+
+static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
+{
+ return compare_name(p1, p2->name);
+}
+
+int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel)
+{
+ size_t cap = 0, cnt = 0;
+ char *name = NULL, *ksym_name, **syms = NULL;
+ struct hashmap *map;
+ struct ksyms *ksyms;
+ struct ksym *ks;
+ char buf[256];
+ FILE *f;
+ int err = 0;
+
+ ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
+ if (!ksyms)
+ return -EINVAL;
+
+ /*
+ * The available_filter_functions contains many duplicates,
+ * but other than that all symbols are usable to trace.
+ * Filtering out duplicates by using hashmap__add, which won't
+ * add existing entry.
+ */
+
+ if (access("/sys/kernel/tracing/trace", F_OK) == 0)
+ f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
+ else
+ f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
+
+ if (!f)
+ return -EINVAL;
+
+ map = hashmap__new(symbol_hash, symbol_equal, NULL);
+ if (IS_ERR(map)) {
+ err = libbpf_get_error(map);
+ goto error;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ if (is_invalid_entry(buf, kernel))
+ continue;
+
+ free(name);
+ if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
+ continue;
+ if (skip_entry(name))
+ continue;
+
+ ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
+ if (!ks) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ ksym_name = ks->name;
+ err = hashmap__add(map, ksym_name, 0);
+ if (err == -EEXIST) {
+ err = 0;
+ continue;
+ }
+ if (err)
+ goto error;
+
+ err = libbpf_ensure_mem((void **) &syms, &cap,
+ sizeof(*syms), cnt + 1);
+ if (err)
+ goto error;
+
+ syms[cnt++] = ksym_name;
+ }
+
+ *symsp = syms;
+ *cntp = cnt;
+
+error:
+ free(name);
+ fclose(f);
+ hashmap__free(map);
+ if (err)
+ free(syms);
+ return err;
+}
+
+int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
+{
+ unsigned long *addr, *addrs, *tmp_addrs;
+ int err = 0, max_cnt, inc_cnt;
+ char *name = NULL;
+ size_t cnt = 0;
+ char buf[256];
+ FILE *f;
+
+ if (access("/sys/kernel/tracing/trace", F_OK) == 0)
+ f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
+ else
+ f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");
+
+ if (!f)
+ return -ENOENT;
+
+ /* In my local setup, the number of entries is 50k+ so let us initially
+ * allocate space to hold 64k entries. If 64k is not enough, incrementally
+ * increase 1k each time.
+ */
+ max_cnt = 65536;
+ inc_cnt = 1024;
+ addrs = malloc(max_cnt * sizeof(long));
+ if (addrs == NULL) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ if (is_invalid_entry(buf, kernel))
+ continue;
+
+ free(name);
+ if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
+ continue;
+ if (skip_entry(name))
+ continue;
+
+ if (cnt == max_cnt) {
+ max_cnt += inc_cnt;
+ tmp_addrs = realloc(addrs, max_cnt * sizeof(long));
+ if (!tmp_addrs) {
+ err = -ENOMEM;
+ goto error;
+ }
+ addrs = tmp_addrs;
+ }
+
+ addrs[cnt++] = (unsigned long)addr;
+ }
+
+ *addrsp = addrs;
+ *cntp = cnt;
+
+error:
+ free(name);
+ fclose(f);
+ if (err)
+ free(addrs);
+ return err;
+}
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 2ce873c9f9aa..9437bdd4afa5 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -41,4 +41,7 @@ ssize_t get_rel_offset(uintptr_t addr);
int read_build_id(const char *path, char *build_id, size_t size);
+int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel);
+int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel);
+
#endif
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index f3492efc8834..c8d640802cce 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -1375,7 +1375,7 @@
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
@@ -1439,7 +1439,7 @@
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
@@ -1493,7 +1493,7 @@
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
@@ -2380,7 +2380,7 @@
*/
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
- /* r9 = *r9 ; verifier get's to this point via two paths:
+ /* r9 = *r9 ; verifier gets to this point via two paths:
* ; (I) one including r9 = r8, verified first;
* ; (II) one excluding r9 = r8, verified next.
* ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index 1503a1d2faa0..9ed8c796645d 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -155,7 +155,7 @@ int main(int argc, char **argv)
}
if (!server) {
- /* Only supports IPv4; see hints initiailization above. */
+ /* Only supports IPv4; see hints initialization above. */
if (getaddrinfo(argv[optind], NULL, &hints, &a) || !a) {
fprintf(stderr, "Could not resolve %s\n", argv[optind]);
return 1;
diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index 93c2cc413cfc..48729da142c2 100644
--- a/tools/testing/selftests/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -93,8 +93,8 @@ static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
/* Refresh the local tail pointer.
* cached_cons is r->size bigger than the real consumer pointer so
* that this addition can be avoided in the more frequently
- * executed code that computs free_entries in the beginning of
- * this function. Without this optimization it whould have been
+ * executed code that computes free_entries in the beginning of
+ * this function. Without this optimization it would have been
* free_entries = r->cached_prod - r->cached_cons + r->size.
*/
r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);
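The xsk.h comment above describes keeping cached_cons biased by the ring size so the hot path computes free entries with a single subtraction. A toy sketch of that bookkeeping (illustrative struct and names, not the real xsk layout):

#include <stdint.h>
#include <stdio.h>

struct toy_prod_ring {
	uint32_t size;        /* power-of-two ring size */
	uint32_t cached_prod; /* local copy of the producer index */
	uint32_t cached_cons; /* consumer index + size (the bias) */
};

static uint32_t toy_prod_nb_free(struct toy_prod_ring *r)
{
	/* equivalent to: consumer - producer + size, thanks to the bias */
	return r->cached_cons - r->cached_prod;
}

int main(void)
{
	/* producer at 5, consumer at 3, ring of 8 -> 6 free entries */
	struct toy_prod_ring r = { .size = 8, .cached_prod = 5, .cached_cons = 3 + 8 };

	printf("free entries: %u\n", toy_prod_nb_free(&r));
	return 0;
}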
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index a29de0713f19..352adc8df2d1 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -2276,25 +2276,13 @@ static int testapp_xdp_metadata_copy(struct test_spec *test)
{
struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
- struct bpf_map *data_map;
- int count = 0;
- int key = 0;
test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
skel_tx->progs.xsk_xdp_populate_metadata,
skel_rx->maps.xsk, skel_tx->maps.xsk);
test->ifobj_rx->use_metadata = true;
- data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
- if (!data_map || !bpf_map__is_internal(data_map)) {
- ksft_print_msg("Error: could not find bss section of XDP program\n");
- return TEST_FAILURE;
- }
-
- if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) {
- ksft_print_msg("Error: could not update count element\n");
- return TEST_FAILURE;
- }
+ skel_rx->bss->count = 0;
return testapp_validate_traffic(test);
}
diff --git a/tools/testing/selftests/cgroup/lib/cgroup_util.c b/tools/testing/selftests/cgroup/lib/cgroup_util.c
index 0e89fcff4d05..44c52f620fda 100644
--- a/tools/testing/selftests/cgroup/lib/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/lib/cgroup_util.c
@@ -522,6 +522,18 @@ int proc_mount_contains(const char *option)
return strstr(buf, option) != NULL;
}
+int cgroup_feature(const char *feature)
+{
+ char buf[PAGE_SIZE];
+ ssize_t read;
+
+ read = read_text("/sys/kernel/cgroup/features", buf, sizeof(buf));
+ if (read < 0)
+ return read;
+
+ return strstr(buf, feature) != NULL;
+}
+
ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
{
char path[PATH_MAX];
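The new cgroup_feature() helper reads /sys/kernel/cgroup/features and reports whether a keyword such as "pids_localevents" is advertised, so tests can skip when it is absent. A standalone sketch of an equivalent check (has_cgroup_feature is a hypothetical name, not the selftest API):

#include <stdio.h>
#include <string.h>

static int has_cgroup_feature(const char *feature)
{
	char buf[4096];
	size_t len;
	FILE *f = fopen("/sys/kernel/cgroup/features", "r");

	if (!f)
		return -1;
	len = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	buf[len] = '\0';
	return strstr(buf, feature) != NULL;
}

int main(void)
{
	printf("pids_localevents: %d\n", has_cgroup_feature("pids_localevents"));
	return 0;
}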
diff --git a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
index c69cab66254b..9dc90a1b386d 100644
--- a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
@@ -60,6 +60,7 @@ extern int cg_run_nowait(const char *cgroup,
extern int cg_wait_for_proc_count(const char *cgroup, int count);
extern int cg_killall(const char *cgroup);
int proc_mount_contains(const char *option);
+int cgroup_feature(const char *feature);
extern ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size);
extern int proc_read_strstr(int pid, bool thread, const char *item, const char *needle);
extern pid_t clone_into_cgroup(int cgroup_fd);
diff --git a/tools/testing/selftests/cgroup/test_pids.c b/tools/testing/selftests/cgroup/test_pids.c
index 9ecb83c6cc5c..d8a1d1cd5007 100644
--- a/tools/testing/selftests/cgroup/test_pids.c
+++ b/tools/testing/selftests/cgroup/test_pids.c
@@ -77,6 +77,9 @@ static int test_pids_events(const char *root)
char *cg_parent = NULL, *cg_child = NULL;
int pid;
+ if (cgroup_feature("pids_localevents") <= 0)
+ return KSFT_SKIP;
+
cg_parent = cg_name(root, "pids_parent");
cg_child = cg_name(cg_parent, "pids_child");
if (!cg_parent || !cg_child)
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 8cfb87f7f7c5..bd50aecfca8a 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+PKG_CONFIG ?= pkg-config
+LIBNUMA_TEST = $(shell sh -c "$(PKG_CONFIG) numa --atleast-version 2.0.16 > /dev/null 2>&1 && echo SUFFICIENT || echo NO")
+
INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
-CFLAGS := $(CFLAGS) -g -O2 -Wall -pthread $(INCLUDES) $(KHDR_INCLUDES)
+CFLAGS := $(CFLAGS) -g -O2 -Wall -pthread -D_FILE_OFFSET_BITS=64 -D_TIME_BITS=64 $(INCLUDES) $(KHDR_INCLUDES) -DLIBNUMA_VER_$(LIBNUMA_TEST)=1
LDLIBS := -lpthread -lrt -lnuma
LOCAL_HDRS := \
diff --git a/tools/testing/selftests/futex/functional/futex_numa_mpol.c b/tools/testing/selftests/futex/functional/futex_numa_mpol.c
index a9ecfb2d3932..7f2b2e1ff9f8 100644
--- a/tools/testing/selftests/futex/functional/futex_numa_mpol.c
+++ b/tools/testing/selftests/futex/functional/futex_numa_mpol.c
@@ -77,7 +77,7 @@ static void join_max_threads(void)
}
}
-static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flags)
+static void __test_futex(void *futex_ptr, int err_value, unsigned int futex_flags)
{
int to_wake, ret, i, need_exit = 0;
@@ -88,11 +88,17 @@ static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flag
do {
ret = futex2_wake(futex_ptr, to_wake, futex_flags);
- if (must_fail) {
- if (ret < 0)
- break;
- ksft_exit_fail_msg("futex2_wake(%d, 0x%x) should fail, but didn't\n",
- to_wake, futex_flags);
+
+ if (err_value) {
+ if (ret >= 0)
+ ksft_exit_fail_msg("futex2_wake(%d, 0x%x) should fail, but didn't\n",
+ to_wake, futex_flags);
+
+ if (errno != err_value)
+ ksft_exit_fail_msg("futex2_wake(%d, 0x%x) expected error was %d, but returned %d (%s)\n",
+ to_wake, futex_flags, err_value, errno, strerror(errno));
+
+ break;
}
if (ret < 0) {
ksft_exit_fail_msg("Failed futex2_wake(%d, 0x%x): %m\n",
@@ -106,12 +112,12 @@ static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flag
join_max_threads();
for (i = 0; i < MAX_THREADS; i++) {
- if (must_fail && thread_args[i].result != -1) {
+ if (err_value && thread_args[i].result != -1) {
ksft_print_msg("Thread %d should fail but succeeded (%d)\n",
i, thread_args[i].result);
need_exit = 1;
}
- if (!must_fail && thread_args[i].result != 0) {
+ if (!err_value && thread_args[i].result != 0) {
ksft_print_msg("Thread %d failed (%d)\n", i, thread_args[i].result);
need_exit = 1;
}
@@ -120,14 +126,9 @@ static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flag
ksft_exit_fail_msg("Aborting due to earlier errors.\n");
}
-static void test_futex(void *futex_ptr, int must_fail)
+static void test_futex(void *futex_ptr, int err_value)
{
- __test_futex(futex_ptr, must_fail, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA);
-}
-
-static void test_futex_mpol(void *futex_ptr, int must_fail)
-{
- __test_futex(futex_ptr, must_fail, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL);
+ __test_futex(futex_ptr, err_value, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA);
}
static void usage(char *prog)
@@ -142,7 +143,7 @@ static void usage(char *prog)
int main(int argc, char *argv[])
{
struct futex32_numa *futex_numa;
- int mem_size, i;
+ int mem_size;
void *futex_ptr;
int c;
@@ -165,7 +166,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
- ksft_set_plan(1);
+ ksft_set_plan(2);
mem_size = sysconf(_SC_PAGE_SIZE);
futex_ptr = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
@@ -182,27 +183,28 @@ int main(int argc, char *argv[])
if (futex_numa->numa == FUTEX_NO_NODE)
ksft_exit_fail_msg("NUMA node is left uninitialized\n");
- ksft_print_msg("Memory too small\n");
- test_futex(futex_ptr + mem_size - 4, 1);
-
- ksft_print_msg("Memory out of range\n");
- test_futex(futex_ptr + mem_size, 1);
+ /* FUTEX2_NUMA futex must be 8-byte aligned */
+ ksft_print_msg("Mis-aligned futex\n");
+ test_futex(futex_ptr + mem_size - 4, EINVAL);
futex_numa->numa = FUTEX_NO_NODE;
mprotect(futex_ptr, mem_size, PROT_READ);
ksft_print_msg("Memory, RO\n");
- test_futex(futex_ptr, 1);
+ test_futex(futex_ptr, EFAULT);
mprotect(futex_ptr, mem_size, PROT_NONE);
ksft_print_msg("Memory, no access\n");
- test_futex(futex_ptr, 1);
+ test_futex(futex_ptr, EFAULT);
mprotect(futex_ptr, mem_size, PROT_READ | PROT_WRITE);
ksft_print_msg("Memory back to RW\n");
test_futex(futex_ptr, 0);
+ ksft_test_result_pass("futex2 memory boundary tests passed\n");
+
/* MPOL test. Does not work as expected */
- for (i = 0; i < 4; i++) {
+#ifdef LIBNUMA_VER_SUFFICIENT
+ for (int i = 0; i < 4; i++) {
unsigned long nodemask;
int ret;
@@ -221,15 +223,16 @@ int main(int argc, char *argv[])
ret = futex2_wake(futex_ptr, 0, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL);
if (ret < 0)
ksft_test_result_fail("Failed to wake 0 with MPOL: %m\n");
- if (0)
- test_futex_mpol(futex_numa, 0);
if (futex_numa->numa != i) {
ksft_exit_fail_msg("Returned NUMA node is %d expected %d\n",
futex_numa->numa, i);
}
}
}
- ksft_test_result_pass("NUMA MPOL tests passed\n");
+ ksft_test_result_pass("futex2 MPOL hints test passed\n");
+#else
+ ksft_test_result_skip("futex2 MPOL hints test requires libnuma 2.0.16+\n");
+#endif
ksft_finished();
return 0;
}
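
With must_fail widened into err_value, the test can assert the exact failure mode: EINVAL for a mis-aligned FUTEX2_NUMA word (which must be 8-byte aligned) versus EFAULT for read-only or unmapped memory. A standalone sketch of the same check pattern; expect_result() is hypothetical, the selftest folds this logic directly into __test_futex():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* err_value == 0: the call must succeed; otherwise it must fail with
 * exactly that errno. */
static void expect_result(int ret, int err_value, const char *what)
{
	if (!err_value) {
		if (ret < 0) {
			fprintf(stderr, "%s failed: %s\n", what, strerror(errno));
			exit(1);
		}
		return;
	}
	if (ret >= 0) {
		fprintf(stderr, "%s should have failed\n", what);
		exit(1);
	}
	if (errno != err_value) {
		fprintf(stderr, "%s: expected errno %d, got %d (%s)\n",
			what, err_value, errno, strerror(errno));
		exit(1);
	}
}
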
diff --git a/tools/testing/selftests/futex/functional/futex_priv_hash.c b/tools/testing/selftests/futex/functional/futex_priv_hash.c
index aea001ac4946..ec032faca6a9 100644
--- a/tools/testing/selftests/futex/functional/futex_priv_hash.c
+++ b/tools/testing/selftests/futex/functional/futex_priv_hash.c
@@ -132,7 +132,6 @@ static void usage(char *prog)
{
printf("Usage: %s\n", prog);
printf(" -c Use color\n");
- printf(" -g Test global hash instead intead local immutable \n");
printf(" -h Display this help message\n");
printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
VQUIET, VCRITICAL, VINFO);
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index 81739849f299..5470088dc4df 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -85,7 +85,6 @@ echo
echo
./futex_priv_hash $COLOR
-./futex_priv_hash -g $COLOR
echo
./futex_numa_mpol $COLOR
diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
index 7a5fd1d5355e..3d48e9789d9f 100644
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -58,6 +58,17 @@ typedef volatile u_int32_t futex_t;
#define SYS_futex SYS_futex_time64
#endif
+/*
+ * On 32bit systems, building with "-D_FILE_OFFSET_BITS=64 -D_TIME_BITS=64" (or
+ * with a toolchain that defaults to 64bit time_t) makes the timestamps 64bit,
+ * while SYS_futex still points to the 32bit futex system call.
+ */
+#if __SIZEOF_POINTER__ == 4 && defined(SYS_futex_time64) && \
+ defined(_TIME_BITS) && _TIME_BITS == 64
+# undef SYS_futex
+# define SYS_futex SYS_futex_time64
+#endif
+
/**
* futex() - SYS_futex syscall wrapper
* @uaddr: address of first futex
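
The redirect matters because the futex selftests are now built with -D_FILE_OFFSET_BITS=64 -D_TIME_BITS=64 (see the Makefile change above): on a 32bit target that makes struct timespec carry a 64bit tv_sec, which only the *_time64 syscall entry understands. A hypothetical compile-time sanity check along the same lines, not part of the patch:

#include <time.h>
#include <sys/syscall.h>

/* With 64bit time_t forced on a 32bit build, any timeout handed to the
 * kernel has to go through the time64 futex entry point. */
#if __SIZEOF_POINTER__ == 4 && defined(_TIME_BITS) && _TIME_BITS == 64
_Static_assert(sizeof(((struct timespec *)0)->tv_sec) == 8,
	       "expected 64bit tv_sec with _TIME_BITS=64");
# ifndef SYS_futex_time64
#  error "SYS_futex_time64 expected to be available here"
# endif
#endif
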
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index 3c3e08b8c90e..772ca1db6e59 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -1042,15 +1042,13 @@ static int _test_cmd_trigger_vevents(int fd, __u32 dev_id, __u32 nvevents)
.dev_id = dev_id,
},
};
- int ret;
while (nvevents--) {
- ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
- &trigger_vevent_cmd);
- if (ret < 0)
+ if (!ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
+ &trigger_vevent_cmd))
return -1;
}
- return ret;
+ return 0;
}
#define test_cmd_trigger_vevents(dev_id, nvevents) \
diff --git a/tools/testing/selftests/kselftest_harness/Makefile b/tools/testing/selftests/kselftest_harness/Makefile
index 0617535a6ce4..d2369c01701a 100644
--- a/tools/testing/selftests/kselftest_harness/Makefile
+++ b/tools/testing/selftests/kselftest_harness/Makefile
@@ -2,6 +2,7 @@
TEST_GEN_PROGS_EXTENDED := harness-selftest
TEST_PROGS := harness-selftest.sh
+TEST_FILES := harness-selftest.expected
EXTRA_CLEAN := harness-selftest.seen
include ../lib.mk
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 530390033929..a448fae57831 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -228,7 +228,10 @@ $(OUTPUT)/%:%.S
$(LINK.S) $^ $(LDLIBS) -o $@
endif
+# Extract the expected header directory
+khdr_output := $(patsubst %/usr/include,%,$(filter %/usr/include,$(KHDR_INCLUDES)))
+
headers:
- $(Q)$(MAKE) -C $(top_srcdir) headers
+ $(Q)$(MAKE) -f $(top_srcdir)/Makefile -C $(khdr_output) headers
.PHONY: run_tests all clean install emit_tests gen_mods_dir clean_mods_dir headers
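
The two Make functions only recover the kernel build directory from the header search path: if KHDR_INCLUDES were, say, "-isystem /srv/kbuild/usr/include" (the path is just an example), $(filter) keeps "/srv/kbuild/usr/include", $(patsubst) strips the trailing "/usr/include", and the headers target becomes "make -f $(top_srcdir)/Makefile -C /srv/kbuild headers", so the UAPI headers are installed into the same output tree the tests are compiled against rather than unconditionally into the source tree.
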
diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c
index b6fabd5c27ed..d8d11bc67ddc 100644
--- a/tools/testing/selftests/mm/madv_populate.c
+++ b/tools/testing/selftests/mm/madv_populate.c
@@ -264,23 +264,6 @@ static void test_softdirty(void)
munmap(addr, SIZE);
}
-static int system_has_softdirty(void)
-{
- /*
- * There is no way to check if the kernel supports soft-dirty, other
- * than by writing to a page and seeing if the bit was set. But the
- * tests are intended to check that the bit gets set when it should, so
- * doing that check would turn a potentially legitimate fail into a
- * skip. Fortunately, we know for sure that arm64 does not support
- * soft-dirty. So for now, let's just use the arch as a corse guide.
- */
-#if defined(__aarch64__)
- return 0;
-#else
- return 1;
-#endif
-}
-
int main(int argc, char **argv)
{
int nr_tests = 16;
@@ -288,7 +271,7 @@ int main(int argc, char **argv)
pagesize = getpagesize();
- if (system_has_softdirty())
+ if (softdirty_supported())
nr_tests += 5;
ksft_print_header();
@@ -300,7 +283,7 @@ int main(int argc, char **argv)
test_holes();
test_populate_read();
test_populate_write();
- if (system_has_softdirty())
+ if (softdirty_supported())
test_softdirty();
err = ksft_get_fail_cnt();
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index 8a3f2b4b2186..4ee4db3750c1 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -200,8 +200,11 @@ int main(int argc, char **argv)
int pagesize;
ksft_print_header();
- ksft_set_plan(15);
+ if (!softdirty_supported())
+ ksft_exit_skip("soft-dirty is not supported\n");
+
+ ksft_set_plan(15);
pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
if (pagemap_fd < 0)
ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.c b/tools/testing/selftests/mm/va_high_addr_switch.c
index 896b3f73fc53..306eba825107 100644
--- a/tools/testing/selftests/mm/va_high_addr_switch.c
+++ b/tools/testing/selftests/mm/va_high_addr_switch.c
@@ -230,10 +230,10 @@ void testcases_init(void)
.msg = "mmap(-1, MAP_HUGETLB) again",
},
{
- .addr = (void *)(addr_switch_hint - pagesize),
+ .addr = (void *)(addr_switch_hint - hugepagesize),
.size = 2 * hugepagesize,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(addr_switch_hint - pagesize, 2*hugepagesize, MAP_HUGETLB)",
+ .msg = "mmap(addr_switch_hint - hugepagesize, 2*hugepagesize, MAP_HUGETLB)",
.low_addr_required = 1,
.keep_mapped = 1,
},
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index 9dafa7669ef9..79ec33efcd57 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -426,6 +426,23 @@ bool check_vmflag_io(void *addr)
}
}
+bool softdirty_supported(void)
+{
+ char *addr;
+ bool supported = false;
+ const size_t pagesize = getpagesize();
+
+ /* New mappings are expected to be marked with VM_SOFTDIRTY (sd). */
+ addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ supported = check_vmflag(addr, "sd");
+ munmap(addr, pagesize);
+ return supported;
+}
+
/*
* Open an fd at /proc/$pid/maps and configure procmap_out ready for
* PROCMAP_QUERY query. Returns 0 on success, or an error code otherwise.
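
softdirty_supported() replaces the per-test architecture guesses (see the madv_populate.c and soft-dirty.c hunks above): a freshly created anonymous mapping is expected to carry the "sd" bit in its VmFlags, so its absence means the kernel was built without soft-dirty tracking. A standalone sketch of the same probe; the selftest goes through its check_vmflag() helper rather than open-coding the /proc/self/smaps parsing:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static bool softdirty_supported_sketch(void)
{
	const size_t pagesize = getpagesize();
	char line[1024], addr_str[64];
	bool in_vma = false, supported = false;
	void *addr;
	FILE *f;

	addr = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (addr == MAP_FAILED)
		exit(1);

	/* smaps prints VMA boundaries as zero-padded lowercase hex. */
	snprintf(addr_str, sizeof(addr_str), "%08lx-", (unsigned long)addr);

	f = fopen("/proc/self/smaps", "r");
	if (!f)
		exit(1);

	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, addr_str, strlen(addr_str)))
			in_vma = true;
		else if (in_vma && !strncmp(line, "VmFlags:", 8)) {
			supported = strstr(line, " sd") != NULL;
			break;
		}
	}
	fclose(f);
	munmap(addr, pagesize);
	return supported;
}
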
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index b55d1809debc..f6fad71c4f2e 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -99,6 +99,7 @@ bool find_vma_procmap(struct procmap_fd *procmap, void *address);
int close_procmap(struct procmap_fd *procmap);
int write_sysfs(const char *file_path, unsigned long val);
int read_sysfs(const char *file_path, unsigned long *val);
+bool softdirty_supported(void);
static inline int open_self_procmap(struct procmap_fd *procmap_out)
{
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index a297ee0d6d07..d074878eb234 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -196,8 +196,8 @@ int expect_zr(int expr, int llen)
}
-#define EXPECT_NZ(cond, expr, val) \
- do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen; } while (0)
+#define EXPECT_NZ(cond, expr) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen); } while (0)
static __attribute__((unused))
int expect_nz(int expr, int llen)
@@ -1334,6 +1334,7 @@ int run_syscall(int min, int max)
CASE_TEST(chroot_root); EXPECT_SYSZR(euid0, chroot("/")); break;
CASE_TEST(chroot_blah); EXPECT_SYSER(1, chroot("/proc/self/blah"), -1, ENOENT); break;
CASE_TEST(chroot_exe); EXPECT_SYSER(1, chroot(argv0), -1, ENOTDIR); break;
+ CASE_TEST(clock_nanosleep); ts.tv_nsec = -1; EXPECT_EQ(1, EINVAL, clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL)); break;
CASE_TEST(close_m1); EXPECT_SYSER(1, close(-1), -1, EBADF); break;
CASE_TEST(close_dup); EXPECT_SYSZR(1, close(dup(0))); break;
CASE_TEST(dup_0); tmp = dup(0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
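
Note the use of EXPECT_EQ() rather than EXPECT_SYSER() for the new case: clock_nanosleep() is specified to return the error number directly instead of returning -1 and setting errno, and an out-of-range tv_nsec must yield EINVAL. A minimal standalone check of the same behaviour (hypothetical, written against POSIX semantics rather than nolibc internals):

#include <errno.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = -1 };
	int ret = clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);

	/* POSIX: the error is the return value, not errno. */
	printf("clock_nanosleep() returned %d (EINVAL is %d)\n", ret, EINVAL);
	return ret == EINVAL ? 0 : 1;
}
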
diff --git a/tools/testing/selftests/vDSO/vdso_call.h b/tools/testing/selftests/vDSO/vdso_call.h
index bb237d771051..e7205584cbdc 100644
--- a/tools/testing/selftests/vDSO/vdso_call.h
+++ b/tools/testing/selftests/vDSO/vdso_call.h
@@ -44,7 +44,6 @@
register long _r6 asm ("r6"); \
register long _r7 asm ("r7"); \
register long _r8 asm ("r8"); \
- register long _rval asm ("r3"); \
\
LOADARGS_##nr(fn, args); \
\
@@ -54,13 +53,13 @@
" bns+ 1f\n" \
" neg 3, 3\n" \
"1:" \
- : "+r" (_r0), "=r" (_r3), "+r" (_r4), "+r" (_r5), \
+ : "+r" (_r0), "+r" (_r3), "+r" (_r4), "+r" (_r5), \
"+r" (_r6), "+r" (_r7), "+r" (_r8) \
- : "r" (_rval) \
+ : \
: "r9", "r10", "r11", "r12", "cr0", "cr1", "cr5", \
"cr6", "cr7", "xer", "lr", "ctr", "memory" \
); \
- _rval; \
+ _r3; \
})
#else
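
The constraint change is the functional part of this hunk: on powerpc the sc instruction takes its first argument and returns its result in r3, so declaring _r3 as an output-only "=r" operand let the compiler assume its pre-asm value (the first argument) never had to be loaded into r3, while the separate, uninitialized _rval bound to the same register was passed as the input. Making _r3 a "+r" operand and returning it directly avoids both problems. A tiny illustration of the "+r" vs "=r" difference (hypothetical, powerpc-only, not the vDSO macro itself):

/* Returns x + 1; r3 is both input and output, as for a syscall. */
static inline long add_one_r3(long x)
{
	register long r3 asm("r3") = x;

	/* With "=r" instead of "+r" the compiler would be free to skip
	 * loading x into r3 first, since an output-only operand has no
	 * defined value on entry to the asm. */
	asm volatile("addi 3, 3, 1" : "+r"(r3));
	return r3;
}
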
diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c
index a54424e2336f..67cbfc56e4e1 100644
--- a/tools/testing/selftests/vDSO/vdso_test_abi.c
+++ b/tools/testing/selftests/vDSO/vdso_test_abi.c
@@ -182,12 +182,11 @@ int main(int argc, char **argv)
unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
ksft_print_header();
- ksft_set_plan(VDSO_TEST_PLAN);
- if (!sysinfo_ehdr) {
- ksft_print_msg("AT_SYSINFO_EHDR is not present!\n");
- return KSFT_SKIP;
- }
+ if (!sysinfo_ehdr)
+ ksft_exit_skip("AT_SYSINFO_EHDR is not present!\n");
+
+ ksft_set_plan(VDSO_TEST_PLAN);
version = versions[VDSO_VERSION];
name = (const char **)&names[VDSO_NAMES];
diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
index a1f506ba5578..4f09c5db0c7f 100644
--- a/tools/testing/selftests/watchdog/watchdog-test.c
+++ b/tools/testing/selftests/watchdog/watchdog-test.c
@@ -332,6 +332,12 @@ int main(int argc, char *argv[])
if (oneshot)
goto end;
+ /* Check if WDIOF_KEEPALIVEPING is supported */
+ if (!(info.options & WDIOF_KEEPALIVEPING)) {
+ printf("WDIOC_KEEPALIVE not supported by this device\n");
+ goto end;
+ }
+
printf("Watchdog Ticking Away!\n");
/*