mirror of
https://github.com/armbian/build.git
synced 2025-08-09 12:46:58 +02:00
diff --git a/Makefile b/Makefile
index a3b7a26021003..87597736db035 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 11
-SUBLEVEL = 19
+SUBLEVEL = 20
EXTRAVERSION =
NAME = 💕 Valentine's Day Edition 💕

@@ -774,16 +774,16 @@ KBUILD_CFLAGS += -Wno-gnu
KBUILD_CFLAGS += -mno-global-merge
else

-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += -Wno-unused-but-set-variable
-
# Warn about unmarked fall-throughs in switch statement.
# Disabled for clang while comment to attribute conversion happens and
# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
endif

+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index fb521efcc6c20..54307db7854d5 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -115,8 +115,8 @@ asflags-y := -DZIMAGE

# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
- sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
- -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
+ sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
+ -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
index 775ceb3acb6c0..edca66c232c15 100644
--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
@@ -8,6 +8,7 @@
*/
/dts-v1/;
#include "sam9x60.dtsi"
+#include <dt-bindings/input/input.h>

/ {
model = "Microchip SAM9X60-EK";
@@ -84,7 +85,7 @@
sw1 {
label = "SW1";
gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
- linux,code=<0x104>;
+ linux,code=<KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 0e159f879c15e..d3cd2443ba252 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -11,6 +11,7 @@
#include "at91-sama5d27_som1.dtsi"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>

/ {
model = "Atmel SAMA5D27 SOM1 EK";
@@ -467,7 +468,7 @@
pb4 {
label = "USER";
gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
index 6b38fa3f5568f..4883b84b4eded 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -8,6 +8,7 @@
*/
/dts-v1/;
#include "at91-sama5d27_wlsom1.dtsi"
+#include <dt-bindings/input/input.h>

/ {
model = "Microchip SAMA5D27 WLSOM1 EK";
@@ -35,7 +36,7 @@
sw4 {
label = "USER BUTTON";
gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 6783cf16ff818..19bb50f50c1fc 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -12,6 +12,7 @@
#include "sama5d2.dtsi"
#include "sama5d2-pinfunc.h"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/mfd/atmel-flexcom.h>

/ {
@@ -51,7 +52,7 @@
sw4 {
label = "USER_PB1";
gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index c894c7c788a93..1c6361ba1aca4 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -11,6 +11,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/at91.h>

/ {
@@ -403,7 +404,7 @@
bp1 {
label = "PB_USER";
gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 058fae1b4a76e..d767968ae2175 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -10,6 +10,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/regulator/active-semi,8945a-regulator.h>

/ {
@@ -713,7 +714,7 @@
bp1 {
label = "PB_USER";
gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 5179258f92470..9c55a921263bd 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -7,6 +7,7 @@
*/
/dts-v1/;
#include "sama5d36.dtsi"
+#include <dt-bindings/input/input.h>

/ {
model = "SAMA5D3 Xplained";
@@ -354,7 +355,7 @@
bp3 {
label = "PB_USER";
gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
index d3446e42b5983..ce96345d28a39 100644
--- a/arch/arm/boot/dts/at91sam9260ek.dts
+++ b/arch/arm/boot/dts/at91sam9260ek.dts
@@ -7,6 +7,7 @@
*/
/dts-v1/;
#include "at91sam9260.dtsi"
+#include <dt-bindings/input/input.h>

/ {
model = "Atmel at91sam9260ek";
@@ -156,7 +157,7 @@
btn4 {
label = "Button 4";
gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 6e6e672c0b86d..87bb39060e8be 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -5,6 +5,7 @@
* Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
#include "at91sam9g20.dtsi"
+#include <dt-bindings/input/input.h>

/ {

@@ -234,7 +235,7 @@
btn4 {
label = "Button 4";
gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
index 6a96655d86260..8ed403767540e 100644
--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
+++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
index 3b0029e61b4c6..667b118ba4ee1 100644
--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
+++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
index 90f57bad6b243..ff31ce45831a7 100644
--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

spi {
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
index fed75e6ab58ca..61c7b137607e5 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
@@ -22,8 +22,8 @@

memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
index 79542e18915c5..4c60eda296d97 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
index 51c64f0b25603..9ca6d1b2590d4 100644
--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
index c29950b43a953..0e273c598732f 100644
--- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
index 2f2d2b0a6893c..d857751ec5076 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

spi {
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
index 0e349e39f6081..8b1a05a0f1a11 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

spi {
diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
index 8f1e565c3db45..6c6bb7b17d27a 100644
--- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
@@ -21,8 +21,8 @@

memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
index ce888b1835d1f..d29e7f80ea6aa 100644
--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
@@ -21,8 +21,8 @@

memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
index ed8619b54d692..38fbefdf2e4e4 100644
--- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
+++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
@@ -18,8 +18,8 @@

memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

gpio-keys {
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
index 1f87993eae1d1..7989a53597d4f 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
@@ -21,8 +21,8 @@

memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
index 6c6199a53d091..87b655be674c5 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
@@ -32,8 +32,8 @@

memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index 911c65fbf2510..e635a15041dd8 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -21,8 +21,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

nand: nand@18028000 {
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 3725f2b0d60bd..4b24b25389b5f 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -18,8 +18,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

gpio-keys {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
index 50f7cd08cfbbc..a6dc99955e191 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
@@ -18,8 +18,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
index bcc420f85b566..ff98837bc0db0 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
@@ -18,8 +18,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
index 4f8d777ae18de..452b8d0ab180e 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
@@ -18,8 +18,8 @@

memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
index e17e9a17fb008..b76bfe6efcd4a 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
@@ -18,8 +18,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
index 60cc87ecc7ece..32d5a50578ec1 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
@@ -18,8 +18,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
index f42a1703f4ab1..42097a4c2659f 100644
--- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
@@ -18,8 +18,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

leds {
diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
index ac3a4483dcb3f..a2566ad4619c4 100644
--- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
+++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
@@ -15,8 +15,8 @@

memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};

gpio-keys {
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
|
|
index cb3677f0a1cbb..b580397ede833 100644
|
|
--- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
|
|
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
|
|
@@ -8,37 +8,43 @@
|
|
/ {
|
|
soc {
|
|
i2c@80128000 {
|
|
- /* Marked:
|
|
- * 129
|
|
- * M35
|
|
- * L3GD20
|
|
- */
|
|
- l3gd20@6a {
|
|
- /* Gyroscope */
|
|
- compatible = "st,l3gd20";
|
|
- status = "disabled";
|
|
+ accelerometer@19 {
|
|
+ compatible = "st,lsm303dlhc-accel";
|
|
st,drdy-int-pin = <1>;
|
|
- drive-open-drain;
|
|
- reg = <0x6a>; // 0x6a or 0x6b
|
|
+ reg = <0x19>;
|
|
vdd-supply = <&ab8500_ldo_aux1_reg>;
|
|
vddio-supply = <&db8500_vsmps2_reg>;
|
|
+ interrupt-parent = <&gpio2>;
|
|
+ interrupts = <18 IRQ_TYPE_EDGE_RISING>,
|
|
+ <19 IRQ_TYPE_EDGE_RISING>;
|
|
+ pinctrl-names = "default";
|
|
+ pinctrl-0 = <&accel_tvk_mode>;
|
|
};
|
|
- /*
|
|
- * Marked:
|
|
- * 2122
|
|
- * C3H
|
|
- * DQEEE
|
|
- * LIS3DH?
|
|
- */
|
|
- lis3dh@18 {
|
|
- /* Accelerometer */
|
|
- compatible = "st,lis3dh-accel";
|
|
+ magnetometer@1e {
|
|
+ compatible = "st,lsm303dlm-magn";
|
|
st,drdy-int-pin = <1>;
|
|
- reg = <0x18>;
|
|
+ reg = <0x1e>;
|
|
vdd-supply = <&ab8500_ldo_aux1_reg>;
|
|
vddio-supply = <&db8500_vsmps2_reg>;
|
|
+ // This interrupt is not properly working with the driver
|
|
+ // interrupt-parent = <&gpio1>;
|
|
+ // interrupts = <0 IRQ_TYPE_EDGE_RISING>;
|
|
pinctrl-names = "default";
|
|
- pinctrl-0 = <&accel_tvk_mode>;
|
|
+ pinctrl-0 = <&magn_tvk_mode>;
|
|
+ };
|
|
+ gyroscope@68 {
|
|
+ /* Gyroscope */
|
|
+ compatible = "st,l3g4200d-gyro";
|
|
+ reg = <0x68>;
|
|
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
|
|
+ vddio-supply = <&db8500_vsmps2_reg>;
|
|
+ };
|
|
+ pressure@5c {
|
|
+ /* Barometer/pressure sensor */
|
|
+ compatible = "st,lps001wp-press";
|
|
+ reg = <0x5c>;
|
|
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
|
|
+ vddio-supply = <&db8500_vsmps2_reg>;
|
|
};
|
|
};
|
|
|
|
@@ -54,5 +60,26 @@
|
|
};
|
|
};
|
|
};
|
|
+
|
|
+ pinctrl {
|
|
+ accelerometer {
|
|
+ accel_tvk_mode: accel_tvk {
|
|
+ /* Accelerometer interrupt lines 1 & 2 */
|
|
+ tvk_cfg {
|
|
+ pins = "GPIO82_C1", "GPIO83_D3";
|
|
+ ste,config = <&gpio_in_pd>;
|
|
+ };
|
|
+ };
|
|
+ };
|
|
+ magnetometer {
|
|
+ magn_tvk_mode: magn_tvk {
|
|
+ /* GPIO 32 used for DRDY, pull this down */
|
|
+ tvk_cfg {
|
|
+ pins = "GPIO32_V2";
|
|
+ ste,config = <&gpio_in_pd>;
|
|
+ };
|
|
+ };
|
|
+ };
|
|
+ };
|
|
};
|
|
};
|
|
diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
|
|
index d3b99535d755e..f9c0f6884cc1e 100644
|
|
--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
|
|
+++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
|
|
@@ -448,7 +448,7 @@
|
|
|
|
reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
|
|
|
|
- avdd-supply = <&vdd_3v3_sys>;
|
|
+ vdda-supply = <&vdd_3v3_sys>;
|
|
vdd-supply = <&vdd_3v3_sys>;
|
|
};
|
|
|
|
diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
|
|
index be18af52e7dc9..b697fa5d059a2 100644
|
|
--- a/arch/arm/crypto/curve25519-core.S
|
|
+++ b/arch/arm/crypto/curve25519-core.S
|
|
@@ -10,8 +10,8 @@
|
|
#include <linux/linkage.h>
|
|
|
|
.text
|
|
-.fpu neon
|
|
.arch armv7-a
|
|
+.fpu neon
|
|
.align 4
|
|
|
|
ENTRY(curve25519_neon)
|
|
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
|
|
index 6704ea2c72a35..cc29223ca188c 100644
|
|
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
|
|
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
|
|
@@ -22,6 +22,10 @@
|
|
ti,termination-current = <144000>; /* uA */
|
|
};
|
|
|
|
+&buck3_reg {
|
|
+ regulator-always-on;
|
|
+};
|
|
+
|
|
&proximity {
|
|
proximity-near-level = <25>;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
index d5b6c0a1c54a5..a89e47d95eef2 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
@@ -156,7 +156,8 @@
|
|
};
|
|
|
|
nb_periph_clk: nb-periph-clk@13000 {
|
|
- compatible = "marvell,armada-3700-periph-clock-nb";
|
|
+ compatible = "marvell,armada-3700-periph-clock-nb",
|
|
+ "syscon";
|
|
reg = <0x13000 0x100>;
|
|
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
|
|
<&tbg 3>, <&xtalclk>;
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
|
|
index 7fa870e4386a3..ecb37a7e68705 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
|
|
@@ -1235,7 +1235,7 @@
|
|
<&mmsys CLK_MM_DSI1_DIGITAL>,
|
|
<&mipi_tx1>;
|
|
clock-names = "engine", "digital", "hs";
|
|
- phy = <&mipi_tx1>;
|
|
+ phys = <&mipi_tx1>;
|
|
phy-names = "dphy";
|
|
status = "disabled";
|
|
};
|
|
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
|
|
index 61dbb4c838ef7..a5e61e09ea927 100644
|
|
--- a/arch/arm64/kernel/vdso/vdso.lds.S
|
|
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
|
|
@@ -31,6 +31,13 @@ SECTIONS
|
|
.gnu.version_d : { *(.gnu.version_d) }
|
|
.gnu.version_r : { *(.gnu.version_r) }
|
|
|
|
+ /*
|
|
+ * Discard .note.gnu.property sections which are unused and have
|
|
+ * different alignment requirement from vDSO note sections.
|
|
+ */
|
|
+ /DISCARD/ : {
|
|
+ *(.note.GNU-stack .note.gnu.property)
|
|
+ }
|
|
.note : { *(.note.*) } :text :note
|
|
|
|
. = ALIGN(16);
|
|
@@ -48,7 +55,6 @@ SECTIONS
|
|
PROVIDE(end = .);
|
|
|
|
/DISCARD/ : {
|
|
- *(.note.GNU-stack)
|
|
*(.data .data.* .gnu.linkonce.d.* .sdata*)
|
|
*(.bss .sbss .dynbss .dynsbss)
|
|
*(.eh_frame .eh_frame_hdr)
|
|
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
|
|
index d5821834dba96..3ed149ac9d250 100644
|
|
--- a/arch/powerpc/include/asm/mmu_context.h
|
|
+++ b/arch/powerpc/include/asm/mmu_context.h
|
|
@@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
|
|
static inline void arch_unmap(struct mm_struct *mm,
|
|
unsigned long start, unsigned long end)
|
|
{
|
|
- unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
|
|
+ unsigned long vdso_base = (unsigned long)mm->context.vdso;
|
|
|
|
if (start <= vdso_base && vdso_base < end)
|
|
mm->context.vdso = NULL;
|
|
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
|
|
index e40a921d78f96..8e3743486827d 100644
|
|
--- a/arch/powerpc/include/asm/reg.h
|
|
+++ b/arch/powerpc/include/asm/reg.h
|
|
@@ -441,6 +441,7 @@
|
|
#define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
|
|
#define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
|
|
#define LPCR_RMLS_SH 26
|
|
+#define LPCR_HAIL ASM_CONST(0x0000000004000000) /* HV AIL (ISAv3.1) */
|
|
#define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
|
|
#define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
|
|
#define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
|
|
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
|
|
index cc79856896a19..4ba87de32be00 100644
|
|
--- a/arch/powerpc/include/uapi/asm/errno.h
|
|
+++ b/arch/powerpc/include/uapi/asm/errno.h
|
|
@@ -2,6 +2,7 @@
|
|
#ifndef _ASM_POWERPC_ERRNO_H
|
|
#define _ASM_POWERPC_ERRNO_H
|
|
|
|
+#undef EDEADLOCK
|
|
#include <asm-generic/errno.h>
|
|
|
|
#undef EDEADLOCK
|
|
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
|
|
index 813713c9120c0..20c417ad9c6de 100644
|
|
--- a/arch/powerpc/kernel/eeh.c
|
|
+++ b/arch/powerpc/kernel/eeh.c
|
|
@@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
|
|
pa = pte_pfn(*ptep);
|
|
|
|
/* On radix we can do hugepage mappings for io, so handle that */
|
|
- if (hugepage_shift) {
|
|
- pa <<= hugepage_shift;
|
|
- pa |= token & ((1ul << hugepage_shift) - 1);
|
|
- } else {
|
|
- pa <<= PAGE_SHIFT;
|
|
- pa |= token & (PAGE_SIZE - 1);
|
|
- }
|
|
+ if (!hugepage_shift)
|
|
+ hugepage_shift = PAGE_SHIFT;
|
|
|
|
+ pa <<= PAGE_SHIFT;
|
|
+ pa |= token & ((1ul << hugepage_shift) - 1);
|
|
return pa;
|
|
}
|
|
|
|
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
|
|
index c28e949cc2229..3b871ecb3a921 100644
|
|
--- a/arch/powerpc/kernel/setup_64.c
|
|
+++ b/arch/powerpc/kernel/setup_64.c
|
|
@@ -231,10 +231,23 @@ static void cpu_ready_for_interrupts(void)
|
|
* If we are not in hypervisor mode the job is done once for
|
|
* the whole partition in configure_exceptions().
|
|
*/
|
|
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
|
|
- cpu_has_feature(CPU_FTR_ARCH_207S)) {
|
|
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
|
|
unsigned long lpcr = mfspr(SPRN_LPCR);
|
|
- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
|
|
+ unsigned long new_lpcr = lpcr;
|
|
+
|
|
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
|
|
+ /* P10 DD1 does not have HAIL */
|
|
+ if (pvr_version_is(PVR_POWER10) &&
|
|
+ (mfspr(SPRN_PVR) & 0xf00) == 0x100)
|
|
+ new_lpcr |= LPCR_AIL_3;
|
|
+ else
|
|
+ new_lpcr |= LPCR_HAIL;
|
|
+ } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
|
|
+ new_lpcr |= LPCR_AIL_3;
|
|
+ }
|
|
+
|
|
+ if (new_lpcr != lpcr)
|
|
+ mtspr(SPRN_LPCR, new_lpcr);
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
|
|
index e839a906fdf23..b14907209822e 100644
|
|
--- a/arch/powerpc/kernel/vdso.c
|
|
+++ b/arch/powerpc/kernel/vdso.c
|
|
@@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
|
|
{
|
|
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
|
|
|
|
- if (new_size != text_size + PAGE_SIZE)
|
|
+ if (new_size != text_size)
|
|
return -EINVAL;
|
|
|
|
- current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
|
|
+ current->mm->context.vdso = (void __user *)new_vma->vm_start;
|
|
|
|
return 0;
|
|
}
|
|
@@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
|
|
return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
|
|
}
|
|
|
|
+static struct vm_special_mapping vvar_spec __ro_after_init = {
|
|
+ .name = "[vvar]",
|
|
+};
|
|
+
|
|
static struct vm_special_mapping vdso32_spec __ro_after_init = {
|
|
.name = "[vdso]",
|
|
.mremap = vdso32_mremap,
|
|
@@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
|
|
*/
|
|
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
{
|
|
- struct mm_struct *mm = current->mm;
|
|
+ unsigned long vdso_size, vdso_base, mappings_size;
|
|
struct vm_special_mapping *vdso_spec;
|
|
+ unsigned long vvar_size = PAGE_SIZE;
|
|
+ struct mm_struct *mm = current->mm;
|
|
struct vm_area_struct *vma;
|
|
- unsigned long vdso_size;
|
|
- unsigned long vdso_base;
|
|
|
|
if (is_32bit_task()) {
|
|
vdso_spec = &vdso32_spec;
|
|
@@ -110,8 +114,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
|
|
vdso_base = 0;
|
|
}
|
|
|
|
- /* Add a page to the vdso size for the data page */
|
|
- vdso_size += PAGE_SIZE;
|
|
+ mappings_size = vdso_size + vvar_size;
|
|
+ mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
|
|
|
|
/*
|
|
* pick a base address for the vDSO in process space. We try to put it
|
|
@@ -119,9 +123,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
|
|
* and end up putting it elsewhere.
|
|
* Add enough to the size so that the result can be aligned.
|
|
*/
|
|
- vdso_base = get_unmapped_area(NULL, vdso_base,
|
|
- vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
|
|
- 0, 0);
|
|
+ vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
|
|
if (IS_ERR_VALUE(vdso_base))
|
|
return vdso_base;
|
|
|
|
@@ -133,7 +135,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
|
|
* install_special_mapping or the perf counter mmap tracking code
|
|
* will fail to recognise it as a vDSO.
|
|
*/
|
|
- mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
|
|
+ mm->context.vdso = (void __user *)vdso_base + vvar_size;
|
|
+
|
|
+ vma = _install_special_mapping(mm, vdso_base, vvar_size,
|
|
+ VM_READ | VM_MAYREAD | VM_IO |
|
|
+ VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
|
|
+ if (IS_ERR(vma))
|
|
+ return PTR_ERR(vma);
|
|
|
|
/*
|
|
* our vma flags don't have VM_WRITE so by default, the process isn't
|
|
@@ -145,9 +153,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
|
|
* It's fine to use that for setting breakpoints in the vDSO code
|
|
* pages though.
|
|
*/
|
|
- vma = _install_special_mapping(mm, vdso_base, vdso_size,
|
|
+ vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
|
|
VM_READ | VM_EXEC | VM_MAYREAD |
|
|
VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
|
|
+ if (IS_ERR(vma))
|
|
+ do_munmap(mm, vdso_base, vvar_size, NULL);
|
|
+
|
|
return PTR_ERR_OR_ZERO(vma);
|
|
}
|
|
|
|
@@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
|
|
if (!pagelist)
|
|
panic("%s: Cannot allocate page list for VDSO", __func__);
|
|
|
|
- pagelist[0] = virt_to_page(vdso_data);
|
|
-
|
|
for (i = 0; i < pages; i++)
|
|
- pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
|
|
+ pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
|
|
+
|
|
+ return pagelist;
|
|
+}
|
|
+
|
|
+static struct page ** __init vvar_setup_pages(void)
|
|
+{
|
|
+ struct page **pagelist;
|
|
|
|
+ /* .pages is NULL-terminated */
|
|
+ pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
|
|
+ if (!pagelist)
|
|
+ panic("%s: Cannot allocate page list for VVAR", __func__);
|
|
+
|
|
+ pagelist[0] = virt_to_page(vdso_data);
|
|
return pagelist;
|
|
}
|
|
|
|
@@ -295,6 +317,8 @@ static int __init vdso_init(void)
|
|
if (IS_ENABLED(CONFIG_PPC64))
|
|
vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
|
|
|
|
+ vvar_spec.pages = vvar_setup_pages();
|
|
+
|
|
smp_wmb();
|
|
|
|
return 0;
|
|
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
|
|
index 02b9e4d0dc40b..a8a7cb71086b3 100644
|
|
--- a/arch/powerpc/kexec/file_load_64.c
|
|
+++ b/arch/powerpc/kexec/file_load_64.c
|
|
@@ -960,6 +960,93 @@ unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
|
|
return fdt_size;
|
|
}
|
|
|
|
+/**
|
|
+ * add_node_props - Reads node properties from device node structure and add
|
|
+ * them to fdt.
|
|
+ * @fdt: Flattened device tree of the kernel
|
|
+ * @node_offset: offset of the node to add a property at
|
|
+ * @dn: device node pointer
|
|
+ *
|
|
+ * Returns 0 on success, negative errno on error.
|
|
+ */
|
|
+static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct property *pp;
|
|
+
|
|
+ if (!dn)
|
|
+ return -EINVAL;
|
|
+
|
|
+ for_each_property_of_node(dn, pp) {
|
|
+ ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
|
|
+ if (ret < 0) {
|
|
+ pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * update_cpus_node - Update cpus node of flattened device tree using of_root
|
|
+ * device node.
|
|
+ * @fdt: Flattened device tree of the kernel.
|
|
+ *
|
|
+ * Returns 0 on success, negative errno on error.
|
|
+ */
|
|
+static int update_cpus_node(void *fdt)
|
|
+{
|
|
+ struct device_node *cpus_node, *dn;
|
|
+ int cpus_offset, cpus_subnode_offset, ret = 0;
|
|
+
|
|
+ cpus_offset = fdt_path_offset(fdt, "/cpus");
|
|
+ if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
|
|
+ pr_err("Malformed device tree: error reading /cpus node: %s\n",
|
|
+ fdt_strerror(cpus_offset));
|
|
+ return cpus_offset;
|
|
+ }
|
|
+
|
|
+ if (cpus_offset > 0) {
|
|
+ ret = fdt_del_node(fdt, cpus_offset);
|
|
+ if (ret < 0) {
|
|
+ pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Add cpus node to fdt */
|
|
+ cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
|
|
+ if (cpus_offset < 0) {
|
|
+ pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Add cpus node properties */
|
|
+ cpus_node = of_find_node_by_path("/cpus");
|
|
+ ret = add_node_props(fdt, cpus_offset, cpus_node);
|
|
+ of_node_put(cpus_node);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ /* Loop through all subnodes of cpus and add them to fdt */
|
|
+ for_each_node_by_type(dn, "cpu") {
|
|
+ cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
|
|
+ if (cpus_subnode_offset < 0) {
|
|
+ pr_err("Unable to add %s subnode: %s\n", dn->full_name,
|
|
+ fdt_strerror(cpus_subnode_offset));
|
|
+ ret = cpus_subnode_offset;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ ret = add_node_props(fdt, cpus_subnode_offset, dn);
|
|
+ if (ret < 0)
|
|
+ goto out;
|
|
+ }
|
|
+out:
|
|
+ of_node_put(dn);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/**
|
|
* setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
|
|
* being loaded.
|
|
@@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
|
|
}
|
|
}
|
|
|
|
+ /* Update cpus nodes information to account hotplug CPUs. */
|
|
+ ret = update_cpus_node(fdt);
|
|
+ if (ret < 0)
|
|
+ goto out;
|
|
+
|
|
/* Update memory reserve map */
|
|
ret = get_reserved_memory_ranges(&rmem);
|
|
if (ret)
|
|
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
|
|
index e452158a18d77..c3e31fef0be1c 100644
|
|
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
|
|
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
|
|
@@ -8,6 +8,7 @@
|
|
*/
|
|
|
|
#include <linux/kvm_host.h>
|
|
+#include <linux/pkeys.h>
|
|
|
|
#include <asm/kvm_ppc.h>
|
|
#include <asm/kvm_book3s.h>
|
|
@@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
|
|
else
|
|
kvmppc_mmu_flush_icache(pfn);
|
|
|
|
+ rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
|
|
rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
|
|
|
|
/*
|
|
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
|
|
index 69a91b571845d..58991233381ed 100644
|
|
--- a/arch/powerpc/lib/Makefile
|
|
+++ b/arch/powerpc/lib/Makefile
|
|
@@ -5,6 +5,9 @@
|
|
|
|
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
|
|
|
|
+CFLAGS_code-patching.o += -fno-stack-protector
|
|
+CFLAGS_feature-fixups.o += -fno-stack-protector
|
|
+
|
|
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
|
|
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
|
|
|
|
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
|
|
index 7b947728d57ef..56007c763902a 100644
|
|
--- a/arch/s390/crypto/arch_random.c
|
|
+++ b/arch/s390/crypto/arch_random.c
|
|
@@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
|
|
|
|
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
|
|
{
|
|
+ /* max hunk is ARCH_RNG_BUF_SIZE */
|
|
+ if (nbytes > ARCH_RNG_BUF_SIZE)
|
|
+ return false;
|
|
+
|
|
/* lock rng buffer */
|
|
if (!spin_trylock(&arch_rng_lock))
|
|
return false;
|
|
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
|
|
index a7eab7be4db05..5412efe328f80 100644
|
|
--- a/arch/s390/kernel/dis.c
|
|
+++ b/arch/s390/kernel/dis.c
|
|
@@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
|
|
|
|
void print_fn_code(unsigned char *code, unsigned long len)
|
|
{
|
|
- char buffer[64], *ptr;
|
|
+ char buffer[128], *ptr;
|
|
int opsize, i;
|
|
|
|
while (len) {
|
|
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
|
index 21f851179ff08..95aefc3752008 100644
|
|
--- a/arch/x86/Kconfig
|
|
+++ b/arch/x86/Kconfig
|
|
@@ -1416,7 +1416,7 @@ config HIGHMEM4G
|
|
|
|
config HIGHMEM64G
|
|
bool "64GB"
|
|
- depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
|
|
+ depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
|
|
select X86_PAE
|
|
help
|
|
Select this if you have a 32-bit processor and more than 4
|
|
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
|
|
index 828f24d547b2f..708b2d23d9f4d 100644
|
|
--- a/arch/x86/Makefile
|
|
+++ b/arch/x86/Makefile
|
|
@@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
|
|
REALMODE_CFLAGS += -fno-stack-protector
|
|
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
|
|
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
|
|
+REALMODE_CFLAGS += $(CLANG_FLAGS)
|
|
export REALMODE_CFLAGS
|
|
|
|
# BITS is used as extension for files which are available in a 32 bit
|
|
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
|
|
index e0bc3988c3faa..6e5522aebbbd4 100644
|
|
--- a/arch/x86/boot/compressed/Makefile
|
|
+++ b/arch/x86/boot/compressed/Makefile
|
|
@@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
|
|
# Disable relocation relaxation in case the link is not PIE.
|
|
KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
|
|
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
|
|
+KBUILD_CFLAGS += $(CLANG_FLAGS)
|
|
|
|
# sev-es.c indirectly inludes inat-table.h which is generated during
|
|
# compilation and stored in $(objtree). Add the directory to the includes so
|
|
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
|
|
index aa561795efd16..a6dea4e8a082f 100644
|
|
--- a/arch/x86/boot/compressed/mem_encrypt.S
|
|
+++ b/arch/x86/boot/compressed/mem_encrypt.S
|
|
@@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
|
|
push %ecx
|
|
push %edx
|
|
|
|
- /* Check if running under a hypervisor */
|
|
- movl $1, %eax
|
|
- cpuid
|
|
- bt $31, %ecx /* Check the hypervisor bit */
|
|
- jnc .Lno_sev
|
|
-
|
|
movl $0x80000000, %eax /* CPUID to check the highest leaf */
|
|
cpuid
|
|
cmpl $0x8000001f, %eax /* See if 0x8000001f is available */
|
|
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
|
index 35ad8480c464e..25148ebd36341 100644
|
|
--- a/arch/x86/kernel/cpu/common.c
|
|
+++ b/arch/x86/kernel/cpu/common.c
|
|
@@ -1847,7 +1847,7 @@ static inline void setup_getcpu(int cpu)
|
|
unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
|
|
struct desc_struct d = { };
|
|
|
|
- if (boot_cpu_has(X86_FEATURE_RDTSCP))
|
|
+ if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
|
|
write_rdtscp_aux(cpudata);
|
|
|
|
/* Store CPU and node number in limit. */
|
|
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
|
|
index cdc04d0912423..387b716698187 100644
|
|
--- a/arch/x86/kernel/sev-es-shared.c
|
|
+++ b/arch/x86/kernel/sev-es-shared.c
|
|
@@ -186,7 +186,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
|
|
* make it accessible to the hypervisor.
|
|
*
|
|
* In particular, check for:
|
|
- * - Hypervisor CPUID bit
|
|
* - Availability of CPUID leaf 0x8000001f
|
|
* - SEV CPUID bit.
|
|
*
|
|
@@ -194,10 +193,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
|
|
* can't be checked here.
|
|
*/
|
|
|
|
- if ((fn == 1 && !(regs->cx & BIT(31))))
|
|
- /* Hypervisor bit */
|
|
- goto fail;
|
|
- else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
|
|
+ if (fn == 0x80000000 && (regs->ax < 0x8000001f))
|
|
/* SEV leaf check */
|
|
goto fail;
|
|
else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
|
|
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
|
|
index 6c5eb6f3f14f4..a19374d261013 100644
|
|
--- a/arch/x86/mm/mem_encrypt_identity.c
|
|
+++ b/arch/x86/mm/mem_encrypt_identity.c
|
|
@@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
|
|
|
|
#define AMD_SME_BIT BIT(0)
|
|
#define AMD_SEV_BIT BIT(1)
|
|
- /*
|
|
- * Set the feature mask (SME or SEV) based on whether we are
|
|
- * running under a hypervisor.
|
|
- */
|
|
- eax = 1;
|
|
- ecx = 0;
|
|
- native_cpuid(&eax, &ebx, &ecx, &edx);
|
|
- feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
|
|
+
|
|
+ /* Check the SEV MSR whether SEV or SME is enabled */
|
|
+ sev_status = __rdmsr(MSR_AMD64_SEV);
|
|
+ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
|
|
|
|
/*
|
|
* Check for the SME/SEV feature:
|
|
@@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
|
|
|
|
/* Check if memory encryption is enabled */
|
|
if (feature_mask == AMD_SME_BIT) {
|
|
+ /*
|
|
+ * No SME if Hypervisor bit is set. This check is here to
|
|
+ * prevent a guest from trying to enable SME. For running as a
|
|
+ * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
|
|
+ * might be other hypervisors which emulate that MSR as non-zero
|
|
+ * or even pass it through to the guest.
|
|
+ * A malicious hypervisor can still trick a guest into this
|
|
+ * path, but there is no way to protect against that.
|
|
+ */
|
|
+ eax = 1;
|
|
+ ecx = 0;
|
|
+ native_cpuid(&eax, &ebx, &ecx, &edx);
|
|
+ if (ecx & BIT(31))
|
|
+ return;
|
|
+
|
|
/* For SME, check the SYSCFG MSR */
|
|
msr = __rdmsr(MSR_K8_SYSCFG);
|
|
if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
|
|
return;
|
|
} else {
|
|
- /* For SEV, check the SEV MSR */
|
|
- msr = __rdmsr(MSR_AMD64_SEV);
|
|
- if (!(msr & MSR_AMD64_SEV_ENABLED))
|
|
- return;
|
|
-
|
|
- /* Save SEV_STATUS to avoid reading MSR again */
|
|
- sev_status = msr;
|
|
-
|
|
/* SEV state cannot be controlled by a command line option */
|
|
sme_me_mask = me_mask;
|
|
sev_enabled = true;
|
|
diff --git a/crypto/api.c b/crypto/api.c
|
|
index ed08cbd5b9d3f..c4eda56cff891 100644
|
|
--- a/crypto/api.c
|
|
+++ b/crypto/api.c
|
|
@@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
|
|
{
|
|
struct crypto_alg *alg;
|
|
|
|
- if (unlikely(!mem))
|
|
+ if (IS_ERR_OR_NULL(mem))
|
|
return;
|
|
|
|
alg = tfm->__crt_alg;
|
|
diff --git a/crypto/rng.c b/crypto/rng.c
|
|
index a888d84b524a4..fea082b25fe4b 100644
|
|
--- a/crypto/rng.c
|
|
+++ b/crypto/rng.c
|
|
@@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
|
|
u8 *buf = NULL;
|
|
int err;
|
|
|
|
- crypto_stats_get(alg);
|
|
if (!seed && slen) {
|
|
buf = kmalloc(slen, GFP_KERNEL);
|
|
- if (!buf) {
|
|
- crypto_alg_put(alg);
|
|
+ if (!buf)
|
|
return -ENOMEM;
|
|
- }
|
|
|
|
err = get_random_bytes_wait(buf, slen);
|
|
- if (err) {
|
|
- crypto_alg_put(alg);
|
|
+ if (err)
|
|
goto out;
|
|
- }
|
|
seed = buf;
|
|
}
|
|
|
|
+ crypto_stats_get(alg);
|
|
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
|
|
crypto_stats_rng_seed(alg, err);
|
|
out:
|
|
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
|
|
index f2d0e5915dab5..0a0a982f9c28d 100644
|
|
--- a/drivers/acpi/arm64/gtdt.c
|
|
+++ b/drivers/acpi/arm64/gtdt.c
|
|
@@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
|
|
int index)
|
|
{
|
|
struct platform_device *pdev;
|
|
- int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
|
|
+ int irq;
|
|
|
|
/*
|
|
* According to SBSA specification the size of refresh and control
|
|
@@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
|
|
struct resource res[] = {
|
|
DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
|
|
DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
|
|
- DEFINE_RES_IRQ(irq),
|
|
+ {},
|
|
};
|
|
int nr_res = ARRAY_SIZE(res);
|
|
|
|
@@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
|
|
|
|
if (!(wd->refresh_frame_address && wd->control_frame_address)) {
|
|
pr_err(FW_BUG "failed to get the Watchdog base address.\n");
|
|
- acpi_unregister_gsi(wd->timer_interrupt);
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
|
|
+ res[2] = (struct resource)DEFINE_RES_IRQ(irq);
|
|
if (irq <= 0) {
|
|
pr_warn("failed to map the Watchdog interrupt.\n");
|
|
nr_res--;
|
|
@@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
|
|
*/
|
|
pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
|
|
if (IS_ERR(pdev)) {
|
|
- acpi_unregister_gsi(wd->timer_interrupt);
|
|
+ if (irq > 0)
|
|
+ acpi_unregister_gsi(wd->timer_interrupt);
|
|
return PTR_ERR(pdev);
|
|
}
|
|
|
|
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
|
|
index 7b54dc95d36b3..4058e02410917 100644
|
|
--- a/drivers/acpi/custom_method.c
|
|
+++ b/drivers/acpi/custom_method.c
|
|
@@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
|
|
sizeof(struct acpi_table_header)))
|
|
return -EFAULT;
|
|
uncopied_bytes = max_size = table.length;
|
|
+ /* make sure the buf is not allocated */
|
|
+ kfree(buf);
|
|
buf = kzalloc(max_size, GFP_KERNEL);
|
|
if (!buf)
|
|
return -ENOMEM;
|
|
@@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
|
|
(*ppos + count < count) ||
|
|
(count > uncopied_bytes)) {
|
|
kfree(buf);
|
|
+ buf = NULL;
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
|
|
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
|
|
}
|
|
|
|
- kfree(buf);
|
|
return count;
|
|
}
|
|
|
|
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
|
|
index 00ba8e5a1ccc0..33192a8f687d6 100644
|
|
--- a/drivers/ata/ahci.c
|
|
+++ b/drivers/ata/ahci.c
|
|
@@ -1772,6 +1772,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
|
|
|
|
#ifdef CONFIG_ARM64
|
|
+ if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
|
|
+ pdev->device == 0xa235 &&
|
|
+ pdev->revision < 0x30)
|
|
+ hpriv->flags |= AHCI_HFLAG_NO_SXS;
|
|
+
|
|
if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
|
|
hpriv->irq_handler = ahci_thunderx_irq_handler;
|
|
#endif
|
|
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
|
|
index 98b8baa47dc5e..d1f284f0c83d9 100644
|
|
--- a/drivers/ata/ahci.h
|
|
+++ b/drivers/ata/ahci.h
|
|
@@ -242,6 +242,7 @@ enum {
|
|
suspend/resume */
|
|
AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
|
|
from phy_power_on() */
|
|
+ AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */
|
|
|
|
/* ap->flags bits */
|
|
|
|
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
|
|
index ea5bf5f4cbed5..fec2e9754aed2 100644
|
|
--- a/drivers/ata/libahci.c
|
|
+++ b/drivers/ata/libahci.c
|
|
@@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
|
|
cap |= HOST_CAP_ALPM;
|
|
}
|
|
|
|
+ if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
|
|
+ dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
|
|
+ cap &= ~HOST_CAP_SXS;
|
|
+ }
|
|
+
|
|
if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
|
|
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
|
|
port_map, hpriv->force_port_map);
|
|
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
|
|
index d4aa6bfc95557..526c77cd7a506 100644
|
|
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
|
|
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
|
|
@@ -432,10 +432,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
|
|
* i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
|
|
* of sysfs link already was removed already.
|
|
*/
|
|
- if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
|
|
- sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
|
|
+ if (dev->blk_symlink_name) {
|
|
+ if (try_module_get(THIS_MODULE)) {
|
|
+ sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
|
|
+ module_put(THIS_MODULE);
|
|
+ }
|
|
+ /* It should be freed always. */
|
|
kfree(dev->blk_symlink_name);
|
|
- module_put(THIS_MODULE);
|
|
+ dev->blk_symlink_name = NULL;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
|
|
index a6a68d44f517c..677770f32843f 100644
|
|
--- a/drivers/block/rnbd/rnbd-srv.c
|
|
+++ b/drivers/block/rnbd/rnbd-srv.c
|
|
@@ -341,7 +341,9 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
|
|
struct rnbd_srv_session *sess = sess_dev->sess;
|
|
|
|
sess_dev->keep_id = true;
|
|
- mutex_lock(&sess->lock);
|
|
+ /* It is already started to close by client's close message. */
|
|
+ if (!mutex_trylock(&sess->lock))
|
|
+ return;
|
|
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
|
|
mutex_unlock(&sess->lock);
|
|
}
|
|
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
|
|
index 08c45457c90fe..9ed047f698d19 100644
|
|
--- a/drivers/bus/mhi/core/init.c
|
|
+++ b/drivers/bus/mhi/core/init.c
|
|
@@ -547,6 +547,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
|
|
struct mhi_ring *buf_ring;
|
|
struct mhi_ring *tre_ring;
|
|
struct mhi_chan_ctxt *chan_ctxt;
|
|
+ u32 tmp;
|
|
|
|
buf_ring = &mhi_chan->buf_ring;
|
|
tre_ring = &mhi_chan->tre_ring;
|
|
@@ -560,7 +561,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
|
|
vfree(buf_ring->base);
|
|
|
|
buf_ring->base = tre_ring->base = NULL;
|
|
+ tre_ring->ctxt_wp = NULL;
|
|
chan_ctxt->rbase = 0;
|
|
+ chan_ctxt->rlen = 0;
|
|
+ chan_ctxt->rp = 0;
|
|
+ chan_ctxt->wp = 0;
|
|
+
|
|
+ tmp = chan_ctxt->chcfg;
|
|
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
|
|
+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
|
|
+ chan_ctxt->chcfg = tmp;
|
|
+
|
|
+ /* Update to all cores */
|
|
+ smp_wmb();
|
|
}
|
|
|
|
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
|
|
@@ -858,12 +871,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|
u32 soc_info;
|
|
int ret, i;
|
|
|
|
- if (!mhi_cntrl)
|
|
- return -EINVAL;
|
|
-
|
|
- if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
|
|
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
|
|
+ !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
|
|
!mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
|
|
- !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
|
|
+ !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
|
|
return -EINVAL;
|
|
|
|
ret = parse_config(mhi_cntrl, config);
|
|
@@ -885,8 +896,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
|
|
init_waitqueue_head(&mhi_cntrl->state_event);
|
|
|
|
- mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
|
|
- ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
|
|
+ mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
|
|
if (!mhi_cntrl->hiprio_wq) {
|
|
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
|
|
ret = -ENOMEM;
|
|
@@ -1291,7 +1301,8 @@ static int mhi_driver_remove(struct device *dev)
|
|
|
|
mutex_lock(&mhi_chan->mutex);
|
|
|
|
- if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
|
|
+ if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
|
|
+ ch_state[dir] == MHI_CH_STATE_STOP) &&
|
|
!mhi_chan->offload_ch)
|
|
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
|
|
|
|
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
|
|
index d34d7e90e38d9..da495f68f70ec 100644
|
|
--- a/drivers/bus/mhi/core/main.c
|
|
+++ b/drivers/bus/mhi/core/main.c
|
|
@@ -222,10 +222,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
|
|
smp_wmb();
|
|
}
|
|
|
|
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
|
|
+{
|
|
+ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
|
|
+}
|
|
+
|
|
int mhi_destroy_device(struct device *dev, void *data)
|
|
{
|
|
+ struct mhi_chan *ul_chan, *dl_chan;
|
|
struct mhi_device *mhi_dev;
|
|
struct mhi_controller *mhi_cntrl;
|
|
+ enum mhi_ee_type ee = MHI_EE_MAX;
|
|
|
|
if (dev->bus != &mhi_bus_type)
|
|
return 0;
|
|
@@ -237,6 +244,17 @@ int mhi_destroy_device(struct device *dev, void *data)
|
|
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
|
|
return 0;
|
|
|
|
+ ul_chan = mhi_dev->ul_chan;
|
|
+ dl_chan = mhi_dev->dl_chan;
|
|
+
|
|
+ /*
|
|
+ * If execution environment is specified, remove only those devices that
|
|
+ * started in them based on ee_mask for the channels as we move on to a
|
|
+ * different execution environment
|
|
+ */
|
|
+ if (data)
|
|
+ ee = *(enum mhi_ee_type *)data;
|
|
+
|
|
/*
|
|
* For the suspend and resume case, this function will get called
|
|
* without mhi_unregister_controller(). Hence, we need to drop the
|
|
@@ -244,11 +262,19 @@ int mhi_destroy_device(struct device *dev, void *data)
|
|
* be sure that there will be no instances of mhi_dev left after
|
|
* this.
|
|
*/
|
|
- if (mhi_dev->ul_chan)
|
|
- put_device(&mhi_dev->ul_chan->mhi_dev->dev);
|
|
+ if (ul_chan) {
|
|
+ if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
|
|
+ return 0;
|
|
+
|
|
+ put_device(&ul_chan->mhi_dev->dev);
|
|
+ }
|
|
|
|
- if (mhi_dev->dl_chan)
|
|
- put_device(&mhi_dev->dl_chan->mhi_dev->dev);
|
|
+ if (dl_chan) {
|
|
+ if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
|
|
+ return 0;
|
|
+
|
|
+ put_device(&dl_chan->mhi_dev->dev);
|
|
+ }
|
|
|
|
dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
|
|
mhi_dev->name);
|
|
@@ -351,7 +377,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
 	struct mhi_event_ctxt *er_ctxt =
 		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 	struct mhi_ring *ev_ring = &mhi_event->ring;
-	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	dma_addr_t ptr = er_ctxt->rp;
+	void *dev_rp;
+
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return IRQ_HANDLED;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 
 	/* Only proceed if event ring has pending events */
 	if (ev_ring->rp == dev_rp)
@@ -377,7 +412,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	enum mhi_state state = MHI_STATE_MAX;
 	enum mhi_pm_state pm_state = 0;
-	enum mhi_ee_type ee = 0;
+	enum mhi_ee_type ee = MHI_EE_MAX;
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -386,8 +421,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 	}
 
 	state = mhi_get_mhi_state(mhi_cntrl);
-	ee = mhi_cntrl->ee;
-	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+	ee = mhi_get_exec_env(mhi_cntrl);
 	dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
 		TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
 		TO_MHI_STATE_STR(state));
@@ -399,27 +433,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 	}
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
-	/* If device supports RDDM don't bother processing SYS error */
-	if (mhi_cntrl->rddm_image) {
-		/* host may be performing a device power down already */
-		if (!mhi_is_active(mhi_cntrl))
-			goto exit_intvec;
+	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
+		goto exit_intvec;
 
-		if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
+	switch (ee) {
+	case MHI_EE_RDDM:
+		/* proceed if power down is not already in progress */
+		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
 			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+			mhi_cntrl->ee = ee;
 			wake_up_all(&mhi_cntrl->state_event);
 		}
-		goto exit_intvec;
-	}
-
-	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+		break;
+	case MHI_EE_PBL:
+	case MHI_EE_EDL:
+	case MHI_EE_PTHRU:
+		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+		mhi_cntrl->ee = ee;
 		wake_up_all(&mhi_cntrl->state_event);
-
-		/* For fatal errors, we let controller decide next step */
-		if (MHI_IN_PBL(ee))
-			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
-		else
-			mhi_pm_sys_err_handler(mhi_cntrl);
+		mhi_pm_sys_err_handler(mhi_cntrl);
+		break;
+	default:
+		wake_up_all(&mhi_cntrl->state_event);
+		mhi_pm_sys_err_handler(mhi_cntrl);
+		break;
 	}
 
 exit_intvec:
@@ -504,6 +541,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 		struct mhi_buf_info *buf_info;
 		u16 xfer_len;
 
+		if (!is_valid_ring_ptr(tre_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event element points outside of the tre ring\n");
+			break;
+		}
 		/* Get the TRB this event points to */
 		ev_tre = mhi_to_virtual(tre_ring, ptr);
 
@@ -663,6 +705,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 	struct mhi_chan *mhi_chan;
 	u32 chan;
 
+	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event element points outside of the cmd ring\n");
+		return;
+	}
+
 	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 
 	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
@@ -687,6 +735,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	u32 chan;
 	int count = 0;
+	dma_addr_t ptr = er_ctxt->rp;
 
 	/*
 	 * This is a quick check to avoid unnecessary event processing
@@ -696,7 +745,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 	local_rp = ev_ring->rp;
 
 	while (dev_rp != local_rp) {
@@ -802,6 +857,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			 */
 			if (chan < mhi_cntrl->max_chan) {
 				mhi_chan = &mhi_cntrl->mhi_chan[chan];
+				if (!mhi_chan->configured)
+					break;
 				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
 				event_quota--;
 			}
@@ -813,7 +870,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 		local_rp = ev_ring->rp;
-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+		ptr = er_ctxt->rp;
+		if (!is_valid_ring_ptr(ev_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event ring rp points outside of the event ring\n");
+			return -EIO;
+		}
+
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
 		count++;
 	}
 
@@ -836,11 +901,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 	int count = 0;
 	u32 chan;
 	struct mhi_chan *mhi_chan;
+	dma_addr_t ptr = er_ctxt->rp;
 
 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 	local_rp = ev_ring->rp;
 
 	while (dev_rp != local_rp && event_quota > 0) {
@@ -854,7 +926,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 		 * Only process the event ring elements whose channel
 		 * ID is within the maximum supported range.
 		 */
-		if (chan < mhi_cntrl->max_chan) {
+		if (chan < mhi_cntrl->max_chan &&
+		    mhi_cntrl->mhi_chan[chan].configured) {
 			mhi_chan = &mhi_cntrl->mhi_chan[chan];
 
 			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
@@ -868,7 +941,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 		local_rp = ev_ring->rp;
-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+		ptr = er_ctxt->rp;
+		if (!is_valid_ring_ptr(ev_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event ring rp points outside of the event ring\n");
+			return -EIO;
+		}
+
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
 		count++;
 	}
 	read_lock_bh(&mhi_cntrl->pm_lock);
@@ -1407,6 +1488,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
 	struct mhi_ring *ev_ring;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	unsigned long flags;
+	dma_addr_t ptr;
 
 	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
 
@@ -1414,7 +1496,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
 
 	/* mark all stale events related to channel as STALE event */
 	spin_lock_irqsave(&mhi_event->lock, flags);
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	ptr = er_ctxt->rp;
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		dev_rp = ev_ring->rp;
+	} else {
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
+	}
 
 	local_rp = ev_ring->rp;
 	while (dev_rp != local_rp) {
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 681960c72d2a8..277704af7eb6f 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -377,24 +377,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_event *mhi_event;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
 	int i, ret;
 
 	dev_dbg(dev, "Processing Mission Mode transition\n");
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
-		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+		ee = mhi_get_exec_env(mhi_cntrl);
 
-	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+	if (!MHI_IN_MISSION_MODE(ee)) {
 		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
 		write_unlock_irq(&mhi_cntrl->pm_lock);
 		wake_up_all(&mhi_cntrl->state_event);
 		return -EIO;
 	}
+	mhi_cntrl->ee = ee;
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
 	wake_up_all(&mhi_cntrl->state_event);
 
+	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
+			      mhi_destroy_device);
 	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
 
 	/* Force MHI to be in M0 state before continuing */
@@ -1092,7 +1096,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 							    &val) ||
 				   !val,
 				   msecs_to_jiffies(mhi_cntrl->timeout_ms));
-	if (ret) {
+	if (!ret) {
 		ret = -EIO;
 		dev_info(dev, "Failed to reset MHI due to syserr state\n");
 		goto error_bhi_offset;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
|
|
index 3d74f237f005b..9e535336689fd 100644
|
|
--- a/drivers/bus/ti-sysc.c
|
|
+++ b/drivers/bus/ti-sysc.c
|
|
@@ -635,6 +635,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
|
|
return 0;
|
|
}
|
|
|
|
+/* Interconnect instances to probe before l4_per instances */
|
|
+static struct resource early_bus_ranges[] = {
|
|
+ /* am3/4 l4_wkup */
|
|
+ { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
|
|
+ /* omap4/5 and dra7 l4_cfg */
|
|
+ { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
|
|
+ /* omap4 l4_wkup */
|
|
+ { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
|
|
+ /* omap5 and dra7 l4_wkup without dra7 dcan segment */
|
|
+ { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
|
|
+};
|
|
+
|
|
+static atomic_t sysc_defer = ATOMIC_INIT(10);
|
|
+
|
|
+/**
|
|
+ * sysc_defer_non_critical - defer non_critical interconnect probing
|
|
+ * @ddata: device driver data
|
|
+ *
|
|
+ * We want to probe l4_cfg and l4_wkup interconnect instances before any
|
|
+ * l4_per instances as l4_per instances depend on resources on l4_cfg and
|
|
+ * l4_wkup interconnects.
|
|
+ */
|
|
+static int sysc_defer_non_critical(struct sysc *ddata)
|
|
+{
|
|
+ struct resource *res;
|
|
+ int i;
|
|
+
|
|
+ if (!atomic_read(&sysc_defer))
|
|
+ return 0;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
|
|
+ res = &early_bus_ranges[i];
|
|
+ if (ddata->module_pa >= res->start &&
|
|
+ ddata->module_pa <= res->end) {
|
|
+ atomic_set(&sysc_defer, 0);
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ atomic_dec_if_positive(&sysc_defer);
|
|
+
|
|
+ return -EPROBE_DEFER;
|
|
+}
|
|
+
|
|
static struct device_node *stdout_path;
|
|
|
|
static void sysc_init_stdout_path(struct sysc *ddata)
|
|
@@ -863,6 +908,10 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
|
|
if (error)
|
|
return error;
|
|
|
|
+ error = sysc_defer_non_critical(ddata);
|
|
+ if (error)
|
|
+ return error;
|
|
+
|
|
sysc_check_children(ddata);
|
|
|
|
error = sysc_parse_registers(ddata);
|
|
diff --git a/drivers/char/random.c b/drivers/char/random.c
|
|
index a894c0559a8cf..ffec899f44509 100644
|
|
--- a/drivers/char/random.c
|
|
+++ b/drivers/char/random.c
|
|
@@ -819,7 +819,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
|
|
|
|
static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
|
|
{
|
|
- memcpy(&crng->state[0], "expand 32-byte k", 16);
|
|
+ chacha_init_consts(crng->state);
|
|
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
|
|
crng_init_try_arch(crng);
|
|
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
|
|
@@ -827,7 +827,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
|
|
|
|
static void __init crng_initialize_primary(struct crng_state *crng)
|
|
{
|
|
- memcpy(&crng->state[0], "expand 32-byte k", 16);
|
|
+ chacha_init_consts(crng->state);
|
|
_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
|
|
if (crng_init_try_arch_early(crng) && trust_cpu) {
|
|
invalidate_batched_entropy();
|
|
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
|
|
index 3633ed70f48fa..1b18ce5ebab1e 100644
|
|
--- a/drivers/char/tpm/eventlog/acpi.c
|
|
+++ b/drivers/char/tpm/eventlog/acpi.c
|
|
@@ -41,6 +41,27 @@ struct acpi_tcpa {
|
|
};
|
|
};
|
|
|
|
+/* Check that the given log is indeed a TPM2 log. */
|
|
+static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
|
|
+{
|
|
+ struct tcg_efi_specid_event_head *efispecid;
|
|
+ struct tcg_pcr_event *event_header;
|
|
+ int n;
|
|
+
|
|
+ if (len < sizeof(*event_header))
|
|
+ return false;
|
|
+ len -= sizeof(*event_header);
|
|
+ event_header = bios_event_log;
|
|
+
|
|
+ if (len < sizeof(*efispecid))
|
|
+ return false;
|
|
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
|
|
+
|
|
+ n = memcmp(efispecid->signature, TCG_SPECID_SIG,
|
|
+ sizeof(TCG_SPECID_SIG));
|
|
+ return n == 0;
|
|
+}
|
|
+
|
|
/* read binary bios log */
|
|
int tpm_read_log_acpi(struct tpm_chip *chip)
|
|
{
|
|
@@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
|
|
struct acpi_table_tpm2 *tbl;
|
|
struct acpi_tpm2_phy *tpm2_phy;
|
|
int format;
|
|
+ int ret;
|
|
|
|
log = &chip->log;
|
|
|
|
@@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
|
|
|
|
log->bios_event_log_end = log->bios_event_log + len;
|
|
|
|
+ ret = -EIO;
|
|
virt = acpi_os_map_iomem(start, len);
|
|
if (!virt)
|
|
goto err;
|
|
@@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
|
|
memcpy_fromio(log->bios_event_log, virt, len);
|
|
|
|
acpi_os_unmap_iomem(virt, len);
|
|
+
|
|
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
|
|
+ !tpm_is_tpm2_log(log->bios_event_log, len)) {
|
|
+ /* try EFI log next */
|
|
+ ret = -ENODEV;
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
return format;
|
|
|
|
err:
|
|
kfree(log->bios_event_log);
|
|
log->bios_event_log = NULL;
|
|
- return -EIO;
|
|
+ return ret;
|
|
|
|
}
|
|
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
|
|
index 7460f230bae4c..8512ec76d5260 100644
|
|
--- a/drivers/char/tpm/eventlog/common.c
|
|
+++ b/drivers/char/tpm/eventlog/common.c
|
|
@@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
|
|
int log_version;
|
|
int rc = 0;
|
|
|
|
+ if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
|
|
+ return;
|
|
+
|
|
rc = tpm_read_log(chip);
|
|
if (rc < 0)
|
|
return;
|
|
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
|
|
index 35229e5143cac..e6cb9d525e30c 100644
|
|
--- a/drivers/char/tpm/eventlog/efi.c
|
|
+++ b/drivers/char/tpm/eventlog/efi.c
|
|
@@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
|
|
{
|
|
|
|
struct efi_tcg2_final_events_table *final_tbl = NULL;
|
|
+ int final_events_log_size = efi_tpm_final_log_size;
|
|
struct linux_efi_tpm_eventlog *log_tbl;
|
|
struct tpm_bios_log *log;
|
|
u32 log_size;
|
|
@@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
|
|
ret = tpm_log_version;
|
|
|
|
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
|
|
- efi_tpm_final_log_size == 0 ||
|
|
+ final_events_log_size == 0 ||
|
|
tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
|
|
goto out;
|
|
|
|
final_tbl = memremap(efi.tpm_final_log,
|
|
- sizeof(*final_tbl) + efi_tpm_final_log_size,
|
|
+ sizeof(*final_tbl) + final_events_log_size,
|
|
MEMREMAP_WB);
|
|
if (!final_tbl) {
|
|
pr_err("Could not map UEFI TPM final log\n");
|
|
@@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
|
|
goto out;
|
|
}
|
|
|
|
- efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
|
|
+ /*
|
|
+ * The 'final events log' size excludes the 'final events preboot log'
|
|
+ * at its beginning.
|
|
+ */
|
|
+ final_events_log_size -= log_tbl->final_events_preboot_size;
|
|
|
|
+ /*
|
|
+ * Allocate memory for the 'combined log' where we will append the
|
|
+ * 'final events log' to.
|
|
+ */
|
|
tmp = krealloc(log->bios_event_log,
|
|
- log_size + efi_tpm_final_log_size,
|
|
+ log_size + final_events_log_size,
|
|
GFP_KERNEL);
|
|
if (!tmp) {
|
|
kfree(log->bios_event_log);
|
|
@@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
|
|
log->bios_event_log = tmp;
|
|
|
|
/*
|
|
- * Copy any of the final events log that didn't also end up in the
|
|
- * main log. Events can be logged in both if events are generated
|
|
+ * Append any of the 'final events log' that didn't also end up in the
|
|
+ * 'main log'. Events can be logged in both if events are generated
|
|
* between GetEventLog() and ExitBootServices().
|
|
*/
|
|
memcpy((void *)log->bios_event_log + log_size,
|
|
final_tbl->events + log_tbl->final_events_preboot_size,
|
|
- efi_tpm_final_log_size);
|
|
+ final_events_log_size);
|
|
+ /*
|
|
+ * The size of the 'combined log' is the size of the 'main log' plus
|
|
+ * the size of the 'final events log'.
|
|
+ */
|
|
log->bios_event_log_end = log->bios_event_log +
|
|
- log_size + efi_tpm_final_log_size;
|
|
+ log_size + final_events_log_size;
|
|
|
|
out:
|
|
memunmap(final_tbl);
|
|
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
|
|
index cd5df91036142..d62778884208c 100644
|
|
--- a/drivers/clk/socfpga/clk-gate-a10.c
|
|
+++ b/drivers/clk/socfpga/clk-gate-a10.c
|
|
@@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
|
|
if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
|
|
pr_err("%s: failed to find altr,sys-mgr regmap!\n",
|
|
__func__);
|
|
+ kfree(socfpga_clk);
|
|
return;
|
|
}
|
|
}
|
|
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
|
|
index 42e7e43b8fcd9..b1e2b697b21bd 100644
|
|
--- a/drivers/clocksource/dw_apb_timer_of.c
|
|
+++ b/drivers/clocksource/dw_apb_timer_of.c
|
|
@@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
|
|
return 0;
|
|
|
|
timer_clk = of_clk_get_by_name(np, "timer");
|
|
- if (IS_ERR(timer_clk))
|
|
- return PTR_ERR(timer_clk);
|
|
+ if (IS_ERR(timer_clk)) {
|
|
+ ret = PTR_ERR(timer_clk);
|
|
+ goto out_pclk_disable;
|
|
+ }
|
|
|
|
ret = clk_prepare_enable(timer_clk);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto out_timer_clk_put;
|
|
|
|
*rate = clk_get_rate(timer_clk);
|
|
- if (!(*rate))
|
|
- return -EINVAL;
|
|
+ if (!(*rate)) {
|
|
+ ret = -EINVAL;
|
|
+ goto out_timer_clk_disable;
|
|
+ }
|
|
|
|
return 0;
|
|
+
|
|
+out_timer_clk_disable:
|
|
+ clk_disable_unprepare(timer_clk);
|
|
+out_timer_clk_put:
|
|
+ clk_put(timer_clk);
|
|
+out_pclk_disable:
|
|
+ if (!IS_ERR(pclk)) {
|
|
+ clk_disable_unprepare(pclk);
|
|
+ clk_put(pclk);
|
|
+ }
|
|
+ iounmap(*base);
|
|
+ return ret;
|
|
}
|
|
|
|
static int __init add_clockevent(struct device_node *event_timer)
|
|
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
|
|
index 191966dc8d023..29c5e83500d33 100644
|
|
--- a/drivers/cpuidle/cpuidle-tegra.c
|
|
+++ b/drivers/cpuidle/cpuidle-tegra.c
|
|
@@ -135,13 +135,13 @@ static int tegra_cpuidle_c7_enter(void)
|
|
{
|
|
int err;
|
|
|
|
- if (tegra_cpuidle_using_firmware()) {
|
|
- err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
|
|
- if (err)
|
|
- return err;
|
|
+ err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
|
|
+ if (err && err != -ENOSYS)
|
|
+ return err;
|
|
|
|
- return call_firmware_op(do_idle, 0);
|
|
- }
|
|
+ err = call_firmware_op(do_idle, 0);
|
|
+ if (err != -ENOSYS)
|
|
+ return err;
|
|
|
|
return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
|
|
}
|
|
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
|
|
index 158422ff5695c..00194d1d9ae69 100644
|
|
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
|
|
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
|
|
@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
|
|
if (err)
|
|
goto error_alg;
|
|
|
|
- err = pm_runtime_get_sync(ce->dev);
|
|
+ err = pm_runtime_resume_and_get(ce->dev);
|
|
if (err < 0)
|
|
goto error_alg;
|
|
|
|
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
|
|
index ed2a69f82e1c1..7c355bc2fb066 100644
|
|
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
|
|
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
|
|
@@ -351,7 +351,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
|
|
op->enginectx.op.prepare_request = NULL;
|
|
op->enginectx.op.unprepare_request = NULL;
|
|
|
|
- err = pm_runtime_get_sync(op->ss->dev);
|
|
+ err = pm_runtime_resume_and_get(op->ss->dev);
|
|
if (err < 0) {
|
|
dev_err(op->ss->dev, "pm error %d\n", err);
|
|
goto error_pm;
|
|
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
|
|
index e0ddc684798dc..80e89066dbd1a 100644
|
|
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
|
|
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
|
|
@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
|
|
if (err)
|
|
goto error_alg;
|
|
|
|
- err = pm_runtime_get_sync(ss->dev);
|
|
+ err = pm_runtime_resume_and_get(ss->dev);
|
|
if (err < 0)
|
|
goto error_alg;
|
|
|
|
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
|
|
index 2eaa516b32311..8adcbb3271267 100644
|
|
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
|
|
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
|
|
@@ -546,7 +546,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
|
|
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
|
|
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
|
|
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
|
|
- dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
|
|
+ pr_err("get error skcipher iv size!\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
|
|
index a45bdcf3026df..0dd4c6b157de9 100644
|
|
--- a/drivers/crypto/omap-aes.c
|
|
+++ b/drivers/crypto/omap-aes.c
|
|
@@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
|
|
dd->err = 0;
|
|
}
|
|
|
|
- err = pm_runtime_get_sync(dd->dev);
|
|
+ err = pm_runtime_resume_and_get(dd->dev);
|
|
if (err < 0) {
|
|
- pm_runtime_put_noidle(dd->dev);
|
|
dev_err(dd->dev, "failed to get sync: %d\n", err);
|
|
return err;
|
|
}
|
|
@@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
|
|
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
|
|
|
|
pm_runtime_enable(dev);
|
|
- err = pm_runtime_get_sync(dev);
|
|
+ err = pm_runtime_resume_and_get(dev);
|
|
if (err < 0) {
|
|
dev_err(dev, "%s: failed to get_sync(%d)\n",
|
|
__func__, err);
|
|
@@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
|
|
|
|
static int omap_aes_resume(struct device *dev)
|
|
{
|
|
- pm_runtime_get_sync(dev);
|
|
+ pm_runtime_resume_and_get(dev);
|
|
return 0;
|
|
}
|
|
#endif
|
|
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
|
|
index 31c7a206a6296..362c2d18b2925 100644
|
|
--- a/drivers/crypto/qat/qat_common/qat_algs.c
|
|
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
|
|
@@ -718,7 +718,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
|
struct qat_alg_buf_list *bufl;
|
|
struct qat_alg_buf_list *buflout = NULL;
|
|
dma_addr_t blp;
|
|
- dma_addr_t bloutp = 0;
|
|
+ dma_addr_t bloutp;
|
|
struct scatterlist *sg;
|
|
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
|
|
|
|
@@ -730,6 +730,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
|
if (unlikely(!bufl))
|
|
return -ENOMEM;
|
|
|
|
+ for_each_sg(sgl, sg, n, i)
|
|
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
|
|
+
|
|
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
|
|
if (unlikely(dma_mapping_error(dev, blp)))
|
|
goto err_in;
|
|
@@ -763,10 +766,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
|
dev_to_node(&GET_DEV(inst->accel_dev)));
|
|
if (unlikely(!buflout))
|
|
goto err_in;
|
|
+
|
|
+ bufers = buflout->bufers;
|
|
+ for_each_sg(sglout, sg, n, i)
|
|
+ bufers[i].addr = DMA_MAPPING_ERROR;
|
|
+
|
|
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
|
|
if (unlikely(dma_mapping_error(dev, bloutp)))
|
|
goto err_out;
|
|
- bufers = buflout->bufers;
|
|
for_each_sg(sglout, sg, n, i) {
|
|
int y = sg_nctr;
|
|
|
|
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
|
|
index f300b0a5958a5..d7b1628fb4848 100644
|
|
--- a/drivers/crypto/sa2ul.c
|
|
+++ b/drivers/crypto/sa2ul.c
|
|
@@ -2350,7 +2350,7 @@ static int sa_ul_probe(struct platform_device *pdev)
|
|
dev_set_drvdata(sa_k3_dev, dev_data);
|
|
|
|
pm_runtime_enable(dev);
|
|
- ret = pm_runtime_get_sync(dev);
|
|
+ ret = pm_runtime_resume_and_get(dev);
|
|
if (ret < 0) {
|
|
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
|
|
ret);
|
|
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
|
|
index 2670c30332fad..7999b26a16ed0 100644
|
|
--- a/drivers/crypto/stm32/stm32-cryp.c
|
|
+++ b/drivers/crypto/stm32/stm32-cryp.c
|
|
@@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
|
|
int ret;
|
|
u32 cfg, hw_mode;
|
|
|
|
- pm_runtime_get_sync(cryp->dev);
|
|
+ pm_runtime_resume_and_get(cryp->dev);
|
|
|
|
/* Disable interrupt */
|
|
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
|
|
@@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
|
|
if (!cryp)
|
|
return -ENODEV;
|
|
|
|
- ret = pm_runtime_get_sync(cryp->dev);
|
|
+ ret = pm_runtime_resume_and_get(cryp->dev);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
|
|
index 7ac0573ef6630..389de9e3302d5 100644
|
|
--- a/drivers/crypto/stm32/stm32-hash.c
|
|
+++ b/drivers/crypto/stm32/stm32-hash.c
|
|
@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
|
|
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
|
|
struct stm32_hash_request_ctx *rctx)
|
|
{
|
|
- pm_runtime_get_sync(hdev->dev);
|
|
+ pm_runtime_resume_and_get(hdev->dev);
|
|
|
|
if (!(HASH_FLAGS_INIT & hdev->flags)) {
|
|
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
|
|
@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
|
|
u32 *preg;
|
|
unsigned int i;
|
|
|
|
- pm_runtime_get_sync(hdev->dev);
|
|
+ pm_runtime_resume_and_get(hdev->dev);
|
|
|
|
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
|
|
cpu_relax();
|
|
@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
|
|
|
|
preg = rctx->hw_context;
|
|
|
|
- pm_runtime_get_sync(hdev->dev);
|
|
+ pm_runtime_resume_and_get(hdev->dev);
|
|
|
|
stm32_hash_write(hdev, HASH_IMR, *preg++);
|
|
stm32_hash_write(hdev, HASH_STR, *preg++);
|
|
@@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
|
|
if (!hdev)
|
|
return -ENODEV;
|
|
|
|
- ret = pm_runtime_get_sync(hdev->dev);
|
|
+ ret = pm_runtime_resume_and_get(hdev->dev);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
|
|
index aae82db542a5e..76aacbac5869d 100644
|
|
--- a/drivers/extcon/extcon-arizona.c
|
|
+++ b/drivers/extcon/extcon-arizona.c
|
|
@@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
|
|
struct arizona *arizona = info->arizona;
|
|
int id_gpio = arizona->pdata.hpdet_id_gpio;
|
|
unsigned int report = EXTCON_JACK_HEADPHONE;
|
|
- int ret, reading;
|
|
+ int ret, reading, state;
|
|
bool mic = false;
|
|
|
|
mutex_lock(&info->lock);
|
|
@@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
|
|
}
|
|
|
|
/* If the cable was removed while measuring ignore the result */
|
|
- ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
|
|
- if (ret < 0) {
|
|
- dev_err(arizona->dev, "Failed to check cable state: %d\n",
|
|
- ret);
|
|
+ state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
|
|
+ if (state < 0) {
|
|
+ dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
|
|
goto out;
|
|
- } else if (!ret) {
|
|
+ } else if (!state) {
|
|
dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
|
|
goto done;
|
|
}
|
|
@@ -667,7 +666,7 @@ done:
|
|
gpio_set_value_cansleep(id_gpio, 0);
|
|
|
|
/* If we have a mic then reenable MICDET */
|
|
- if (mic || info->mic)
|
|
+ if (state && (mic || info->mic))
|
|
arizona_start_mic(info);
|
|
|
|
if (info->hpdet_active) {
|
|
@@ -675,7 +674,9 @@ done:
|
|
info->hpdet_active = false;
|
|
}
|
|
|
|
- info->hpdet_done = true;
|
|
+ /* Do not set hp_det done when the cable has been unplugged */
|
|
+ if (state)
|
|
+ info->hpdet_done = true;
|
|
|
|
out:
|
|
mutex_unlock(&info->lock);
|
|
@@ -1759,25 +1760,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
|
|
bool change;
|
|
int ret;
|
|
|
|
- ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
|
|
- ARIZONA_MICD_ENA, 0,
|
|
- &change);
|
|
- if (ret < 0) {
|
|
- dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
|
|
- ret);
|
|
- } else if (change) {
|
|
- regulator_disable(info->micvdd);
|
|
- pm_runtime_put(info->dev);
|
|
- }
|
|
-
|
|
- gpiod_put(info->micd_pol_gpio);
|
|
-
|
|
- pm_runtime_disable(&pdev->dev);
|
|
-
|
|
- regmap_update_bits(arizona->regmap,
|
|
- ARIZONA_MICD_CLAMP_CONTROL,
|
|
- ARIZONA_MICD_CLAMP_MODE_MASK, 0);
|
|
-
|
|
if (info->micd_clamp) {
|
|
jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
|
|
jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
|
|
@@ -1793,10 +1775,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
|
|
arizona_free_irq(arizona, jack_irq_rise, info);
|
|
arizona_free_irq(arizona, jack_irq_fall, info);
|
|
cancel_delayed_work_sync(&info->hpdet_work);
|
|
+ cancel_delayed_work_sync(&info->micd_detect_work);
|
|
+ cancel_delayed_work_sync(&info->micd_timeout_work);
|
|
+
|
|
+ ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
|
|
+ ARIZONA_MICD_ENA, 0,
|
|
+ &change);
|
|
+ if (ret < 0) {
|
|
+ dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
|
|
+ ret);
|
|
+ } else if (change) {
|
|
+ regulator_disable(info->micvdd);
|
|
+ pm_runtime_put(info->dev);
|
|
+ }
|
|
+
|
|
+ regmap_update_bits(arizona->regmap,
|
|
+ ARIZONA_MICD_CLAMP_CONTROL,
|
|
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0);
|
|
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
|
|
ARIZONA_JD1_ENA, 0);
|
|
arizona_clk32k_disable(arizona);
|
|
|
|
+ gpiod_put(info->micd_pol_gpio);
|
|
+
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
|
|
index 8a94388e38b33..a2ae9c3b95793 100644
|
|
--- a/drivers/firmware/efi/libstub/Makefile
|
|
+++ b/drivers/firmware/efi/libstub/Makefile
|
|
@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
|
|
-Wno-pointer-sign \
|
|
$(call cc-disable-warning, address-of-packed-member) \
|
|
$(call cc-disable-warning, gnu) \
|
|
- -fno-asynchronous-unwind-tables
|
|
+ -fno-asynchronous-unwind-tables \
|
|
+ $(CLANG_FLAGS)
|
|
|
|
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
|
|
# disable the stackleak plugin
|
|
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
|
|
index a2203d03c9e2b..bc108ee8e9eb4 100644
|
|
--- a/drivers/fpga/dfl-pci.c
|
|
+++ b/drivers/fpga/dfl-pci.c
|
|
@@ -61,14 +61,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
|
|
}
|
|
|
|
/* PCI Device ID */
|
|
-#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
|
|
-#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
|
|
-#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
|
|
-#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
|
|
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
|
|
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
|
|
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
|
|
+#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
|
|
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B
|
|
/* VF Device */
|
|
-#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
|
|
-#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
|
|
-#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
|
|
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
|
|
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
|
|
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
|
|
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C
|
|
|
|
static struct pci_device_id cci_pcie_id_tbl[] = {
|
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
|
|
@@ -78,6 +80,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
|
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
|
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
|
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
|
|
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
|
|
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
|
|
{0,}
|
|
};
|
|
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
index eacfca7762491..ccf30782e4910 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
@@ -3579,6 +3579,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
|
|
{
|
|
dev_info(adev->dev, "amdgpu: finishing device.\n");
|
|
flush_delayed_work(&adev->delayed_init_work);
|
|
+ ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
|
|
adev->shutdown = true;
|
|
|
|
kfree(adev->pci_state);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
|
|
index d56f4023ebb31..7e8e46c39dbd3 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
|
|
@@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
|
|
|
|
if (!ring || !ring->fence_drv.initialized)
|
|
continue;
|
|
+ if (!ring->no_scheduler)
|
|
+ drm_sched_fini(&ring->sched);
|
|
r = amdgpu_fence_wait_empty(ring);
|
|
if (r) {
|
|
/* no need to trigger GPU reset as we are unloading */
|
|
@@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
|
|
if (ring->fence_drv.irq_src)
|
|
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
|
|
ring->fence_drv.irq_type);
|
|
- if (!ring->no_scheduler)
|
|
- drm_sched_fini(&ring->sched);
|
|
+
|
|
del_timer_sync(&ring->fence_drv.fallback_timer);
|
|
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
|
|
dma_fence_put(ring->fence_drv.fences[j]);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
|
|
index bea57e8e793f7..b535f7c6c61bb 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
|
|
@@ -534,7 +534,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
|
|
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
|
|
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
|
|
|
|
- if (!src)
|
|
+ if (!src || !src->funcs || !src->funcs->set)
|
|
continue;
|
|
for (k = 0; k < src->num_types; k++)
|
|
amdgpu_irq_update(adev, src, k);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
|
|
index 8b87991a0470a..a884ec5bce3e6 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
|
|
@@ -943,7 +943,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
|
|
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
|
|
|
|
/* double check that we don't free the table twice */
|
|
- if (!ttm->sg->sgl)
|
|
+ if (!ttm->sg || !ttm->sg->sgl)
|
|
return;
|
|
|
|
/* unmap the pages mapped to the device */
|
|
@@ -1163,13 +1163,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
|
|
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
|
int r;
|
|
|
|
- if (!gtt->bound)
|
|
- return;
|
|
-
|
|
/* if the pages have userptr pinning then clear that first */
|
|
if (gtt->userptr)
|
|
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
|
|
|
|
+ if (!gtt->bound)
|
|
+ return;
|
|
+
|
|
if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
|
|
return;
|
|
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
|
|
index 8b989670ed663..431ae134a163b 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
|
|
@@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
|
|
if ((adev->asic_type == CHIP_POLARIS10 ||
|
|
adev->asic_type == CHIP_POLARIS11) &&
|
|
(adev->uvd.fw_version < FW_1_66_16))
|
|
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
|
|
+ DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
|
|
version_major, version_minor);
|
|
} else {
|
|
unsigned int enc_major, enc_minor, dec_minor;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
|
|
index 541ef6be390f0..6ef374cb3ee2a 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
|
|
@@ -470,15 +470,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
|
|
}
|
|
|
|
|
|
+/*
|
|
+ * NOTE psp_xgmi_node_info.num_hops layout is as follows:
|
|
+ * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
|
|
+ * num_hops[5:3] = reserved
|
|
+ * num_hops[2:0] = number of hops
|
|
+ */
|
|
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
|
|
struct amdgpu_device *peer_adev)
|
|
{
|
|
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
|
|
+ uint8_t num_hops_mask = 0x7;
|
|
int i;
|
|
|
|
for (i = 0 ; i < top->num_nodes; ++i)
|
|
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
|
|
- return top->nodes[i].num_hops;
|
|
+ return top->nodes[i].num_hops & num_hops_mask;
|
|
return -EINVAL;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
|
|
index 511712c2e382d..673d5e34f213c 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
|
|
@@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
|
|
|
|
return single_open(file, show, NULL);
|
|
}
|
|
+static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
|
|
+{
|
|
+ seq_printf(m, "echo gpu_id > hang_hws\n");
|
|
+ return 0;
|
|
+}
|
|
|
|
static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
|
|
const char __user *user_buf, size_t size, loff_t *ppos)
|
|
@@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
|
|
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
|
|
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
|
|
debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
|
|
- NULL, &kfd_debugfs_hang_hws_fops);
|
|
+ kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
|
|
}
|
|
|
|
void kfd_debugfs_fini(void)
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
|
|
index 4598a9a581251..a4266c4bca135 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
|
|
@@ -1128,6 +1128,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
|
|
|
|
static int initialize_cpsch(struct device_queue_manager *dqm)
|
|
{
|
|
+ uint64_t num_sdma_queues;
|
|
+ uint64_t num_xgmi_sdma_queues;
|
|
+
|
|
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
|
|
|
|
mutex_init(&dqm->lock_hidden);
|
|
@@ -1136,8 +1139,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
|
|
dqm->active_cp_queue_count = 0;
|
|
dqm->gws_queue_count = 0;
|
|
dqm->active_runlist = false;
|
|
- dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
|
|
- dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
|
|
+
|
|
+ num_sdma_queues = get_num_sdma_queues(dqm);
|
|
+ if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
|
|
+ dqm->sdma_bitmap = ULLONG_MAX;
|
|
+ else
|
|
+ dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
|
|
+
|
|
+ num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
|
|
+ if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
|
|
+ dqm->xgmi_sdma_bitmap = ULLONG_MAX;
|
|
+ else
|
|
+ dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
|
|
|
|
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
index 62a637c03f60f..fa4786a8296f0 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
@@ -5735,6 +5735,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
|
|
|
} while (stream == NULL && requested_bpc >= 6);
|
|
|
|
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
|
|
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
|
|
+
|
|
+ aconnector->force_yuv420_output = true;
|
|
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
|
|
+ dm_state, old_stream);
|
|
+ aconnector->force_yuv420_output = false;
|
|
+ }
|
|
+
|
|
return stream;
|
|
}
|
|
|
|
@@ -7250,10 +7259,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
|
|
int x, y;
|
|
int xorigin = 0, yorigin = 0;
|
|
|
|
- position->enable = false;
|
|
- position->x = 0;
|
|
- position->y = 0;
|
|
-
|
|
if (!crtc || !plane->state->fb)
|
|
return 0;
|
|
|
|
@@ -7300,7 +7305,7 @@ static void handle_cursor_update(struct drm_plane *plane,
|
|
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
|
|
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
|
uint64_t address = afb ? afb->address : 0;
|
|
- struct dc_cursor_position position;
|
|
+ struct dc_cursor_position position = {0};
|
|
struct dc_cursor_attributes attributes;
|
|
int ret;
|
|
|
|
@@ -9216,7 +9221,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
|
}
|
|
|
|
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
|
- if (adev->asic_type >= CHIP_NAVI10) {
|
|
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
|
|
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
|
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
|
|
ret = add_affected_mst_dsc_crtcs(state, crtc);
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
|
|
index 1182dafcef022..9dc034b4548a5 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
|
|
@@ -68,18 +68,6 @@ struct common_irq_params {
|
|
enum dc_irq_source irq_src;
|
|
};
|
|
|
|
-/**
|
|
- * struct irq_list_head - Linked-list for low context IRQ handlers.
|
|
- *
|
|
- * @head: The list_head within &struct handler_data
|
|
- * @work: A work_struct containing the deferred handler work
|
|
- */
|
|
-struct irq_list_head {
|
|
- struct list_head head;
|
|
- /* In case this interrupt needs post-processing, 'work' will be queued*/
|
|
- struct work_struct work;
|
|
-};
|
|
-
|
|
/**
|
|
* struct dm_compressor_info - Buffer info used by frame buffer compression
|
|
* @cpu_addr: MMIO cpu addr
|
|
@@ -270,7 +258,7 @@ struct amdgpu_display_manager {
|
|
* Note that handlers are called in the same order as they were
|
|
* registered (FIFO).
|
|
*/
|
|
- struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
|
|
+ struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
|
|
|
|
/**
|
|
* @irq_handler_list_high_tab:
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
index 11459fb09a372..a559ced7c2e09 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
@@ -150,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
|
|
*
|
|
* --- to get dp configuration
|
|
*
|
|
- * cat link_settings
|
|
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
|
|
*
|
|
* It will list current, verified, reported, preferred dp configuration.
|
|
* current -- for current video mode
|
|
@@ -163,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
|
|
* echo <lane_count> <link_rate> > link_settings
|
|
*
|
|
* for example, to force to 2 lane, 2.7GHz,
|
|
- * echo 4 0xa > link_settings
|
|
+ * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
|
|
*
|
|
* spread_spectrum could not be changed dynamically.
|
|
*
|
|
@@ -171,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
|
|
* done. please check link settings after force operation to see if HW get
|
|
* programming.
|
|
*
|
|
- * cat link_settings
|
|
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
|
|
*
|
|
* check current and preferred settings.
|
|
*
|
|
@@ -255,7 +255,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
|
|
int max_param_num = 2;
|
|
uint8_t param_nums = 0;
|
|
long param[2];
|
|
- bool valid_input = false;
|
|
+ bool valid_input = true;
|
|
|
|
if (size == 0)
|
|
return -EINVAL;
|
|
@@ -282,9 +282,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
|
|
case LANE_COUNT_ONE:
|
|
case LANE_COUNT_TWO:
|
|
case LANE_COUNT_FOUR:
|
|
- valid_input = true;
|
|
break;
|
|
default:
|
|
+ valid_input = false;
|
|
break;
|
|
}
|
|
|
|
@@ -294,9 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
|
|
case LINK_RATE_RBR2:
|
|
case LINK_RATE_HIGH2:
|
|
case LINK_RATE_HIGH3:
|
|
- valid_input = true;
|
|
break;
|
|
default:
|
|
+ valid_input = false;
|
|
break;
|
|
}
|
|
|
|
@@ -310,10 +310,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
|
|
* spread spectrum will not be changed
|
|
*/
|
|
prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
|
|
+ prefer_link_settings.use_link_rate_set = false;
|
|
prefer_link_settings.lane_count = param[0];
|
|
prefer_link_settings.link_rate = param[1];
|
|
|
|
- dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
|
|
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
|
|
|
|
kfree(wr_buf);
|
|
return size;
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
|
|
index 26ed70e5538ae..6cd76c0eebf90 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
|
|
@@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
|
|
struct amdgpu_display_manager *dm;
|
|
/* DAL irq source which registered for this interrupt. */
|
|
enum dc_irq_source irq_source;
|
|
+ struct work_struct work;
|
|
};
|
|
|
|
#define DM_IRQ_TABLE_LOCK(adev, flags) \
|
|
@@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
|
|
*/
|
|
static void dm_irq_work_func(struct work_struct *work)
|
|
{
|
|
- struct irq_list_head *irq_list_head =
|
|
- container_of(work, struct irq_list_head, work);
|
|
- struct list_head *handler_list = &irq_list_head->head;
|
|
- struct amdgpu_dm_irq_handler_data *handler_data;
|
|
-
|
|
- list_for_each_entry(handler_data, handler_list, list) {
|
|
- DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
|
|
- handler_data->irq_source);
|
|
+ struct amdgpu_dm_irq_handler_data *handler_data =
|
|
+ container_of(work, struct amdgpu_dm_irq_handler_data, work);
|
|
|
|
- DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
|
|
- handler_data->irq_source);
|
|
-
|
|
- handler_data->handler(handler_data->handler_arg);
|
|
- }
|
|
+ handler_data->handler(handler_data->handler_arg);
|
|
|
|
/* Call a DAL subcomponent which registered for interrupt notification
|
|
* at INTERRUPT_LOW_IRQ_CONTEXT.
|
|
@@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
|
|
break;
|
|
case INTERRUPT_LOW_IRQ_CONTEXT:
|
|
default:
|
|
- hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
|
|
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
|
|
break;
|
|
}
|
|
|
|
@@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
|
|
break;
|
|
case INTERRUPT_LOW_IRQ_CONTEXT:
|
|
default:
|
|
- hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
|
|
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
|
|
+ INIT_WORK(&handler_data->work, dm_irq_work_func);
|
|
break;
|
|
}
|
|
|
|
@@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
|
|
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
|
|
{
|
|
int src;
|
|
- struct irq_list_head *lh;
|
|
+ struct list_head *lh;
|
|
|
|
DRM_DEBUG_KMS("DM_IRQ\n");
|
|
|
|
@@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
|
|
for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
|
|
/* low context handler list init */
|
|
lh = &adev->dm.irq_handler_list_low_tab[src];
|
|
- INIT_LIST_HEAD(&lh->head);
|
|
- INIT_WORK(&lh->work, dm_irq_work_func);
|
|
-
|
|
+ INIT_LIST_HEAD(lh);
|
|
/* high context handler init */
|
|
INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
|
|
}
|
|
@@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
|
|
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
|
|
{
|
|
int src;
|
|
- struct irq_list_head *lh;
|
|
+ struct list_head *lh;
|
|
+ struct list_head *entry, *tmp;
|
|
+ struct amdgpu_dm_irq_handler_data *handler;
|
|
unsigned long irq_table_flags;
|
|
+
|
|
DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
|
|
for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
|
|
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
|
@@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
|
|
* (because no code can schedule a new one). */
|
|
lh = &adev->dm.irq_handler_list_low_tab[src];
|
|
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
|
- flush_work(&lh->work);
|
|
+
|
|
+ if (!list_empty(lh)) {
|
|
+ list_for_each_safe(entry, tmp, lh) {
|
|
+ handler = list_entry(
|
|
+ entry,
|
|
+ struct amdgpu_dm_irq_handler_data,
|
|
+ list);
|
|
+ flush_work(&handler->work);
|
|
+ }
|
|
+ }
|
|
}
|
|
}
|
|
|
|
@@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
|
|
struct list_head *hnd_list_h;
|
|
struct list_head *hnd_list_l;
|
|
unsigned long irq_table_flags;
|
|
+ struct list_head *entry, *tmp;
|
|
+ struct amdgpu_dm_irq_handler_data *handler;
|
|
|
|
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
|
|
|
@@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
|
|
* will be disabled from manage_dm_interrupts on disable CRTC.
|
|
*/
|
|
for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
|
|
- hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
|
|
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
|
|
hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
|
|
if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
|
|
dc_interrupt_set(adev->dm.dc, src, false);
|
|
|
|
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
|
- flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
|
|
|
|
+ if (!list_empty(hnd_list_l)) {
|
|
+ list_for_each_safe (entry, tmp, hnd_list_l) {
|
|
+ handler = list_entry(
|
|
+ entry,
|
|
+ struct amdgpu_dm_irq_handler_data,
|
|
+ list);
|
|
+ flush_work(&handler->work);
|
|
+ }
|
|
+ }
|
|
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
|
}
|
|
|
|
@@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
|
|
|
|
/* re-enable short pulse interrupts HW interrupt */
|
|
for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
|
|
- hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
|
|
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
|
|
hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
|
|
if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
|
|
dc_interrupt_set(adev->dm.dc, src, true);
|
|
@@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
|
|
* will be enabled from manage_dm_interrupts on enable CRTC.
|
|
*/
|
|
for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
|
|
- hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
|
|
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
|
|
hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
|
|
if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
|
|
dc_interrupt_set(adev->dm.dc, src, true);
|
|
@@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
|
|
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
|
|
enum dc_irq_source irq_source)
|
|
{
|
|
- unsigned long irq_table_flags;
|
|
- struct work_struct *work = NULL;
|
|
+ struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
|
|
+ struct amdgpu_dm_irq_handler_data *handler_data;
|
|
+ bool work_queued = false;
|
|
|
|
- DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
|
|
+ if (list_empty(handler_list))
|
|
+ return;
|
|
+
|
|
+ list_for_each_entry (handler_data, handler_list, list) {
|
|
+ if (!queue_work(system_highpri_wq, &handler_data->work)) {
|
|
+ continue;
|
|
+ } else {
|
|
+ work_queued = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
|
|
- if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
|
|
- work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
|
|
+ if (!work_queued) {
|
|
+ struct amdgpu_dm_irq_handler_data *handler_data_add;
|
|
+ /*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/
|
|
+ handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
|
|
|
|
- DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
|
+ /*allocate a new amdgpu_dm_irq_handler_data*/
|
|
+ handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
|
|
+ if (!handler_data_add) {
|
|
+ DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
|
|
+ return;
|
|
+ }
|
|
|
|
- if (work) {
|
|
- if (!schedule_work(work))
|
|
- DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
|
|
- irq_source);
|
|
- }
|
|
+ /*copy new amdgpu_dm_irq_handler_data members from handler_data*/
|
|
+ handler_data_add->handler = handler_data->handler;
|
|
+ handler_data_add->handler_arg = handler_data->handler_arg;
|
|
+ handler_data_add->dm = handler_data->dm;
|
|
+ handler_data_add->irq_source = irq_source;
|
|
|
|
+ list_add_tail(&handler_data_add->list, handler_list);
|
|
+
|
|
+ INIT_WORK(&handler_data_add->work, dm_irq_work_func);
|
|
+
|
|
+ if (queue_work(system_highpri_wq, &handler_data_add->work))
|
|
+ DRM_DEBUG("Queued work for handling interrupt from "
|
|
+ "display for IRQ source %d\n",
|
|
+ irq_source);
|
|
+ else
|
|
+ DRM_ERROR("Failed to queue work for handling interrupt "
|
|
+ "from display for IRQ source %d\n",
|
|
+ irq_source);
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
|
|
index 995ffbbf64e7c..1ee27f2f28f1d 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
|
|
@@ -217,6 +217,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
|
|
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
|
|
dcn3_clk_mgr_destroy(clk_mgr);
|
|
}
|
|
+ if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
|
|
+ dcn3_clk_mgr_destroy(clk_mgr);
|
|
+ }
|
|
break;
|
|
|
|
case FAMILY_VGH:
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
|
|
index ab98c259ef695..cbe94cf489c7f 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
|
|
@@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
|
|
bool force_reset = false;
|
|
bool update_uclk = false;
|
|
bool p_state_change_support;
|
|
+ int total_plane_count;
|
|
|
|
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
|
|
return;
|
|
@@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
|
|
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
|
|
|
|
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
|
|
- p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
|
|
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
|
|
+ p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
|
|
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
|
|
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
|
|
index 58eb0d69873a6..ccac86347315d 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
|
|
@@ -2380,7 +2380,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
|
|
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
|
|
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
|
|
|
|
- dc->hwss.optimize_bandwidth(dc, dc->current_state);
|
|
+ dc->optimized_required = true;
|
|
+
|
|
} else {
|
|
if (dc->optimize_seamless_boot_streams == 0)
|
|
dc->hwss.prepare_bandwidth(dc, dc->current_state);
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
|
|
index 382465862f297..f72f02e016aea 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
|
|
@@ -99,7 +99,6 @@ struct dce110_aux_registers {
|
|
AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
|
|
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
|
|
AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
|
|
- AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
|
|
AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
|
|
AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
|
|
AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
|
|
index 17e84f34ceba1..e0b195cad9ce8 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
|
|
@@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
|
|
{
|
|
struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
|
|
uint32_t raw_state;
|
|
+ enum dmub_status status = DMUB_STATUS_INVALID;
|
|
|
|
// Send gpint command and wait for ack
|
|
- dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
|
|
-
|
|
- dmub_srv_get_gpint_response(srv, &raw_state);
|
|
-
|
|
- *state = convert_psr_state(raw_state);
|
|
+ status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
|
|
+
|
|
+ if (status == DMUB_STATUS_OK) {
|
|
+ // GPINT was executed, get response
|
|
+ dmub_srv_get_gpint_response(srv, &raw_state);
|
|
+ *state = convert_psr_state(raw_state);
|
|
+ } else
|
|
+ // Return invalid state when GPINT times out
|
|
+ *state = 0xFF;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
|
|
index 3e6f76096119c..a7598356f37d2 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
|
|
@@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut(
|
|
{
|
|
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
|
|
|
|
- if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
|
|
- // Force power on
|
|
- REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
|
|
- // Wait for confirmation when powering on
|
|
- if (power_on)
|
|
- REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
|
|
- } else {
|
|
- REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
|
|
- MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
|
|
- }
|
|
+ /*
|
|
+ * Powering on: force memory active so the LUT can be updated.
|
|
+ * Powering off: allow entering memory low power mode
|
|
+ *
|
|
+ * Memory low power mode is controlled during MPC OGAM LUT init.
|
|
+ */
|
|
+ REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
|
|
+ MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
|
|
+
|
|
+ /* Wait for memory to be powered on - we won't be able to write to it otherwise. */
|
|
+ if (power_on)
|
|
+ REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
|
|
}
|
|
|
|
static void mpc3_configure_ogam_lut(
|
|
@@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
|
|
.acquire_rmu = mpcc3_acquire_rmu,
|
|
.program_3dlut = mpc3_program_3dlut,
|
|
.release_rmu = mpcc3_release_rmu,
|
|
- .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
|
|
+ .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
|
|
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
|
|
|
|
};
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
|
|
index 7ec8936346b27..f90881f4458f9 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
|
|
@@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
|
|
},
|
|
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
|
|
.num_states = 1,
|
|
- .sr_exit_time_us = 12,
|
|
+ .sr_exit_time_us = 15.5,
|
|
.sr_enter_plus_exit_time_us = 20,
|
|
.urgent_latency_us = 4.0,
|
|
.urgent_latency_pixel_data_only_us = 4.0,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
|
|
index 45f028986a8db..b3f0476899d32 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
|
|
@@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|
|
mode_lib->vba.DCCEnabledInAnyPlane = true;
|
|
}
|
|
}
|
|
+ mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
|
|
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
|
|
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
|
|
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
|
|
index 80170f9721ce9..1bcda7eba4a6f 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
|
|
@@ -3510,6 +3510,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
|
|
mode_lib->vba.DCCEnabledInAnyPlane = true;
|
|
}
|
|
}
|
|
+ mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
|
|
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
|
|
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
|
|
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
|
|
index 72423dc425dc0..799bae229e679 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
|
|
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
|
|
if (surf_linear) {
|
|
log2_swath_height_l = 0;
|
|
log2_swath_height_c = 0;
|
|
- } else if (!surf_vert) {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
|
|
} else {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
|
|
+ unsigned int swath_height_l;
|
|
+ unsigned int swath_height_c;
|
|
+
|
|
+ if (!surf_vert) {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
|
|
+ } else {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
|
|
+ }
|
|
+
|
|
+ if (swath_height_l > 0)
|
|
+ log2_swath_height_l = dml_log2(swath_height_l);
|
|
+
|
|
+ if (req128_l && log2_swath_height_l > 0)
|
|
+ log2_swath_height_l -= 1;
|
|
+
|
|
+ if (swath_height_c > 0)
|
|
+ log2_swath_height_c = dml_log2(swath_height_c);
|
|
+
|
|
+ if (req128_c && log2_swath_height_c > 0)
|
|
+ log2_swath_height_c -= 1;
|
|
}
|
|
+
|
|
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
|
|
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
|
|
|
|
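
The guarded arithmetic above avoids the underflow the old code could hit: dml_log2() of a one-line-high 256-byte block is 0, and subtracting req128 from it produced a negative shift count for the final `1 << log2_swath_height` expressions. A small standalone C sketch of the before/after arithmetic (the block height and req128 values are made up to show the edge case):

#include <stdio.h>

/* integer log2 for power-of-two inputs, same idea as dml_log2() */
static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;

	while (v > 1) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int blk256_height = 1;	/* 256-byte block only one line tall */
	unsigned int req128 = 1;	/* 128-byte requests selected */
	int old_log2 = (int)ilog2u(blk256_height) - (int)req128;	/* -1: shifting by it is undefined */
	unsigned int new_log2 = ilog2u(blk256_height);

	if (req128 && new_log2 > 0)	/* patched logic: only halve when possible */
		new_log2 -= 1;

	printf("old log2_swath_height = %d (1 << %d is undefined)\n", old_log2, old_log2);
	printf("new log2_swath_height = %u -> swath_height = %u\n", new_log2, 1u << new_log2);
	return 0;
}
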
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
|
|
index 9c78446c3a9d8..6a6d5970d1d58 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
|
|
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
|
|
if (surf_linear) {
|
|
log2_swath_height_l = 0;
|
|
log2_swath_height_c = 0;
|
|
- } else if (!surf_vert) {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
|
|
} else {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
|
|
+ unsigned int swath_height_l;
|
|
+ unsigned int swath_height_c;
|
|
+
|
|
+ if (!surf_vert) {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
|
|
+ } else {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
|
|
+ }
|
|
+
|
|
+ if (swath_height_l > 0)
|
|
+ log2_swath_height_l = dml_log2(swath_height_l);
|
|
+
|
|
+ if (req128_l && log2_swath_height_l > 0)
|
|
+ log2_swath_height_l -= 1;
|
|
+
|
|
+ if (swath_height_c > 0)
|
|
+ log2_swath_height_c = dml_log2(swath_height_c);
|
|
+
|
|
+ if (req128_c && log2_swath_height_c > 0)
|
|
+ log2_swath_height_c -= 1;
|
|
}
|
|
+
|
|
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
|
|
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
|
|
index edd41d3582910..dc1c81a6e3771 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
|
|
@@ -277,13 +277,31 @@ static void handle_det_buf_split(
|
|
if (surf_linear) {
|
|
log2_swath_height_l = 0;
|
|
log2_swath_height_c = 0;
|
|
- } else if (!surf_vert) {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
|
|
} else {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
|
|
+ unsigned int swath_height_l;
|
|
+ unsigned int swath_height_c;
|
|
+
|
|
+ if (!surf_vert) {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
|
|
+ } else {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
|
|
+ }
|
|
+
|
|
+ if (swath_height_l > 0)
|
|
+ log2_swath_height_l = dml_log2(swath_height_l);
|
|
+
|
|
+ if (req128_l && log2_swath_height_l > 0)
|
|
+ log2_swath_height_l -= 1;
|
|
+
|
|
+ if (swath_height_c > 0)
|
|
+ log2_swath_height_c = dml_log2(swath_height_c);
|
|
+
|
|
+ if (req128_c && log2_swath_height_c > 0)
|
|
+ log2_swath_height_c -= 1;
|
|
}
|
|
+
|
|
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
|
|
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
|
|
index 5b5916b5bc710..cf5d8d8c2c9c3 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
|
|
@@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
|
|
if (surf_linear) {
|
|
log2_swath_height_l = 0;
|
|
log2_swath_height_c = 0;
|
|
- } else if (!surf_vert) {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
|
|
} else {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
|
|
+ unsigned int swath_height_l;
|
|
+ unsigned int swath_height_c;
|
|
+
|
|
+ if (!surf_vert) {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
|
|
+ } else {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
|
|
+ }
|
|
+
|
|
+ if (swath_height_l > 0)
|
|
+ log2_swath_height_l = dml_log2(swath_height_l);
|
|
+
|
|
+ if (req128_l && log2_swath_height_l > 0)
|
|
+ log2_swath_height_l -= 1;
|
|
+
|
|
+ if (swath_height_c > 0)
|
|
+ log2_swath_height_c = dml_log2(swath_height_c);
|
|
+
|
|
+ if (req128_c && log2_swath_height_c > 0)
|
|
+ log2_swath_height_c -= 1;
|
|
}
|
|
+
|
|
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
|
|
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
|
|
index 4c3e9cc301679..414da64f57340 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
|
|
@@ -344,13 +344,31 @@ static void handle_det_buf_split(
|
|
if (surf_linear) {
|
|
log2_swath_height_l = 0;
|
|
log2_swath_height_c = 0;
|
|
- } else if (!surf_vert) {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
|
|
} else {
|
|
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
|
|
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
|
|
+ unsigned int swath_height_l;
|
|
+ unsigned int swath_height_c;
|
|
+
|
|
+ if (!surf_vert) {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
|
|
+ } else {
|
|
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
|
|
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
|
|
+ }
|
|
+
|
|
+ if (swath_height_l > 0)
|
|
+ log2_swath_height_l = dml_log2(swath_height_l);
|
|
+
|
|
+ if (req128_l && log2_swath_height_l > 0)
|
|
+ log2_swath_height_l -= 1;
|
|
+
|
|
+ if (swath_height_c > 0)
|
|
+ log2_swath_height_c = dml_log2(swath_height_c);
|
|
+
|
|
+ if (req128_c && log2_swath_height_c > 0)
|
|
+ log2_swath_height_c -= 1;
|
|
}
|
|
+
|
|
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
|
|
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
|
|
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
|
|
index 892f08f2ba429..13b5ae1c106f2 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
|
|
@@ -5161,7 +5161,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
|
|
|
|
out:
|
|
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
|
|
- 1 << power_profile_mode,
|
|
+ (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
|
|
NULL);
|
|
hwmgr->power_profile_mode = power_profile_mode;
|
|
|
|
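
The vega10 change above treats power_profile_mode as a 1-based index when building the SMU workload mask: bit (mode - 1) is set, and mode 0 sends an empty mask instead of bit 0. A standalone sketch of the resulting mask values (the range of modes shown is illustrative):

#include <stdio.h>

int main(void)
{
	for (unsigned int mode = 0; mode <= 4; mode++) {
		unsigned int old_mask = 1u << mode;			/* pre-patch: bit "mode" */
		unsigned int new_mask = mode ? 1u << (mode - 1) : 0;	/* post-patch: bit "mode - 1", or none */

		printf("profile %u: old mask 0x%02x, new mask 0x%02x\n",
		       mode, old_mask, new_mask);
	}
	return 0;
}
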
diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
|
|
index 3bc383d5bf73d..49a1d7f3539c2 100644
|
|
--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
|
|
+++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
|
|
@@ -13,9 +13,6 @@
|
|
#define has_bit(nr, mask) (BIT(nr) & (mask))
|
|
#define has_bits(bits, mask) (((bits) & (mask)) == (bits))
|
|
|
|
-#define dp_for_each_set_bit(bit, mask) \
|
|
- for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
|
|
-
|
|
#define dp_wait_cond(__cond, __tries, __min_range, __max_range) \
|
|
({ \
|
|
int num_tries = __tries; \
|
|
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
|
|
index 719a79728e24f..06c595378dda2 100644
|
|
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
|
|
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
|
|
@@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
|
|
{
|
|
struct komeda_component *c;
|
|
int i;
|
|
+ unsigned long avail_comps = pipe->avail_comps;
|
|
|
|
- dp_for_each_set_bit(i, pipe->avail_comps) {
|
|
+ for_each_set_bit(i, &avail_comps, 32) {
|
|
c = komeda_pipeline_get_component(pipe, i);
|
|
komeda_component_destroy(mdev, c);
|
|
}
|
|
@@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
|
|
{
|
|
struct komeda_component *c;
|
|
int id;
|
|
+ unsigned long avail_comps = pipe->avail_comps;
|
|
|
|
DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
|
|
pipe->id, pipe->n_layers, pipe->n_scalers,
|
|
@@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
|
|
pipe->of_output_links[1] ?
|
|
pipe->of_output_links[1]->full_name : "none");
|
|
|
|
- dp_for_each_set_bit(id, pipe->avail_comps) {
|
|
+ for_each_set_bit(id, &avail_comps, 32) {
|
|
c = komeda_pipeline_get_component(pipe, id);
|
|
|
|
komeda_component_dump(c);
|
|
@@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
|
|
struct komeda_pipeline *pipe = c->pipeline;
|
|
struct komeda_component *input;
|
|
int id;
|
|
+ unsigned long supported_inputs = c->supported_inputs;
|
|
|
|
- dp_for_each_set_bit(id, c->supported_inputs) {
|
|
+ for_each_set_bit(id, &supported_inputs, 32) {
|
|
input = komeda_pipeline_get_component(pipe, id);
|
|
if (!input) {
|
|
c->supported_inputs &= ~(BIT(id));
|
|
@@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
|
|
struct komeda_component *c;
|
|
struct komeda_layer *layer;
|
|
int i, id;
|
|
+ unsigned long avail_comps = pipe->avail_comps;
|
|
|
|
- dp_for_each_set_bit(id, pipe->avail_comps) {
|
|
+ for_each_set_bit(id, &avail_comps, 32) {
|
|
c = komeda_pipeline_get_component(pipe, id);
|
|
komeda_component_verify_inputs(c);
|
|
}
|
|
@@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
|
|
{
|
|
struct komeda_component *c;
|
|
u32 id;
|
|
+ unsigned long avail_comps;
|
|
|
|
seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
|
|
|
|
if (pipe->funcs && pipe->funcs->dump_register)
|
|
pipe->funcs->dump_register(pipe, sf);
|
|
|
|
- dp_for_each_set_bit(id, pipe->avail_comps) {
|
|
+ avail_comps = pipe->avail_comps;
|
|
+ for_each_set_bit(id, &avail_comps, 32) {
|
|
c = komeda_pipeline_get_component(pipe, id);
|
|
|
|
seq_printf(sf, "\n------%s------\n", c->name);
|
|
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
|
|
index 5c085116de3f8..e672b9cffee3c 100644
|
|
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
|
|
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
|
|
@@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
|
|
struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
|
|
struct komeda_component_state *c_st;
|
|
struct komeda_component *c;
|
|
- u32 disabling_comps, id;
|
|
+ u32 id;
|
|
+ unsigned long disabling_comps;
|
|
|
|
WARN_ON(!old);
|
|
|
|
disabling_comps = (~new->active_comps) & old->active_comps;
|
|
|
|
/* unbound all disabling component */
|
|
- dp_for_each_set_bit(id, disabling_comps) {
|
|
+ for_each_set_bit(id, &disabling_comps, 32) {
|
|
c = komeda_pipeline_get_component(pipe, id);
|
|
c_st = komeda_component_get_state_and_set_user(c,
|
|
drm_st, NULL, new->crtc);
|
|
@@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
|
|
struct komeda_pipeline_state *old;
|
|
struct komeda_component *c;
|
|
struct komeda_component_state *c_st;
|
|
- u32 id, disabling_comps = 0;
|
|
+ u32 id;
|
|
+ unsigned long disabling_comps;
|
|
|
|
old = komeda_pipeline_get_old_state(pipe, old_state);
|
|
|
|
@@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
|
|
disabling_comps = old->active_comps &
|
|
pipe->standalone_disabled_comps;
|
|
|
|
- DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
|
|
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
|
|
pipe->id, old->active_comps, disabling_comps);
|
|
|
|
- dp_for_each_set_bit(id, disabling_comps) {
|
|
+ for_each_set_bit(id, &disabling_comps, 32) {
|
|
c = komeda_pipeline_get_component(pipe, id);
|
|
c_st = priv_to_comp_st(c->obj.state);
|
|
|
|
@@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
|
|
struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
|
|
struct komeda_pipeline_state *old;
|
|
struct komeda_component *c;
|
|
- u32 id, changed_comps = 0;
|
|
+ u32 id;
|
|
+ unsigned long changed_comps;
|
|
|
|
old = komeda_pipeline_get_old_state(pipe, old_state);
|
|
|
|
changed_comps = new->active_comps | old->active_comps;
|
|
|
|
- DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
|
|
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
|
|
pipe->id, new->active_comps, changed_comps);
|
|
|
|
- dp_for_each_set_bit(id, changed_comps) {
|
|
+ for_each_set_bit(id, &changed_comps, 32) {
|
|
c = komeda_pipeline_get_component(pipe, id);
|
|
|
|
if (new->active_comps & BIT(c->id))
|
|
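
The komeda changes above drop dp_for_each_set_bit(), which cast a 32-bit field to `unsigned long *`; on 64-bit kernels for_each_set_bit() then reads a full 8-byte word, i.e. 4 bytes beyond the field. Copying the mask into a local unsigned long first, as the patch does, keeps the iteration inside a real word. A standalone sketch of the safe pattern (the open-coded loop stands in for for_each_set_bit()):

#include <stdio.h>

int main(void)
{
	unsigned int avail_comps = 0x0000a011u;	/* 32-bit component bitmap (illustrative value) */
	unsigned long mask = avail_comps;	/* widen into a full word first, as the patch does */

	/* equivalent of: for_each_set_bit(id, &mask, 32) */
	for (int id = 0; id < 32; id++)
		if (mask & (1UL << id))
			printf("component id %d is available\n", id);
	return 0;
}
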
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
|
|
index 667b450606ef8..b047c0ea43e8c 100644
|
|
--- a/drivers/gpu/drm/ast/ast_drv.c
|
|
+++ b/drivers/gpu/drm/ast/ast_drv.c
|
|
@@ -30,6 +30,7 @@
|
|
#include <linux/module.h>
|
|
#include <linux/pci.h>
|
|
|
|
+#include <drm/drm_atomic_helper.h>
|
|
#include <drm/drm_crtc_helper.h>
|
|
#include <drm/drm_drv.h>
|
|
#include <drm/drm_fb_helper.h>
|
|
@@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
|
|
struct drm_device *dev = pci_get_drvdata(pdev);
|
|
|
|
drm_dev_unregister(dev);
|
|
+ drm_atomic_helper_shutdown(dev);
|
|
}
|
|
|
|
static int ast_drm_freeze(struct drm_device *dev)
|
|
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
|
|
index 9db371f4054f3..c86ed2ffb725b 100644
|
|
--- a/drivers/gpu/drm/ast/ast_mode.c
|
|
+++ b/drivers/gpu/drm/ast/ast_mode.c
|
|
@@ -688,7 +688,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
|
|
unsigned int offset_x, offset_y;
|
|
|
|
offset_x = AST_MAX_HWC_WIDTH - fb->width;
|
|
- offset_y = AST_MAX_HWC_WIDTH - fb->height;
|
|
+ offset_y = AST_MAX_HWC_HEIGHT - fb->height;
|
|
|
|
if (state->fb != old_state->fb) {
|
|
/* A new cursor image was installed. */
|
|
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
|
|
index 58f5dc2f6dd52..f6bdec7fa9253 100644
|
|
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
|
|
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
|
|
@@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
|
|
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
|
|
};
|
|
|
|
+static const struct drm_dmi_panel_orientation_data onegx1_pro = {
|
|
+ .width = 1200,
|
|
+ .height = 1920,
|
|
+ .bios_dates = (const char * const []){ "12/17/2020", NULL },
|
|
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
|
|
+};
|
|
+
|
|
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
|
|
.width = 720,
|
|
.height = 1280,
|
|
@@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
|
|
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
|
|
},
|
|
.driver_data = (void *)&lcd1200x1920_rightside_up,
|
|
+ }, { /* OneGX1 Pro */
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
|
|
+ },
|
|
+ .driver_data = (void *)&onegx1_pro,
|
|
}, { /* VIOS LTH17 */
|
|
.matches = {
|
|
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
|
|
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
|
|
index e53a222186a66..717e1611ce376 100644
|
|
--- a/drivers/gpu/drm/i915/intel_pm.c
|
|
+++ b/drivers/gpu/drm/i915/intel_pm.c
|
|
@@ -2993,7 +2993,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
|
|
|
|
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
|
|
const char *name,
|
|
- const u16 wm[8])
|
|
+ const u16 wm[])
|
|
{
|
|
int level, max_level = ilk_wm_max_level(dev_priv);
|
|
|
|
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
|
|
index ff2c1d583c792..0392d4dfe270a 100644
|
|
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
|
|
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
|
|
@@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
|
|
{
|
|
struct mdp5_kms *mdp5_kms = get_kms(encoder);
|
|
struct device *dev = encoder->dev->dev;
|
|
- u32 total_lines_x100, vclks_line, cfg;
|
|
+ u32 total_lines, vclks_line, cfg;
|
|
long vsync_clk_speed;
|
|
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
|
|
int pp_id = mixer->pp;
|
|
@@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
|
|
- if (!total_lines_x100) {
|
|
+ total_lines = mode->vtotal * drm_mode_vrefresh(mode);
|
|
+ if (!total_lines) {
|
|
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
|
|
__func__, mode->vtotal, drm_mode_vrefresh(mode));
|
|
return -EINVAL;
|
|
@@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
|
|
vsync_clk_speed);
|
|
return -EINVAL;
|
|
}
|
|
- vclks_line = vsync_clk_speed * 100 / total_lines_x100;
|
|
+ vclks_line = vsync_clk_speed / total_lines;
|
|
|
|
cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
|
|
| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
|
|
cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
|
|
|
|
+ /*
|
|
+ * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
|
|
+ * the vsync_clk equating to roughly half the desired panel refresh rate.
|
|
+ * This is only necessary as a stability fallback if interrupts from the
|
|
+ * panel arrive too late or not at all, but is currently used by default
|
|
+ * because these panel interrupts are not wired up yet.
|
|
+ */
|
|
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
|
|
mdp5_write(mdp5_kms,
|
|
- REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
|
|
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
|
|
+
|
|
mdp5_write(mdp5_kms,
|
|
REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
|
|
mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
|
|
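
With the mdp5 change above, the vsync counter advances once per display line (vsync_clk / (vtotal * vrefresh)) and SYNC_CONFIG_HEIGHT is programmed to 2 * vtotal, so the tearcheck fallback fires at roughly half the panel refresh rate, as the new comment says. A standalone arithmetic check (clock rate, vtotal and vrefresh are made-up but typical values):

#include <stdio.h>

int main(void)
{
	long vsync_clk = 19200000;	/* 19.2 MHz vsync clock (illustrative) */
	int vtotal = 2528;		/* total lines per frame (illustrative) */
	int vrefresh = 60;		/* panel refresh rate in Hz */

	long total_lines = (long)vtotal * vrefresh;
	long vclks_line = vsync_clk / total_lines;	/* clock ticks per display line */

	/* fallback period: SYNC_CONFIG_HEIGHT (= 2 * vtotal) lines worth of ticks */
	long fallback_ticks = vclks_line * 2L * vtotal;
	double fallback_hz = (double)vsync_clk / fallback_ticks;

	printf("vclks_line = %ld ticks, fallback rate = %.1f Hz (~vrefresh/2 = %d Hz)\n",
	       vclks_line, fallback_hz, vrefresh / 2);
	return 0;
}
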
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
|
|
index 5b8fe32022b5f..e1c90fa47411f 100644
|
|
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
|
|
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
|
|
@@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
|
|
|
|
dp_usbpd->hpd_high = hpd;
|
|
|
|
- if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
|
|
- && !hpd_priv->dp_cb->disconnect) {
|
|
+ if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
|
|
+ || !hpd_priv->dp_cb->disconnect) {
|
|
pr_err("hpd dp_cb not initialized\n");
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
|
|
index 10738e04c09b8..56e0c6c625e9a 100644
|
|
--- a/drivers/gpu/drm/qxl/qxl_display.c
|
|
+++ b/drivers/gpu/drm/qxl/qxl_display.c
|
|
@@ -1228,6 +1228,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
|
|
|
|
void qxl_modeset_fini(struct qxl_device *qdev)
|
|
{
|
|
+ if (qdev->dumb_shadow_bo) {
|
|
+ drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
|
|
+ qdev->dumb_shadow_bo = NULL;
|
|
+ }
|
|
qxl_destroy_monitors_object(qdev);
|
|
drm_mode_config_cleanup(&qdev->ddev);
|
|
}
|
|
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
|
|
index 6e7f16f4cec79..41cdf9d1e59dc 100644
|
|
--- a/drivers/gpu/drm/qxl/qxl_drv.c
|
|
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
|
|
@@ -144,6 +144,8 @@ static void qxl_drm_release(struct drm_device *dev)
|
|
* reodering qxl_modeset_fini() + qxl_device_fini() calls is
|
|
* non-trivial though.
|
|
*/
|
|
+ if (!dev->registered)
|
|
+ return;
|
|
qxl_modeset_fini(qdev);
|
|
qxl_device_fini(qdev);
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
|
|
index 8bc5ad1d65857..962be545f889b 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_object.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_object.c
|
|
@@ -385,6 +385,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
|
|
}
|
|
#endif
|
|
man = ttm_manager_type(bdev, TTM_PL_VRAM);
|
|
+ if (!man)
|
|
+ return 0;
|
|
return ttm_resource_manager_evict_all(bdev, man);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
index 176cb55062be6..08a015a363040 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
@@ -486,13 +486,14 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
|
|
struct radeon_ttm_tt *gtt = (void *)ttm;
|
|
struct radeon_device *rdev = radeon_get_rdev(bdev);
|
|
|
|
+ if (gtt->userptr)
|
|
+ radeon_ttm_tt_unpin_userptr(bdev, ttm);
|
|
+
|
|
if (!gtt->bound)
|
|
return;
|
|
|
|
radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
|
|
|
|
- if (gtt->userptr)
|
|
- radeon_ttm_tt_unpin_userptr(bdev, ttm);
|
|
gtt->bound = false;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
|
|
index 23eb6d772e405..669f2ee395154 100644
|
|
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
|
|
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
|
|
@@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|
if (!sync_file) {
|
|
dma_fence_put(&out_fence->f);
|
|
ret = -ENOMEM;
|
|
- goto out_memdup;
|
|
+ goto out_unresv;
|
|
}
|
|
|
|
exbuf->fence_fd = out_fence_fd;
|
|
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
|
|
index d69a5b6da5532..4ff1ec28e630d 100644
|
|
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
|
|
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
|
|
@@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
|
|
|
|
ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
|
|
if (ret != 0) {
|
|
+ virtio_gpu_array_put_free(objs);
|
|
virtio_gpu_free_object(&shmem_obj->base);
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
|
|
index 0443b7deeaef6..758d8a98d96b3 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
|
|
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
|
|
@@ -18,7 +18,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
|
|
|
|
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
|
|
output->period_ns);
|
|
- WARN_ON(ret_overrun != 1);
|
|
+ if (ret_overrun != 1)
|
|
+ pr_warn("%s: vblank timer overrun\n", __func__);
|
|
|
|
spin_lock(&output->lock);
|
|
ret = drm_crtc_handle_vblank(crtc);
|
|
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
|
|
index f72803a023910..28509b02a0b56 100644
|
|
--- a/drivers/hwtracing/intel_th/gth.c
|
|
+++ b/drivers/hwtracing/intel_th/gth.c
|
|
@@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
|
|
output->active = false;
|
|
|
|
for_each_set_bit(master, gth->output[output->port].master,
|
|
- TH_CONFIGURABLE_MASTERS) {
|
|
+ TH_CONFIGURABLE_MASTERS + 1) {
|
|
gth_master_set(gth, master, -1);
|
|
}
|
|
spin_unlock(>h->gth_lock);
|
|
@@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
|
|
othdev->output.port = -1;
|
|
othdev->output.active = false;
|
|
gth->output[port].output = NULL;
|
|
- for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
|
|
+ for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
|
|
if (gth->master[master] == port)
|
|
gth->master[master] = -1;
|
|
spin_unlock(>h->gth_lock);
|
|
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
|
|
index 251e75c9ba9d0..817cdb29bbd89 100644
|
|
--- a/drivers/hwtracing/intel_th/pci.c
|
|
+++ b/drivers/hwtracing/intel_th/pci.c
|
|
@@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
|
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
|
|
.driver_data = (kernel_ulong_t)&intel_th_2x,
|
|
},
|
|
+ {
|
|
+ /* Alder Lake-M */
|
|
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
|
|
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
|
|
+ },
|
|
{
|
|
/* Alder Lake CPU */
|
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
|
|
.driver_data = (kernel_ulong_t)&intel_th_2x,
|
|
},
|
|
+ {
|
|
+ /* Rocket Lake CPU */
|
|
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
|
|
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
|
|
+ },
|
|
{ 0 },
|
|
};
|
|
|
|
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
|
|
index d8fccf048bf44..30576a5f2f045 100644
|
|
--- a/drivers/input/touchscreen/ili210x.c
|
|
+++ b/drivers/input/touchscreen/ili210x.c
|
|
@@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
|
|
unsigned int *x, unsigned int *y,
|
|
unsigned int *z)
|
|
{
|
|
- if (touchdata[0] & BIT(finger))
|
|
+ if (!(touchdata[0] & BIT(finger)))
|
|
return false;
|
|
|
|
*x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
|
|
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
|
|
index 06b00b5363d86..e49a79322c53f 100644
|
|
--- a/drivers/iommu/intel/iommu.c
|
|
+++ b/drivers/iommu/intel/iommu.c
|
|
@@ -2294,6 +2294,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
|
|
return level;
|
|
}
|
|
|
|
+/*
|
|
+ * Ensure that old small page tables are removed to make room for superpage(s).
|
|
+ * We're going to add new large pages, so make sure we don't remove their parent
|
|
+ * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
|
|
+ */
|
|
+static void switch_to_super_page(struct dmar_domain *domain,
|
|
+ unsigned long start_pfn,
|
|
+ unsigned long end_pfn, int level)
|
|
+{
|
|
+ unsigned long lvl_pages = lvl_to_nr_pages(level);
|
|
+ struct dma_pte *pte = NULL;
|
|
+ int i;
|
|
+
|
|
+ while (start_pfn <= end_pfn) {
|
|
+ if (!pte)
|
|
+ pte = pfn_to_dma_pte(domain, start_pfn, &level);
|
|
+
|
|
+ if (dma_pte_present(pte)) {
|
|
+ dma_pte_free_pagetable(domain, start_pfn,
|
|
+ start_pfn + lvl_pages - 1,
|
|
+ level + 1);
|
|
+
|
|
+ for_each_domain_iommu(i, domain)
|
|
+ iommu_flush_iotlb_psi(g_iommus[i], domain,
|
|
+ start_pfn, lvl_pages,
|
|
+ 0, 0);
|
|
+ }
|
|
+
|
|
+ pte++;
|
|
+ start_pfn += lvl_pages;
|
|
+ if (first_pte_in_page(pte))
|
|
+ pte = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
static int
|
|
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
|
unsigned long phys_pfn, unsigned long nr_pages, int prot)
|
|
@@ -2327,22 +2362,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
|
return -ENOMEM;
|
|
/* It is large page*/
|
|
if (largepage_lvl > 1) {
|
|
- unsigned long nr_superpages, end_pfn;
|
|
+ unsigned long end_pfn;
|
|
|
|
pteval |= DMA_PTE_LARGE_PAGE;
|
|
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
|
|
-
|
|
- nr_superpages = nr_pages / lvl_pages;
|
|
- end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
|
|
-
|
|
- /*
|
|
- * Ensure that old small page tables are
|
|
- * removed to make room for superpage(s).
|
|
- * We're adding new large pages, so make sure
|
|
- * we don't remove their parent tables.
|
|
- */
|
|
- dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
|
|
- largepage_lvl + 1);
|
|
+ end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
|
|
+ switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
|
|
} else {
|
|
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
|
|
}
|
|
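
In the intel-iommu hunk above, the end of the superpage range is now derived by rounding (iov_pfn + nr_pages) down to a superpage boundary with level_mask(), and switch_to_super_page() clears any old small-page tables and flushes the IOTLB one superpage at a time. A standalone sketch of that rounding, assuming 2MiB superpages and taking level_mask() to behave like ~(lvl_pages - 1):

#include <stdio.h>

int main(void)
{
	unsigned long lvl_pages = 512;			/* 2MiB superpage = 512 x 4KiB PFNs */
	unsigned long level_mask = ~(lvl_pages - 1);	/* assumed shape of level_mask(2) */
	unsigned long iov_pfn = 1536;			/* superpage-aligned start */
	unsigned long nr_pages[] = { 512, 1100, 1536 };

	for (int i = 0; i < 3; i++) {
		unsigned long end_pfn = ((iov_pfn + nr_pages[i]) & level_mask) - 1;

		printf("nr_pages=%4lu -> end_pfn=%lu (covers %lu whole superpages)\n",
		       nr_pages[i], end_pfn, (end_pfn + 1 - iov_pfn) / lvl_pages);
	}
	return 0;
}
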
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
|
|
index 3fc65375cbe0f..bb025e04ba771 100644
|
|
--- a/drivers/irqchip/irq-gic-v3.c
|
|
+++ b/drivers/irqchip/irq-gic-v3.c
|
|
@@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
|
|
|
|
irqnr = gic_read_iar();
|
|
|
|
+ /* Check for special IDs first */
|
|
+ if ((irqnr >= 1020 && irqnr <= 1023))
|
|
+ return;
|
|
+
|
|
if (gic_supports_nmi() &&
|
|
unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
|
|
gic_handle_nmi(irqnr, regs);
|
|
@@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
|
|
gic_arch_enable_irqs();
|
|
}
|
|
|
|
- /* Check for special IDs first */
|
|
- if ((irqnr >= 1020 && irqnr <= 1023))
|
|
- return;
|
|
-
|
|
if (static_branch_likely(&supports_deactivate_key))
|
|
gic_write_eoir(irqnr);
|
|
else
|
|
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
|
|
index b64fede032dc5..4c7da1c4e6cb9 100644
|
|
--- a/drivers/md/dm-integrity.c
|
|
+++ b/drivers/md/dm-integrity.c
|
|
@@ -3929,6 +3929,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
|
|
if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
|
|
r = -EINVAL;
|
|
ti->error = "Invalid bitmap_flush_interval argument";
|
|
+ goto bad;
|
|
}
|
|
ic->bitmap_flush_interval = msecs_to_jiffies(val);
|
|
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
|
|
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
|
|
index cab12b2251bac..91461b6904c1d 100644
|
|
--- a/drivers/md/dm-raid.c
|
|
+++ b/drivers/md/dm-raid.c
|
|
@@ -1868,6 +1868,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
|
|
return rs->md.new_level != rs->md.level;
|
|
}
|
|
|
|
+/* True if layout is set to reshape. */
|
|
+static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
|
|
+{
|
|
+ return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
|
|
+ rs->md.new_layout != rs->md.layout ||
|
|
+ rs->md.new_chunk_sectors != rs->md.chunk_sectors;
|
|
+}
|
|
+
|
|
/* True if @rs is requested to reshape by ctr */
|
|
static bool rs_reshape_requested(struct raid_set *rs)
|
|
{
|
|
@@ -1880,9 +1888,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
|
|
if (rs_is_raid0(rs))
|
|
return false;
|
|
|
|
- change = mddev->new_layout != mddev->layout ||
|
|
- mddev->new_chunk_sectors != mddev->chunk_sectors ||
|
|
- rs->delta_disks;
|
|
+ change = rs_is_layout_change(rs, false);
|
|
|
|
/* Historical case to support raid1 reshape without delta disks */
|
|
if (rs_is_raid1(rs)) {
|
|
@@ -2817,7 +2823,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
|
|
}
|
|
|
|
/*
|
|
- *
|
|
+ * Reshape:
|
|
* - change raid layout
|
|
* - change chunk size
|
|
* - add disks
|
|
@@ -2926,6 +2932,20 @@ static int rs_setup_reshape(struct raid_set *rs)
|
|
return r;
|
|
}
|
|
|
|
+/*
|
|
+ * If the md resync thread has updated the superblock with the max reshape position
|
|
+ * at the end of a reshape but not (yet) reset the layout configuration
|
|
+ * changes -> reset the latter.
|
|
+ */
|
|
+static void rs_reset_inconclusive_reshape(struct raid_set *rs)
|
|
+{
|
|
+ if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
|
|
+ rs_set_cur(rs);
|
|
+ rs->md.delta_disks = 0;
|
|
+ rs->md.reshape_backwards = 0;
|
|
+ }
|
|
+}
|
|
+
|
|
/*
|
|
* Enable/disable discard support on RAID set depending on
|
|
* RAID level and discard properties of underlying RAID members.
|
|
@@ -3212,11 +3232,14 @@ size_check:
|
|
if (r)
|
|
goto bad;
|
|
|
|
+ /* Catch any inconclusive reshape superblock content. */
|
|
+ rs_reset_inconclusive_reshape(rs);
|
|
+
|
|
/* Start raid set read-only and assumed clean to change in raid_resume() */
|
|
rs->md.ro = 1;
|
|
rs->md.in_sync = 1;
|
|
|
|
- /* Keep array frozen */
|
|
+ /* Keep array frozen until resume. */
|
|
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
|
|
|
|
/* Has to be held on running the array */
|
|
@@ -3230,7 +3253,6 @@ size_check:
|
|
}
|
|
|
|
r = md_start(&rs->md);
|
|
-
|
|
if (r) {
|
|
ti->error = "Failed to start raid array";
|
|
mddev_unlock(&rs->md);
|
|
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
|
|
index 13b4385f4d5a9..9c3bc3711b335 100644
|
|
--- a/drivers/md/dm-rq.c
|
|
+++ b/drivers/md/dm-rq.c
|
|
@@ -569,6 +569,7 @@ out_tag_set:
|
|
blk_mq_free_tag_set(md->tag_set);
|
|
out_kfree_tag_set:
|
|
kfree(md->tag_set);
|
|
+ md->tag_set = NULL;
|
|
|
|
return err;
|
|
}
|
|
@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
|
|
if (md->tag_set) {
|
|
blk_mq_free_tag_set(md->tag_set);
|
|
kfree(md->tag_set);
|
|
+ md->tag_set = NULL;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
|
|
index 564896659dd44..21d1a17e77c96 100644
|
|
--- a/drivers/md/persistent-data/dm-btree-internal.h
|
|
+++ b/drivers/md/persistent-data/dm-btree-internal.h
|
|
@@ -34,12 +34,12 @@ struct node_header {
|
|
__le32 max_entries;
|
|
__le32 value_size;
|
|
__le32 padding;
|
|
-} __packed;
|
|
+} __attribute__((packed, aligned(8)));
|
|
|
|
struct btree_node {
|
|
struct node_header header;
|
|
__le64 keys[];
|
|
-} __packed;
|
|
+} __attribute__((packed, aligned(8)));
|
|
|
|
|
|
/*
|
|
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
|
|
index d8b4125e338ca..a213bf11738fb 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-common.c
|
|
+++ b/drivers/md/persistent-data/dm-space-map-common.c
|
|
@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
|
|
*/
|
|
begin = do_div(index_begin, ll->entries_per_block);
|
|
end = do_div(end, ll->entries_per_block);
|
|
+ if (end == 0)
|
|
+ end = ll->entries_per_block;
|
|
|
|
for (i = index_begin; i < index_end; i++, begin = 0) {
|
|
struct dm_block *blk;
|
|
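
The sm_ll_find_free_block() fix above handles a search range that ends exactly on an index-block boundary: do_div() then returns a remainder of 0, and without the new check the last block would be scanned up to entry 0, i.e. not at all. A standalone sketch of the boundary case (entries_per_block is an illustrative value):

#include <stdio.h>

int main(void)
{
	unsigned long entries_per_block = 8192;
	unsigned long search_end = 3 * entries_per_block;	/* range ends exactly on a block boundary */
	unsigned long end = search_end % entries_per_block;	/* what do_div() leaves behind */

	printf("raw remainder: %lu (last block scanned up to entry %lu)\n", end, end);

	if (end == 0)			/* the fix: an exact boundary means "the whole last block" */
		end = entries_per_block;

	printf("fixed end:     %lu\n", end);
	return 0;
}
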
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
|
|
index 8de63ce39bdd5..87e17909ef521 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-common.h
|
|
+++ b/drivers/md/persistent-data/dm-space-map-common.h
|
|
@@ -33,7 +33,7 @@ struct disk_index_entry {
|
|
__le64 blocknr;
|
|
__le32 nr_free;
|
|
__le32 none_free_before;
|
|
-} __packed;
|
|
+} __attribute__ ((packed, aligned(8)));
|
|
|
|
|
|
#define MAX_METADATA_BITMAPS 255
|
|
@@ -43,7 +43,7 @@ struct disk_metadata_index {
|
|
__le64 blocknr;
|
|
|
|
struct disk_index_entry index[MAX_METADATA_BITMAPS];
|
|
-} __packed;
|
|
+} __attribute__ ((packed, aligned(8)));
|
|
|
|
struct ll_disk;
|
|
|
|
@@ -86,7 +86,7 @@ struct disk_sm_root {
|
|
__le64 nr_allocated;
|
|
__le64 bitmap_root;
|
|
__le64 ref_count_root;
|
|
-} __packed;
|
|
+} __attribute__ ((packed, aligned(8)));
|
|
|
|
#define ENTRIES_PER_BYTE 4
|
|
|
|
@@ -94,7 +94,7 @@ struct disk_bitmap_header {
|
|
__le32 csum;
|
|
__le32 not_used;
|
|
__le64 blocknr;
|
|
-} __packed;
|
|
+} __attribute__ ((packed, aligned(8)));
|
|
|
|
enum allocation_event {
|
|
SM_NONE,
|
|
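
The attribute changes above keep the on-disk layout of these metadata structs identical (packed, no inter-member padding) while restoring the struct's own alignment to 8 bytes instead of the 1-byte alignment that plain __packed implies. A standalone check of size versus alignment for the two spellings (the example struct only loosely mirrors disk_sm_root):

#include <stdio.h>
#include <stdint.h>
#include <stdalign.h>

struct root_packed {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
	uint64_t nr_blocks;
} __attribute__((packed));

struct root_packed_aligned {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
	uint64_t nr_blocks;
} __attribute__((packed, aligned(8)));

int main(void)
{
	printf("packed:          size %zu, align %zu\n",
	       sizeof(struct root_packed), alignof(struct root_packed));
	printf("packed+aligned8: size %zu, align %zu\n",
	       sizeof(struct root_packed_aligned), alignof(struct root_packed_aligned));
	return 0;
}
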
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
|
|
index c0347997f6ff7..9d3e51c1efd7e 100644
|
|
--- a/drivers/md/raid1.c
|
|
+++ b/drivers/md/raid1.c
|
|
@@ -478,6 +478,8 @@ static void raid1_end_write_request(struct bio *bio)
|
|
if (!test_bit(Faulty, &rdev->flags))
|
|
set_bit(R1BIO_WriteError, &r1_bio->state);
|
|
else {
|
|
+ /* Fail the request */
|
|
+ set_bit(R1BIO_Degraded, &r1_bio->state);
|
|
/* Finished with this branch */
|
|
r1_bio->bios[mirror] = NULL;
|
|
to_put = bio;
|
|
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
|
|
index 5ff7bedee2477..3862ddc86ec48 100644
|
|
--- a/drivers/media/dvb-core/dvbdev.c
|
|
+++ b/drivers/media/dvb-core/dvbdev.c
|
|
@@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
|
|
|
|
if (dvbdev->adapter->conn) {
|
|
media_device_unregister_entity(dvbdev->adapter->conn);
|
|
+ kfree(dvbdev->adapter->conn);
|
|
dvbdev->adapter->conn = NULL;
|
|
kfree(dvbdev->adapter->conn_pads);
|
|
dvbdev->adapter->conn_pads = NULL;
|
|
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
|
|
index a3161d7090153..ab7883cff8b22 100644
|
|
--- a/drivers/media/i2c/adv7511-v4l2.c
|
|
+++ b/drivers/media/i2c/adv7511-v4l2.c
|
|
@@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
|
|
|
|
adv7511_set_isr(sd, false);
|
|
adv7511_init_setup(sd);
|
|
- cancel_delayed_work(&state->edid_handler);
|
|
+ cancel_delayed_work_sync(&state->edid_handler);
|
|
i2c_unregister_device(state->i2c_edid);
|
|
i2c_unregister_device(state->i2c_cec);
|
|
i2c_unregister_device(state->i2c_pktmem);
|
|
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
|
|
index 09004d928d11f..d1f58795794fd 100644
|
|
--- a/drivers/media/i2c/adv7604.c
|
|
+++ b/drivers/media/i2c/adv7604.c
|
|
@@ -3616,7 +3616,7 @@ static int adv76xx_remove(struct i2c_client *client)
|
|
io_write(sd, 0x6e, 0);
|
|
io_write(sd, 0x73, 0);
|
|
|
|
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
|
|
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
|
|
v4l2_async_unregister_subdev(sd);
|
|
media_entity_cleanup(&sd->entity);
|
|
adv76xx_unregister_clients(to_state(sd));
|
|
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
|
|
index 0855f648416d1..f7d2b6cd3008b 100644
|
|
--- a/drivers/media/i2c/adv7842.c
|
|
+++ b/drivers/media/i2c/adv7842.c
|
|
@@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
|
|
struct adv7842_state *state = to_state(sd);
|
|
|
|
adv7842_irq_enable(sd, false);
|
|
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
|
|
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
|
|
v4l2_device_unregister_subdev(sd);
|
|
media_entity_cleanup(&sd->entity);
|
|
adv7842_unregister_clients(sd);
|
|
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
|
|
index 831b5b54fd78c..1b309bb743c7b 100644
|
|
--- a/drivers/media/i2c/tc358743.c
|
|
+++ b/drivers/media/i2c/tc358743.c
|
|
@@ -2193,7 +2193,7 @@ static int tc358743_remove(struct i2c_client *client)
|
|
del_timer_sync(&state->timer);
|
|
flush_work(&state->work_i2c_poll);
|
|
}
|
|
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
|
|
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
|
|
cec_unregister_adapter(state->cec_adap);
|
|
v4l2_async_unregister_subdev(sd);
|
|
v4l2_device_unregister_subdev(sd);
|
|
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
|
|
index a09bf0a39d058..89bb7e6dc7a42 100644
|
|
--- a/drivers/media/i2c/tda1997x.c
|
|
+++ b/drivers/media/i2c/tda1997x.c
|
|
@@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
|
|
media_entity_cleanup(&sd->entity);
|
|
v4l2_ctrl_handler_free(&state->hdl);
|
|
regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
|
|
- cancel_delayed_work(&state->delayed_work_enable_hpd);
|
|
+ cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
|
|
mutex_destroy(&state->page_lock);
|
|
mutex_destroy(&state->lock);
|
|
|
|
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
|
|
index 22f55a7840a62..d0ca260ecf700 100644
|
|
--- a/drivers/media/pci/cx23885/cx23885-core.c
|
|
+++ b/drivers/media/pci/cx23885/cx23885-core.c
|
|
@@ -2077,6 +2077,15 @@ static struct {
|
|
* 0x1423 is the PCI ID for the IOMMU found on Kaveri
|
|
*/
|
|
{ PCI_VENDOR_ID_AMD, 0x1423 },
|
|
+ /* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
|
|
+ */
|
|
+ { PCI_VENDOR_ID_AMD, 0x1481 },
|
|
+ /* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
|
|
+ */
|
|
+ { PCI_VENDOR_ID_AMD, 0x1419 },
|
|
+ /* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
|
|
+ */
|
|
+ { PCI_VENDOR_ID_ATI, 0x5a23 },
|
|
};
|
|
|
|
static bool cx23885_does_need_dma_reset(void)
|
|
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
|
|
index 11e1eb6a6809e..1d1d32e043f16 100644
|
|
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
|
|
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
|
|
@@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
|
|
printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
|
|
__func__, result);
|
|
result = -ENOMEM;
|
|
- goto failed;
|
|
+ goto fail_pci;
|
|
}
|
|
|
|
/* Establish encoder defaults here */
|
|
@@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
|
|
100000, ENCODER_DEF_BITRATE);
|
|
if (hdl->error) {
|
|
result = hdl->error;
|
|
- goto failed;
|
|
+ goto fail_hdl;
|
|
}
|
|
|
|
port->std = V4L2_STD_NTSC_M;
|
|
@@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
|
|
printk(KERN_INFO "%s: can't allocate mpeg device\n",
|
|
dev->name);
|
|
result = -ENOMEM;
|
|
- goto failed;
|
|
+ goto fail_hdl;
|
|
}
|
|
|
|
port->v4l_device->ctrl_handler = hdl;
|
|
@@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
|
|
if (result < 0) {
|
|
printk(KERN_INFO "%s: can't register mpeg device\n",
|
|
dev->name);
|
|
- /* TODO: We're going to leak here if we don't dealloc
|
|
- The buffers above. The unreg function can't deal wit it.
|
|
- */
|
|
- goto failed;
|
|
+ goto fail_reg;
|
|
}
|
|
|
|
printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
|
|
@@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
|
|
|
|
saa7164_api_set_encoder(port);
|
|
saa7164_api_get_encoder(port);
|
|
+ return 0;
|
|
|
|
- result = 0;
|
|
-failed:
|
|
+fail_reg:
|
|
+ video_device_release(port->v4l_device);
|
|
+ port->v4l_device = NULL;
|
|
+fail_hdl:
|
|
+ v4l2_ctrl_handler_free(hdl);
|
|
+fail_pci:
|
|
return result;
|
|
}
|
|
|
|
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
|
|
index 4dd98f94a91ed..27bb785136319 100644
|
|
--- a/drivers/media/pci/sta2x11/Kconfig
|
|
+++ b/drivers/media/pci/sta2x11/Kconfig
|
|
@@ -3,6 +3,7 @@ config STA2X11_VIP
|
|
tristate "STA2X11 VIP Video For Linux"
|
|
depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
|
|
depends on STA2X11 || COMPILE_TEST
|
|
+ select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
|
|
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
|
|
select VIDEOBUF2_DMA_CONTIG
|
|
select MEDIA_CONTROLLER
|
|
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
|
|
index 995e95272e511..e600764dce968 100644
|
|
--- a/drivers/media/platform/coda/coda-common.c
|
|
+++ b/drivers/media/platform/coda/coda-common.c
|
|
@@ -2062,7 +2062,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
|
|
if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
|
|
ctx->params.gop_size = 1;
|
|
ctx->gopcounter = ctx->params.gop_size - 1;
|
|
- v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
|
|
+ /* Only decoders have this control */
|
|
+ if (ctx->mb_err_cnt_ctrl)
|
|
+ v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
|
|
|
|
ret = ctx->ops->start_streaming(ctx);
|
|
if (ctx->inst_type == CODA_INST_DECODER) {
|
|
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
|
|
index 363ee2a65453c..2dcf7eaea4ce2 100644
|
|
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
|
|
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
|
|
@@ -239,8 +239,10 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
|
|
|
|
parser_init(inst, &codecs, &domain);
|
|
|
|
- core->codecs_count = 0;
|
|
- memset(core->caps, 0, sizeof(core->caps));
|
|
+ if (core->res->hfi_version > HFI_VERSION_1XX) {
|
|
+ core->codecs_count = 0;
|
|
+ memset(core->caps, 0, sizeof(core->caps));
|
|
+ }
|
|
|
|
while (words_count) {
|
|
data = word + 1;
|
|
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
|
|
index 2b270093009c7..a27f638df11c6 100644
|
|
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
|
|
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
|
|
@@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
|
|
int ret;
|
|
unsigned int i;
|
|
|
|
- ret = pm_runtime_get_sync(bdisp->dev);
|
|
+ ret = pm_runtime_resume_and_get(bdisp->dev);
|
|
if (ret < 0) {
|
|
seq_puts(s, "Cannot wake up IP\n");
|
|
return 0;
|
|
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
|
|
index ed863bf5ea804..671e4a928993d 100644
|
|
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
|
|
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
|
|
@@ -589,7 +589,7 @@ static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
|
|
int ret;
|
|
|
|
if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
|
|
- ret = pm_runtime_get_sync(dev);
|
|
+ ret = pm_runtime_resume_and_get(dev);
|
|
if (ret < 0) {
|
|
dev_err(dev, "Failed to enable module\n");
|
|
|
|
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
|
|
index 0c6229592e132..e5c4a6941d26b 100644
|
|
--- a/drivers/media/rc/ite-cir.c
|
|
+++ b/drivers/media/rc/ite-cir.c
|
|
@@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
|
|
/* read the interrupt flags */
|
|
iflags = dev->params.get_irq_causes(dev);
|
|
|
|
+ /* Check for RX overflow */
|
|
+ if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
|
|
+ dev_warn(&dev->rdev->dev, "receive overflow\n");
|
|
+ ir_raw_event_reset(dev->rdev);
|
|
+ }
|
|
+
|
|
/* check for the receive interrupt */
|
|
- if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
|
|
+ if (iflags & ITE_IRQ_RX_FIFO) {
|
|
/* read the FIFO bytes */
|
|
rx_bytes =
|
|
dev->params.get_rx_bytes(dev, rx_buf,
|
|
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
|
|
index 0dc65ef3aa14d..ca0ebf6ad9ccf 100644
|
|
--- a/drivers/media/test-drivers/vivid/vivid-core.c
|
|
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
|
|
@@ -205,13 +205,13 @@ static const u8 vivid_hdmi_edid[256] = {
|
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
|
|
|
|
- 0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
|
|
+ 0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
|
|
0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
|
|
0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
|
|
0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
|
|
0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
|
|
0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
|
|
- 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
|
|
+ 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
|
|
0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
|
|
0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
|
|
0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
|
|
@@ -220,7 +220,7 @@ static const u8 vivid_hdmi_edid[256] = {
|
|
0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
|
|
0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
|
|
0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
|
|
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
|
|
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
|
|
};
|
|
|
|
static int vidioc_querycap(struct file *file, void *priv,
|
|
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index c1a7634e27b43..28e1fd64dd3c2 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
}
}
- if ((ret = dvb_usb_adapter_stream_init(adap)) ||
- (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
- (ret = dvb_usb_adapter_frontend_init(adap))) {
+ ret = dvb_usb_adapter_stream_init(adap);
+ if (ret)
return ret;
- }
+
+ ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
+ if (ret)
+ goto dvb_init_err;
+
+ ret = dvb_usb_adapter_frontend_init(adap);
+ if (ret)
+ goto frontend_init_err;
/* use exclusive FE lock if there is multiple shared FEs */
if (adap->fe_adap[1].fe)
@@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
}
return 0;
+
+frontend_init_err:
+ dvb_usb_adapter_dvb_exit(adap);
+dvb_init_err:
+ dvb_usb_adapter_stream_exit(adap);
+ return ret;
}
static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
@@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
if (d->props.priv_init != NULL) {
ret = d->props.priv_init(d);
- if (ret != 0) {
- kfree(d->priv);
- d->priv = NULL;
- return ret;
- }
+ if (ret != 0)
+ goto err_priv_init;
}
}
/* check the capabilities and set appropriate variables */
dvb_usb_device_power_ctrl(d, 1);
- if ((ret = dvb_usb_i2c_init(d)) ||
- (ret = dvb_usb_adapter_init(d, adapter_nums))) {
- dvb_usb_exit(d);
- return ret;
- }
+ ret = dvb_usb_i2c_init(d);
+ if (ret)
+ goto err_i2c_init;
+ ret = dvb_usb_adapter_init(d, adapter_nums);
+ if (ret)
+ goto err_adapter_init;
if ((ret = dvb_usb_remote_init(d)))
err("could not initialize remote control.");
@@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
dvb_usb_device_power_ctrl(d, 0);
return 0;
+
+err_adapter_init:
+ dvb_usb_adapter_exit(d);
+err_i2c_init:
+ dvb_usb_i2c_exit(d);
+ if (d->priv && d->props.priv_destroy)
+ d->props.priv_destroy(d);
+err_priv_init:
+ kfree(d->priv);
+ d->priv = NULL;
+ return ret;
}
/* determine the name and the state of the just found USB device */
@@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
if (du != NULL)
*du = NULL;
- if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ err("no memory for 'struct dvb_usb_device'");
+ return -ENOMEM;
+ }
+
+ memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
+
+ desc = dvb_usb_find_device(udev, &d->props, &cold);
+ if (!desc) {
deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
if (cold) {
info("found a '%s' in cold state, will try to load a firmware", desc->name);
ret = dvb_usb_download_firmware(udev, props);
if (!props->no_reconnect || ret != 0)
- return ret;
+ goto error;
}
info("found a '%s' in warm state.", desc->name);
- d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
- if (d == NULL) {
- err("no memory for 'struct dvb_usb_device'");
- return -ENOMEM;
- }
-
d->udev = udev;
- memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
d->desc = desc;
d->owner = owner;
usb_set_intfdata(intf, d);
- if (du != NULL)
+ ret = dvb_usb_init(d, adapter_nums);
+ if (ret) {
+ info("%s error while loading driver (%d)", desc->name, ret);
+ goto error;
+ }
+
+ if (du)
*du = d;
- ret = dvb_usb_init(d, adapter_nums);
+ info("%s successfully initialized and connected.", desc->name);
+ return 0;
- if (ret == 0)
- info("%s successfully initialized and connected.", desc->name);
- else
- info("%s error while loading driver (%d)", desc->name, ret);
+ error:
+ usb_set_intfdata(intf, NULL);
+ kfree(d);
return ret;
}
EXPORT_SYMBOL(dvb_usb_device_init);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
|
|
index 741be0e694471..2b8ad2bde8a48 100644
|
|
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
|
|
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
|
|
@@ -487,7 +487,7 @@ extern int __must_check
|
|
dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
|
|
|
|
/* commonly used remote control parsing */
|
|
-extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
|
|
+extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
|
|
|
|
/* commonly used firmware download types and function */
|
|
struct hexline {
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
|
|
index fb9cbfa81a84b..3cd9e9556fa9f 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
|
|
@@ -1984,6 +1984,7 @@ ret:
|
|
return result;
|
|
|
|
out_free:
|
|
+ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
|
|
kfree(dvb);
|
|
dev->dvb = NULL;
|
|
goto ret;
|
|
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
|
|
index 158c8e28ed2cc..47d8f28bfdfc2 100644
|
|
--- a/drivers/media/usb/gspca/gspca.c
|
|
+++ b/drivers/media/usb/gspca/gspca.c
|
|
@@ -1576,6 +1576,8 @@ out:
|
|
#endif
|
|
v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
|
|
v4l2_device_unregister(&gspca_dev->v4l2_dev);
|
|
+ if (sd_desc->probe_error)
|
|
+ sd_desc->probe_error(gspca_dev);
|
|
kfree(gspca_dev->usb_buf);
|
|
kfree(gspca_dev);
|
|
return ret;
|
|
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
|
|
index b0ced2e140064..a6554d5e9e1a5 100644
|
|
--- a/drivers/media/usb/gspca/gspca.h
|
|
+++ b/drivers/media/usb/gspca/gspca.h
|
|
@@ -105,6 +105,7 @@ struct sd_desc {
|
|
cam_cf_op config; /* called on probe */
|
|
cam_op init; /* called on probe and resume */
|
|
cam_op init_controls; /* called on probe */
|
|
+ cam_v_op probe_error; /* called if probe failed, do cleanup here */
|
|
cam_op start; /* called on stream on after URBs creation */
|
|
cam_pkt_op pkt_scan;
|
|
/* optional operations */
|
|
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
|
|
index 97799cfb832e3..9491110709718 100644
|
|
--- a/drivers/media/usb/gspca/sq905.c
|
|
+++ b/drivers/media/usb/gspca/sq905.c
|
|
@@ -158,7 +158,7 @@ static int
|
|
sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
|
|
{
|
|
int ret;
|
|
- int act_len;
|
|
+ int act_len = 0;
|
|
|
|
gspca_dev->usb_buf[0] = '\0';
|
|
if (need_lock)
|
|
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
|
|
index 95673fc0a99c5..d9bc2aacc8851 100644
|
|
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
|
|
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
|
|
@@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
|
|
static int stv06xx_config(struct gspca_dev *gspca_dev,
|
|
const struct usb_device_id *id);
|
|
|
|
+static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
|
|
+{
|
|
+ struct sd *sd = (struct sd *)gspca_dev;
|
|
+
|
|
+ kfree(sd->sensor_priv);
|
|
+ sd->sensor_priv = NULL;
|
|
+}
|
|
+
|
|
/* sub-driver description */
|
|
static const struct sd_desc sd_desc = {
|
|
.name = MODULE_NAME,
|
|
.config = stv06xx_config,
|
|
.init = stv06xx_init,
|
|
.init_controls = stv06xx_init_controls,
|
|
+ .probe_error = stv06xx_probe_error,
|
|
.start = stv06xx_start,
|
|
.stopN = stv06xx_stopN,
|
|
.pkt_scan = stv06xx_pkt_scan,
|
|
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
|
|
index d29b861367ea7..1ef611e083237 100644
|
|
--- a/drivers/media/usb/zr364xx/zr364xx.c
|
|
+++ b/drivers/media/usb/zr364xx/zr364xx.c
|
|
@@ -1430,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
|
|
if (hdl->error) {
|
|
err = hdl->error;
|
|
dev_err(&udev->dev, "couldn't register control\n");
|
|
- goto unregister;
|
|
+ goto free_hdlr_and_unreg_dev;
|
|
}
|
|
/* save the init method used by this camera */
|
|
cam->method = id->driver_info;
|
|
@@ -1503,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
|
|
if (!cam->read_endpoint) {
|
|
err = -ENOMEM;
|
|
dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
|
|
- goto unregister;
|
|
+ goto free_hdlr_and_unreg_dev;
|
|
}
|
|
|
|
/* v4l */
|
|
@@ -1515,7 +1515,7 @@ static int zr364xx_probe(struct usb_interface *intf,
|
|
/* load zr364xx board specific */
|
|
err = zr364xx_board_init(cam);
|
|
if (err)
|
|
- goto unregister;
|
|
+ goto free_hdlr_and_unreg_dev;
|
|
err = v4l2_ctrl_handler_setup(hdl);
|
|
if (err)
|
|
goto board_uninit;
|
|
@@ -1533,7 +1533,7 @@ static int zr364xx_probe(struct usb_interface *intf,
|
|
err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
|
|
if (err) {
|
|
dev_err(&udev->dev, "video_register_device failed\n");
|
|
- goto free_handler;
|
|
+ goto board_uninit;
|
|
}
|
|
cam->v4l2_dev.release = zr364xx_release;
|
|
|
|
@@ -1541,11 +1541,10 @@ static int zr364xx_probe(struct usb_interface *intf,
|
|
video_device_node_name(&cam->vdev));
|
|
return 0;
|
|
|
|
-free_handler:
|
|
- v4l2_ctrl_handler_free(hdl);
|
|
board_uninit:
|
|
zr364xx_board_uninit(cam);
|
|
-unregister:
|
|
+free_hdlr_and_unreg_dev:
|
|
+ v4l2_ctrl_handler_free(hdl);
|
|
v4l2_device_unregister(&cam->v4l2_dev);
|
|
free_cam:
|
|
kfree(cam);
|
|
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
|
|
index 9dc151431a5c6..8052a6efb9659 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
|
|
@@ -1659,6 +1659,8 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
|
|
p_fwht_params->version = V4L2_FWHT_VERSION;
|
|
p_fwht_params->width = 1280;
|
|
p_fwht_params->height = 720;
|
|
+ p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
|
|
+ (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
|
|
break;
|
|
}
|
|
}
|
|
@@ -2379,7 +2381,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
|
|
if (!ref)
|
|
return;
|
|
ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
|
|
- ref->req = ref;
|
|
+ ref->valid_p_req = true;
|
|
+}
|
|
+
|
|
+/* Copy the current value to the request value */
|
|
+static void cur_to_req(struct v4l2_ctrl_ref *ref)
|
|
+{
|
|
+ if (!ref)
|
|
+ return;
|
|
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
|
|
+ ref->valid_p_req = true;
|
|
}
|
|
|
|
/* Copy the request value to the new value */
|
|
@@ -2387,8 +2398,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
|
|
{
|
|
if (!ref)
|
|
return;
|
|
- if (ref->req)
|
|
- ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
|
|
+ if (ref->valid_p_req)
|
|
+ ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
|
|
else
|
|
ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
|
|
}
|
|
@@ -3555,39 +3566,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
|
|
struct v4l2_ctrl_handler *hdl =
|
|
container_of(obj, struct v4l2_ctrl_handler, req_obj);
|
|
struct v4l2_ctrl_handler *main_hdl = obj->priv;
|
|
- struct v4l2_ctrl_handler *prev_hdl = NULL;
|
|
- struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
|
|
|
|
mutex_lock(main_hdl->lock);
|
|
- if (list_empty(&main_hdl->requests_queued))
|
|
- goto queue;
|
|
-
|
|
- prev_hdl = list_last_entry(&main_hdl->requests_queued,
|
|
- struct v4l2_ctrl_handler, requests_queued);
|
|
- /*
|
|
- * Note: prev_hdl and hdl must contain the same list of control
|
|
- * references, so if any differences are detected then that is a
|
|
- * driver bug and the WARN_ON is triggered.
|
|
- */
|
|
- mutex_lock(prev_hdl->lock);
|
|
- ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
|
|
- struct v4l2_ctrl_ref, node);
|
|
- list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
|
|
- if (ref_ctrl->req)
|
|
- continue;
|
|
- while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
|
|
- /* Should never happen, but just in case... */
|
|
- if (list_is_last(&ref_ctrl_prev->node,
|
|
- &prev_hdl->ctrl_refs))
|
|
- break;
|
|
- ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
|
|
- }
|
|
- if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
|
|
- break;
|
|
- ref_ctrl->req = ref_ctrl_prev->req;
|
|
- }
|
|
- mutex_unlock(prev_hdl->lock);
|
|
-queue:
|
|
list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
|
|
hdl->request_is_queued = true;
|
|
mutex_unlock(main_hdl->lock);
|
|
@@ -3644,7 +3624,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
|
|
{
|
|
struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
|
|
|
|
- return (ref && ref->req == ref) ? ref->ctrl : NULL;
|
|
+ return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
|
|
}
|
|
EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
|
|
|
|
@@ -3830,7 +3810,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
|
|
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
|
|
}
|
|
|
|
-/* Get extended controls. Allocates the helpers array if needed. */
|
|
+/*
|
|
+ * Get extended controls. Allocates the helpers array if needed.
|
|
+ *
|
|
+ * Note that v4l2_g_ext_ctrls_common() with 'which' set to
|
|
+ * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
|
|
+ * completed, and in that case valid_p_req is true for all controls.
|
|
+ */
|
|
static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
|
|
struct v4l2_ext_controls *cs,
|
|
struct video_device *vdev)
|
|
@@ -3839,9 +3825,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
|
|
struct v4l2_ctrl_helper *helpers = helper;
|
|
int ret;
|
|
int i, j;
|
|
- bool def_value;
|
|
+ bool is_default, is_request;
|
|
|
|
- def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
|
|
+ is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
|
|
+ is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
|
|
|
|
cs->error_idx = cs->count;
|
|
cs->which = V4L2_CTRL_ID2WHICH(cs->which);
|
|
@@ -3867,11 +3854,9 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
|
|
ret = -EACCES;
|
|
|
|
for (i = 0; !ret && i < cs->count; i++) {
|
|
- int (*ctrl_to_user)(struct v4l2_ext_control *c,
|
|
- struct v4l2_ctrl *ctrl);
|
|
struct v4l2_ctrl *master;
|
|
-
|
|
- ctrl_to_user = def_value ? def_to_user : cur_to_user;
|
|
+ bool is_volatile = false;
|
|
+ u32 idx = i;
|
|
|
|
if (helpers[i].mref == NULL)
|
|
continue;
|
|
@@ -3881,31 +3866,48 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
|
|
|
|
v4l2_ctrl_lock(master);
|
|
|
|
- /* g_volatile_ctrl will update the new control values */
|
|
- if (!def_value &&
|
|
+ /*
|
|
+ * g_volatile_ctrl will update the new control values.
|
|
+ * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
|
|
+ * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
|
|
+ * it is v4l2_ctrl_request_complete() that copies the
|
|
+ * volatile controls at the time of request completion
|
|
+ * to the request, so you don't want to do that again.
|
|
+ */
|
|
+ if (!is_default && !is_request &&
|
|
((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
|
|
(master->has_volatiles && !is_cur_manual(master)))) {
|
|
for (j = 0; j < master->ncontrols; j++)
|
|
cur_to_new(master->cluster[j]);
|
|
ret = call_op(master, g_volatile_ctrl);
|
|
- ctrl_to_user = new_to_user;
|
|
+ is_volatile = true;
|
|
}
|
|
- /* If OK, then copy the current (for non-volatile controls)
|
|
- or the new (for volatile controls) control values to the
|
|
- caller */
|
|
- if (!ret) {
|
|
- u32 idx = i;
|
|
|
|
- do {
|
|
- if (helpers[idx].ref->req)
|
|
- ret = req_to_user(cs->controls + idx,
|
|
- helpers[idx].ref->req);
|
|
- else
|
|
- ret = ctrl_to_user(cs->controls + idx,
|
|
- helpers[idx].ref->ctrl);
|
|
- idx = helpers[idx].next;
|
|
- } while (!ret && idx);
|
|
+ if (ret) {
|
|
+ v4l2_ctrl_unlock(master);
|
|
+ break;
|
|
}
|
|
+
|
|
+ /*
|
|
+ * Copy the default value (if is_default is true), the
|
|
+ * request value (if is_request is true and p_req is valid),
|
|
+ * the new volatile value (if is_volatile is true) or the
|
|
+ * current value.
|
|
+ */
|
|
+ do {
|
|
+ struct v4l2_ctrl_ref *ref = helpers[idx].ref;
|
|
+
|
|
+ if (is_default)
|
|
+ ret = def_to_user(cs->controls + idx, ref->ctrl);
|
|
+ else if (is_request && ref->valid_p_req)
|
|
+ ret = req_to_user(cs->controls + idx, ref);
|
|
+ else if (is_volatile)
|
|
+ ret = new_to_user(cs->controls + idx, ref->ctrl);
|
|
+ else
|
|
+ ret = cur_to_user(cs->controls + idx, ref->ctrl);
|
|
+ idx = helpers[idx].next;
|
|
+ } while (!ret && idx);
|
|
+
|
|
v4l2_ctrl_unlock(master);
|
|
}
|
|
|
|
@@ -4548,8 +4550,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
|
|
unsigned int i;
|
|
|
|
if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
|
|
- ref->req = ref;
|
|
-
|
|
v4l2_ctrl_lock(master);
|
|
/* g_volatile_ctrl will update the current control values */
|
|
for (i = 0; i < master->ncontrols; i++)
|
|
@@ -4559,21 +4559,12 @@ void v4l2_ctrl_request_complete(struct media_request *req,
|
|
v4l2_ctrl_unlock(master);
|
|
continue;
|
|
}
|
|
- if (ref->req == ref)
|
|
+ if (ref->valid_p_req)
|
|
continue;
|
|
|
|
+ /* Copy the current control value into the request */
|
|
v4l2_ctrl_lock(ctrl);
|
|
- if (ref->req) {
|
|
- ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
|
|
- } else {
|
|
- ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
|
|
- /*
|
|
- * Set ref->req to ensure that when userspace wants to
|
|
- * obtain the controls of this request it will take
|
|
- * this value and not the current value of the control.
|
|
- */
|
|
- ref->req = ref;
|
|
- }
|
|
+ cur_to_req(ref);
|
|
v4l2_ctrl_unlock(ctrl);
|
|
}
|
|
|
|
@@ -4637,7 +4628,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
|
|
struct v4l2_ctrl_ref *r =
|
|
find_ref(hdl, master->cluster[i]->id);
|
|
|
|
- if (r->req && r == r->req) {
|
|
+ if (r->valid_p_req) {
|
|
have_new_data = true;
|
|
break;
|
|
}
|
|
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
|
|
index 077d9ab112b71..d919ae9691e23 100644
|
|
--- a/drivers/mfd/arizona-irq.c
|
|
+++ b/drivers/mfd/arizona-irq.c
|
|
@@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
|
|
unsigned int val;
|
|
int ret;
|
|
|
|
- ret = pm_runtime_get_sync(arizona->dev);
|
|
+ ret = pm_runtime_resume_and_get(arizona->dev);
|
|
if (ret < 0) {
|
|
dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
|
|
return IRQ_NONE;
|
|
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
|
|
index 3781d0bb77865..783a14af18e26 100644
|
|
--- a/drivers/mfd/da9063-i2c.c
|
|
+++ b/drivers/mfd/da9063-i2c.c
|
|
@@ -442,6 +442,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
|
|
return ret;
|
|
}
|
|
|
|
+ /* If SMBus is not available and only I2C is possible, enter I2C mode */
|
|
+ if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
|
|
+ ret = regmap_clear_bits(da9063->regmap, DA9063_REG_CONFIG_J,
|
|
+ DA9063_TWOWIRE_TO);
|
|
+ if (ret < 0) {
|
|
+ dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
|
|
+ return -EIO;
|
|
+ }
|
|
+ }
|
|
+
|
|
return da9063_device_init(da9063, i2c->irq);
|
|
}
|
|
|
|
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
|
|
index 42e27a2982180..3246598e4d7e3 100644
|
|
--- a/drivers/mmc/core/block.c
|
|
+++ b/drivers/mmc/core/block.c
|
|
@@ -571,6 +571,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
|
main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
|
|
}
|
|
|
|
+ /*
|
|
+ * Make sure to update CACHE_CTRL in case it was changed. The cache
|
|
+ * will get turned back on if the card is re-initialized, e.g.
|
|
+ * suspend/resume or hw reset in recovery.
|
|
+ */
|
|
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
|
|
+ (cmd.opcode == MMC_SWITCH)) {
|
|
+ u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
|
|
+
|
|
+ card->ext_csd.cache_ctrl = value;
|
|
+ }
|
|
+
|
|
/*
|
|
* According to the SD specs, some commands require a delay after
|
|
* issuing the command.
|
|
@@ -2221,6 +2233,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
|
|
case MMC_ISSUE_ASYNC:
|
|
switch (req_op(req)) {
|
|
case REQ_OP_FLUSH:
|
|
+ if (!mmc_cache_enabled(host)) {
|
|
+ blk_mq_end_request(req, BLK_STS_OK);
|
|
+ return MMC_REQ_FINISHED;
|
|
+ }
|
|
ret = mmc_blk_cqe_issue_flush(mq, req);
|
|
break;
|
|
case REQ_OP_READ:
|
|
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
|
|
index 19f1ee57fb345..6089f4b46ada6 100644
|
|
--- a/drivers/mmc/core/core.c
|
|
+++ b/drivers/mmc/core/core.c
|
|
@@ -1204,7 +1204,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
|
|
|
|
err = mmc_wait_for_cmd(host, &cmd, 0);
|
|
if (err)
|
|
- return err;
|
|
+ goto power_cycle;
|
|
|
|
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
|
|
return -EIO;
|
|
@@ -2366,80 +2366,6 @@ void mmc_stop_host(struct mmc_host *host)
|
|
mmc_release_host(host);
|
|
}
|
|
|
|
-#ifdef CONFIG_PM_SLEEP
|
|
-/* Do the card removal on suspend if card is assumed removeable
|
|
- * Do that in pm notifier while userspace isn't yet frozen, so we will be able
|
|
- to sync the card.
|
|
-*/
|
|
-static int mmc_pm_notify(struct notifier_block *notify_block,
|
|
- unsigned long mode, void *unused)
|
|
-{
|
|
- struct mmc_host *host = container_of(
|
|
- notify_block, struct mmc_host, pm_notify);
|
|
- unsigned long flags;
|
|
- int err = 0;
|
|
-
|
|
- switch (mode) {
|
|
- case PM_HIBERNATION_PREPARE:
|
|
- case PM_SUSPEND_PREPARE:
|
|
- case PM_RESTORE_PREPARE:
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
- host->rescan_disable = 1;
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
- cancel_delayed_work_sync(&host->detect);
|
|
-
|
|
- if (!host->bus_ops)
|
|
- break;
|
|
-
|
|
- /* Validate prerequisites for suspend */
|
|
- if (host->bus_ops->pre_suspend)
|
|
- err = host->bus_ops->pre_suspend(host);
|
|
- if (!err)
|
|
- break;
|
|
-
|
|
- if (!mmc_card_is_removable(host)) {
|
|
- dev_warn(mmc_dev(host),
|
|
- "pre_suspend failed for non-removable host: "
|
|
- "%d\n", err);
|
|
- /* Avoid removing non-removable hosts */
|
|
- break;
|
|
- }
|
|
-
|
|
- /* Calling bus_ops->remove() with a claimed host can deadlock */
|
|
- host->bus_ops->remove(host);
|
|
- mmc_claim_host(host);
|
|
- mmc_detach_bus(host);
|
|
- mmc_power_off(host);
|
|
- mmc_release_host(host);
|
|
- host->pm_flags = 0;
|
|
- break;
|
|
-
|
|
- case PM_POST_SUSPEND:
|
|
- case PM_POST_HIBERNATION:
|
|
- case PM_POST_RESTORE:
|
|
-
|
|
- spin_lock_irqsave(&host->lock, flags);
|
|
- host->rescan_disable = 0;
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
- _mmc_detect_change(host, 0, false);
|
|
-
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-void mmc_register_pm_notifier(struct mmc_host *host)
|
|
-{
|
|
- host->pm_notify.notifier_call = mmc_pm_notify;
|
|
- register_pm_notifier(&host->pm_notify);
|
|
-}
|
|
-
|
|
-void mmc_unregister_pm_notifier(struct mmc_host *host)
|
|
-{
|
|
- unregister_pm_notifier(&host->pm_notify);
|
|
-}
|
|
-#endif
|
|
-
|
|
static int __init mmc_init(void)
|
|
{
|
|
int ret;
|
|
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
|
|
index 575ac0257af2f..db3c9c68875d8 100644
|
|
--- a/drivers/mmc/core/core.h
|
|
+++ b/drivers/mmc/core/core.h
|
|
@@ -29,6 +29,7 @@ struct mmc_bus_ops {
|
|
int (*shutdown)(struct mmc_host *);
|
|
int (*hw_reset)(struct mmc_host *);
|
|
int (*sw_reset)(struct mmc_host *);
|
|
+ bool (*cache_enabled)(struct mmc_host *);
|
|
};
|
|
|
|
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
|
|
@@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card);
|
|
int mmc_hs200_to_hs400(struct mmc_card *card);
|
|
int mmc_hs400_to_hs200(struct mmc_card *card);
|
|
|
|
-#ifdef CONFIG_PM_SLEEP
|
|
-void mmc_register_pm_notifier(struct mmc_host *host);
|
|
-void mmc_unregister_pm_notifier(struct mmc_host *host);
|
|
-#else
|
|
-static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
|
|
-static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
|
|
-#endif
|
|
-
|
|
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
|
|
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
|
|
|
|
@@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
|
|
host->ops->post_req(host, mrq, err);
|
|
}
|
|
|
|
+static inline bool mmc_cache_enabled(struct mmc_host *host)
|
|
+{
|
|
+ if (host->bus_ops->cache_enabled)
|
|
+ return host->bus_ops->cache_enabled(host);
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
#endif
|
|
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
|
|
index 96b2ca1f1b06d..fa59e6f4801c1 100644
|
|
--- a/drivers/mmc/core/host.c
|
|
+++ b/drivers/mmc/core/host.c
|
|
@@ -34,6 +34,42 @@
|
|
|
|
static DEFINE_IDA(mmc_host_ida);
|
|
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+static int mmc_host_class_prepare(struct device *dev)
|
|
+{
|
|
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
|
|
+
|
|
+ /*
|
|
+ * It's safe to access the bus_ops pointer, as both userspace and the
|
|
+ * workqueue for detecting cards are frozen at this point.
|
|
+ */
|
|
+ if (!host->bus_ops)
|
|
+ return 0;
|
|
+
|
|
+ /* Validate conditions for system suspend. */
|
|
+ if (host->bus_ops->pre_suspend)
|
|
+ return host->bus_ops->pre_suspend(host);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mmc_host_class_complete(struct device *dev)
|
|
+{
|
|
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
|
|
+
|
|
+ _mmc_detect_change(host, 0, false);
|
|
+}
|
|
+
|
|
+static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
|
|
+ .prepare = mmc_host_class_prepare,
|
|
+ .complete = mmc_host_class_complete,
|
|
+};
|
|
+
|
|
+#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
|
|
+#else
|
|
+#define MMC_HOST_CLASS_DEV_PM_OPS NULL
|
|
+#endif
|
|
+
|
|
static void mmc_host_classdev_release(struct device *dev)
|
|
{
|
|
struct mmc_host *host = cls_dev_to_mmc_host(dev);
|
|
@@ -45,6 +81,7 @@ static void mmc_host_classdev_release(struct device *dev)
|
|
static struct class mmc_host_class = {
|
|
.name = "mmc_host",
|
|
.dev_release = mmc_host_classdev_release,
|
|
+ .pm = MMC_HOST_CLASS_DEV_PM_OPS,
|
|
};
|
|
|
|
int mmc_register_host_class(void)
|
|
@@ -493,8 +530,6 @@ int mmc_add_host(struct mmc_host *host)
|
|
#endif
|
|
|
|
mmc_start_host(host);
|
|
- mmc_register_pm_notifier(host);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -510,7 +545,6 @@ EXPORT_SYMBOL(mmc_add_host);
|
|
*/
|
|
void mmc_remove_host(struct mmc_host *host)
|
|
{
|
|
- mmc_unregister_pm_notifier(host);
|
|
mmc_stop_host(host);
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
|
|
index 9ce34e8800335..7494d595035e3 100644
|
|
--- a/drivers/mmc/core/mmc.c
|
|
+++ b/drivers/mmc/core/mmc.c
|
|
@@ -2033,6 +2033,12 @@ static void mmc_detect(struct mmc_host *host)
|
|
}
|
|
}
|
|
|
|
+static bool _mmc_cache_enabled(struct mmc_host *host)
|
|
+{
|
|
+ return host->card->ext_csd.cache_size > 0 &&
|
|
+ host->card->ext_csd.cache_ctrl & 1;
|
|
+}
|
|
+
|
|
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
|
|
{
|
|
int err = 0;
|
|
@@ -2212,6 +2218,7 @@ static const struct mmc_bus_ops mmc_ops = {
|
|
.alive = mmc_alive,
|
|
.shutdown = mmc_shutdown,
|
|
.hw_reset = _mmc_hw_reset,
|
|
+ .cache_enabled = _mmc_cache_enabled,
|
|
};
|
|
|
|
/*
|
|
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
|
|
index baa6314f69b41..ebad70e4481af 100644
|
|
--- a/drivers/mmc/core/mmc_ops.c
|
|
+++ b/drivers/mmc/core/mmc_ops.c
|
|
@@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
|
|
{
|
|
int err = 0;
|
|
|
|
- if (mmc_card_mmc(card) &&
|
|
- (card->ext_csd.cache_size > 0) &&
|
|
- (card->ext_csd.cache_ctrl & 1)) {
|
|
+ if (mmc_cache_enabled(card->host)) {
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
|
EXT_CSD_FLUSH_CACHE, 1,
|
|
MMC_CACHE_FLUSH_TIMEOUT_MS);
|
|
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
|
|
index 6f054c449d467..636d4e3aa0e35 100644
|
|
--- a/drivers/mmc/core/sd.c
|
|
+++ b/drivers/mmc/core/sd.c
|
|
@@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
|
|
csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
|
|
csd->erase_size <<= csd->write_blkbits - 9;
|
|
}
|
|
+
|
|
+ if (UNSTUFF_BITS(resp, 13, 1))
|
|
+ mmc_card_set_readonly(card);
|
|
break;
|
|
case 1:
|
|
/*
|
|
@@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
|
|
csd->write_blkbits = 9;
|
|
csd->write_partial = 0;
|
|
csd->erase_size = 1;
|
|
+
|
|
+ if (UNSTUFF_BITS(resp, 13, 1))
|
|
+ mmc_card_set_readonly(card);
|
|
break;
|
|
default:
|
|
pr_err("%s: unrecognised CSD structure version %d\n",
|
|
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
|
|
index 694a212cbe25a..1b0853a82189a 100644
|
|
--- a/drivers/mmc/core/sdio.c
|
|
+++ b/drivers/mmc/core/sdio.c
|
|
@@ -985,21 +985,37 @@ out:
|
|
*/
|
|
static int mmc_sdio_pre_suspend(struct mmc_host *host)
|
|
{
|
|
- int i, err = 0;
|
|
+ int i;
|
|
|
|
for (i = 0; i < host->card->sdio_funcs; i++) {
|
|
struct sdio_func *func = host->card->sdio_func[i];
|
|
if (func && sdio_func_present(func) && func->dev.driver) {
|
|
const struct dev_pm_ops *pmops = func->dev.driver->pm;
|
|
- if (!pmops || !pmops->suspend || !pmops->resume) {
|
|
+ if (!pmops || !pmops->suspend || !pmops->resume)
|
|
/* force removal of entire card in that case */
|
|
- err = -ENOSYS;
|
|
- break;
|
|
- }
|
|
+ goto remove;
|
|
}
|
|
}
|
|
|
|
- return err;
|
|
+ return 0;
|
|
+
|
|
+remove:
|
|
+ if (!mmc_card_is_removable(host)) {
|
|
+ dev_warn(mmc_dev(host),
|
|
+ "missing suspend/resume ops for non-removable SDIO card\n");
|
|
+ /* Don't remove a non-removable card - we can't re-detect it. */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Remove the SDIO card and let it be re-detected later on. */
|
|
+ mmc_sdio_remove(host);
|
|
+ mmc_claim_host(host);
|
|
+ mmc_detach_bus(host);
|
|
+ mmc_power_off(host);
|
|
+ mmc_release_host(host);
|
|
+ host->pm_flags = 0;
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
|
|
index f9780c65ebe98..f24623aac2dbe 100644
|
|
--- a/drivers/mmc/host/sdhci-brcmstb.c
|
|
+++ b/drivers/mmc/host/sdhci-brcmstb.c
|
|
@@ -199,7 +199,6 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
|
|
if (dma64) {
|
|
dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
|
|
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
|
|
- cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
|
|
}
|
|
|
|
ret = cqhci_init(cq_host, host->mmc, dma64);
|
|
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
index a20459744d213..94327988da914 100644
|
|
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
@@ -1488,7 +1488,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
|
|
|
|
mmc_of_parse_voltage(np, &host->ocr_mask);
|
|
|
|
- if (esdhc_is_usdhc(imx_data)) {
|
|
+ if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
|
|
imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
|
|
ESDHC_PINCTRL_STATE_100MHZ);
|
|
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
|
|
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
|
|
index 9552708846ca3..bf04a08eeba13 100644
|
|
--- a/drivers/mmc/host/sdhci-pci-core.c
|
|
+++ b/drivers/mmc/host/sdhci-pci-core.c
|
|
@@ -516,6 +516,7 @@ struct intel_host {
|
|
int drv_strength;
|
|
bool d3_retune;
|
|
bool rpm_retune_ok;
|
|
+ bool needs_pwr_off;
|
|
u32 glk_rx_ctrl1;
|
|
u32 glk_tun_val;
|
|
u32 active_ltr;
|
|
@@ -643,9 +644,25 @@ out:
|
|
static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
|
|
unsigned short vdd)
|
|
{
|
|
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
|
|
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
|
|
int cntr;
|
|
u8 reg;
|
|
|
|
+ /*
|
|
+ * Bus power may control card power, but a full reset still may not
|
|
+ * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
|
|
+ * That might be needed to initialize correctly, if the card was left
|
|
+ * powered on previously.
|
|
+ */
|
|
+ if (intel_host->needs_pwr_off) {
|
|
+ intel_host->needs_pwr_off = false;
|
|
+ if (mode != MMC_POWER_OFF) {
|
|
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
|
|
+ usleep_range(10000, 12500);
|
|
+ }
|
|
+ }
|
|
+
|
|
sdhci_set_power(host, mode, vdd);
|
|
|
|
if (mode == MMC_POWER_OFF)
|
|
@@ -1135,6 +1152,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
|
|
return 0;
|
|
}
|
|
|
|
+static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
|
|
+{
|
|
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
|
|
+ u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
|
|
+
|
|
+ intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
|
|
+}
|
|
+
|
|
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
|
|
{
|
|
byt_probe_slot(slot);
|
|
@@ -1152,6 +1177,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
|
|
slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
|
|
slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
|
|
|
|
+ byt_needs_pwr_off(slot);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -1903,6 +1930,8 @@ static const struct pci_device_id pci_ids[] = {
|
|
SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
|
|
SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
|
|
SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
|
|
+ SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
|
|
+ SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
|
|
SDHCI_PCI_DEVICE(O2, 8120, o2),
|
|
SDHCI_PCI_DEVICE(O2, 8220, o2),
|
|
SDHCI_PCI_DEVICE(O2, 8221, o2),
|
|
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
|
|
index d0ed232af0eb8..8f90c4163bb5c 100644
|
|
--- a/drivers/mmc/host/sdhci-pci.h
|
|
+++ b/drivers/mmc/host/sdhci-pci.h
|
|
@@ -57,6 +57,8 @@
|
|
#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
|
|
#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4
|
|
#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
|
|
+#define PCI_DEVICE_ID_INTEL_LKF_EMMC 0x98c4
|
|
+#define PCI_DEVICE_ID_INTEL_LKF_SD 0x98f8
|
|
|
|
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
|
|
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
|
|
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
|
|
index 41d193fa77bbf..8ea9132ebca4e 100644
|
|
--- a/drivers/mmc/host/sdhci-tegra.c
|
|
+++ b/drivers/mmc/host/sdhci-tegra.c
|
|
@@ -119,6 +119,10 @@
|
|
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
|
|
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
|
|
|
|
+#define SDHCI_TEGRA_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
|
|
+ SDHCI_TRNS_BLK_CNT_EN | \
|
|
+ SDHCI_TRNS_DMA)
|
|
+
|
|
struct sdhci_tegra_soc_data {
|
|
const struct sdhci_pltfm_data *pdata;
|
|
u64 dma_mask;
|
|
@@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
|
|
static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
|
|
{
|
|
struct mmc_host *mmc = cq_host->mmc;
|
|
+ struct sdhci_host *host = mmc_priv(mmc);
|
|
u8 ctrl;
|
|
ktime_t timeout;
|
|
bool timed_out;
|
|
@@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
|
|
*/
|
|
if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
|
|
cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
|
|
+ sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
|
|
sdhci_cqe_enable(mmc);
|
|
writel(val, cq_host->mmio + reg);
|
|
timeout = ktime_add_us(ktime_get(), 50);
|
|
@@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
|
|
static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
|
|
{
|
|
struct cqhci_host *cq_host = mmc->cqe_private;
|
|
+ struct sdhci_host *host = mmc_priv(mmc);
|
|
u32 val;
|
|
|
|
/*
|
|
@@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
|
|
if (val & CQHCI_ENABLE)
|
|
cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
|
|
CQHCI_CFG);
|
|
+ sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
|
|
sdhci_cqe_enable(mmc);
|
|
if (val & CQHCI_ENABLE)
|
|
cqhci_writel(cq_host, val, CQHCI_CFG);
|
|
@@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
|
|
__sdhci_set_timeout(host, cmd);
|
|
}
|
|
|
|
+static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
|
|
+{
|
|
+ struct cqhci_host *cq_host = mmc->cqe_private;
|
|
+ u32 reg;
|
|
+
|
|
+ reg = cqhci_readl(cq_host, CQHCI_CFG);
|
|
+ reg |= CQHCI_ENABLE;
|
|
+ cqhci_writel(cq_host, reg, CQHCI_CFG);
|
|
+}
|
|
+
|
|
+static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
|
|
+{
|
|
+ struct cqhci_host *cq_host = mmc->cqe_private;
|
|
+ struct sdhci_host *host = mmc_priv(mmc);
|
|
+ u32 reg;
|
|
+
|
|
+ reg = cqhci_readl(cq_host, CQHCI_CFG);
|
|
+ reg &= ~CQHCI_ENABLE;
|
|
+ cqhci_writel(cq_host, reg, CQHCI_CFG);
|
|
+ sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
|
|
+}
|
|
+
|
|
static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
|
|
.write_l = tegra_cqhci_writel,
|
|
.enable = sdhci_tegra_cqe_enable,
|
|
.disable = sdhci_cqe_disable,
|
|
.dumpregs = sdhci_tegra_dumpregs,
|
|
.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
|
|
+ .pre_enable = sdhci_tegra_cqe_pre_enable,
|
|
+ .post_disable = sdhci_tegra_cqe_post_disable,
|
|
};
|
|
|
|
static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
|
|
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
|
|
index 646823ddd3171..130fd2ded78ad 100644
|
|
--- a/drivers/mmc/host/sdhci.c
|
|
+++ b/drivers/mmc/host/sdhci.c
|
|
@@ -2997,6 +2997,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
|
|
return true;
|
|
}
|
|
|
|
+ /*
|
|
+ * The controller needs a reset of internal state machines
|
|
+ * upon error conditions.
|
|
+ */
|
|
+ if (sdhci_needs_reset(host, mrq)) {
|
|
+ /*
|
|
+ * Do not finish until command and data lines are available for
|
|
+ * reset. Note there can only be one other mrq, so it cannot
|
|
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
|
|
+ * would both be null.
|
|
+ */
|
|
+ if (host->cmd || host->data_cmd) {
|
|
+ spin_unlock_irqrestore(&host->lock, flags);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Some controllers need this kick or reset won't work here */
|
|
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
|
|
+ /* This is to force an update */
|
|
+ host->ops->set_clock(host, host->clock);
|
|
+
|
|
+ /*
|
|
+ * Spec says we should do both at the same time, but Ricoh
|
|
+ * controllers do not like that.
|
|
+ */
|
|
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
|
|
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
|
|
+
|
|
+ host->pending_reset = false;
|
|
+ }
|
|
+
|
|
/*
|
|
* Always unmap the data buffers if they were mapped by
|
|
* sdhci_prepare_data() whenever we finish with a request.
|
|
@@ -3060,35 +3091,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
|
|
}
|
|
}
|
|
|
|
- /*
|
|
- * The controller needs a reset of internal state machines
|
|
- * upon error conditions.
|
|
- */
|
|
- if (sdhci_needs_reset(host, mrq)) {
|
|
- /*
|
|
- * Do not finish until command and data lines are available for
|
|
- * reset. Note there can only be one other mrq, so it cannot
|
|
- * also be in mrqs_done, otherwise host->cmd and host->data_cmd
|
|
- * would both be null.
|
|
- */
|
|
- if (host->cmd || host->data_cmd) {
|
|
- spin_unlock_irqrestore(&host->lock, flags);
|
|
- return true;
|
|
- }
|
|
-
|
|
- /* Some controllers need this kick or reset won't work here */
|
|
- if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
|
|
- /* This is to force an update */
|
|
- host->ops->set_clock(host, host->clock);
|
|
-
|
|
- /* Spec says we should do both at the same time, but Ricoh
|
|
- controllers do not like that. */
|
|
- sdhci_do_reset(host, SDHCI_RESET_CMD);
|
|
- sdhci_do_reset(host, SDHCI_RESET_DATA);
|
|
-
|
|
- host->pending_reset = false;
|
|
- }
|
|
-
|
|
host->mrqs_done[i] = NULL;
|
|
|
|
spin_unlock_irqrestore(&host->lock, flags);
|
|
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
|
|
index a6cd16771d4e9..73d5bebd0f33d 100644
|
|
--- a/drivers/mmc/host/uniphier-sd.c
|
|
+++ b/drivers/mmc/host/uniphier-sd.c
|
|
@@ -637,7 +637,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
|
|
|
|
ret = tmio_mmc_host_probe(host);
|
|
if (ret)
|
|
- goto free_host;
|
|
+ goto disable_clk;
|
|
|
|
ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
|
|
dev_name(dev), host);
|
|
@@ -648,6 +648,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
|
|
|
|
remove_host:
|
|
tmio_mmc_host_remove(host);
|
|
+disable_clk:
|
|
+ uniphier_sd_clk_disable(host);
|
|
free_host:
|
|
tmio_mmc_host_free(host);
|
|
|
|
@@ -660,6 +662,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
|
|
|
|
tmio_mmc_host_remove(host);
|
|
uniphier_sd_clk_disable(host);
|
|
+ tmio_mmc_host_free(host);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
|
|
index a35450002284c..58782cfaf71cf 100644
|
|
--- a/drivers/mtd/maps/physmap-bt1-rom.c
|
|
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
|
|
@@ -79,7 +79,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
|
|
if (shift) {
|
|
chunk = min_t(ssize_t, 4 - shift, len);
|
|
data = readl_relaxed(src - shift);
|
|
- memcpy(to, &data + shift, chunk);
|
|
+ memcpy(to, (char *)&data + shift, chunk);
|
|
src += chunk;
|
|
to += chunk;
|
|
len -= chunk;
|
|
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
|
|
index e6ceec8f50dce..8aab1017b4600 100644
|
|
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
|
|
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
|
|
@@ -883,10 +883,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
|
|
NULL, 0,
|
|
chip->ecc.strength);
|
|
|
|
- if (ret >= 0)
|
|
+ if (ret >= 0) {
|
|
+ mtd->ecc_stats.corrected += ret;
|
|
max_bitflips = max(ret, max_bitflips);
|
|
- else
|
|
+ } else {
|
|
mtd->ecc_stats.failed++;
|
|
+ }
|
|
|
|
databuf += chip->ecc.size;
|
|
eccbuf += chip->ecc.bytes;
|
|
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
|
|
index 61d932c1b7180..17f63f95f4a28 100644
|
|
--- a/drivers/mtd/nand/spi/core.c
|
|
+++ b/drivers/mtd/nand/spi/core.c
|
|
@@ -1263,12 +1263,14 @@ static const struct spi_device_id spinand_ids[] = {
|
|
{ .name = "spi-nand" },
|
|
{ /* sentinel */ },
|
|
};
|
|
+MODULE_DEVICE_TABLE(spi, spinand_ids);
|
|
|
|
#ifdef CONFIG_OF
|
|
static const struct of_device_id spinand_of_ids[] = {
|
|
{ .compatible = "spi-nand" },
|
|
{ /* sentinel */ },
|
|
};
|
|
+MODULE_DEVICE_TABLE(of, spinand_of_ids);
|
|
#endif
|
|
|
|
static struct spi_mem_driver spinand_drv = {
|
|
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
|
|
index b17faccc95c43..ac94f4336f273 100644
|
|
--- a/drivers/mtd/spi-nor/core.c
|
|
+++ b/drivers/mtd/spi-nor/core.c
|
|
@@ -3264,6 +3264,37 @@ static void spi_nor_resume(struct mtd_info *mtd)
|
|
dev_err(dev, "resume() failed\n");
|
|
}
|
|
|
|
+static int spi_nor_get_device(struct mtd_info *mtd)
|
|
+{
|
|
+ struct mtd_info *master = mtd_get_master(mtd);
|
|
+ struct spi_nor *nor = mtd_to_spi_nor(master);
|
|
+ struct device *dev;
|
|
+
|
|
+ if (nor->spimem)
|
|
+ dev = nor->spimem->spi->controller->dev.parent;
|
|
+ else
|
|
+ dev = nor->dev;
|
|
+
|
|
+ if (!try_module_get(dev->driver->owner))
|
|
+ return -ENODEV;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void spi_nor_put_device(struct mtd_info *mtd)
|
|
+{
|
|
+ struct mtd_info *master = mtd_get_master(mtd);
|
|
+ struct spi_nor *nor = mtd_to_spi_nor(master);
|
|
+ struct device *dev;
|
|
+
|
|
+ if (nor->spimem)
|
|
+ dev = nor->spimem->spi->controller->dev.parent;
|
|
+ else
|
|
+ dev = nor->dev;
|
|
+
|
|
+ module_put(dev->driver->owner);
|
|
+}
|
|
+
|
|
void spi_nor_restore(struct spi_nor *nor)
|
|
{
|
|
/* restore the addressing mode */
|
|
@@ -3458,6 +3489,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
|
|
mtd->_read = spi_nor_read;
|
|
mtd->_suspend = spi_nor_suspend;
|
|
mtd->_resume = spi_nor_resume;
|
|
+ mtd->_get_device = spi_nor_get_device;
|
|
+ mtd->_put_device = spi_nor_put_device;
|
|
|
|
if (nor->params->locking_ops) {
|
|
mtd->_lock = spi_nor_lock;
|
|
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
|
|
index 9203abaac2297..662b212787d4d 100644
|
|
--- a/drivers/mtd/spi-nor/macronix.c
|
|
+++ b/drivers/mtd/spi-nor/macronix.c
|
|
@@ -73,9 +73,6 @@ static const struct flash_info macronix_parts[] = {
|
|
SECT_4K | SPI_NOR_DUAL_READ |
|
|
SPI_NOR_QUAD_READ) },
|
|
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
|
|
- { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
|
|
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
|
|
- SPI_NOR_4B_OPCODES) },
|
|
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
|
|
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
|
|
SPI_NOR_4B_OPCODES) },
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
|
|
index 7846a21555ef8..1f6bc0c7e91dd 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
|
|
@@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
|
|
u16 erif_index = 0;
|
|
int err;
|
|
|
|
+ /* Add the eRIF */
|
|
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
|
|
+ erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
|
|
+ err = mr->mr_ops->route_erif_add(mlxsw_sp,
|
|
+ rve->mr_route->route_priv,
|
|
+ erif_index);
|
|
+ if (err)
|
|
+ return err;
|
|
+ }
|
|
+
|
|
/* Update the route action, as the new eVIF can be a tunnel or a pimreg
|
|
* device which will require updating the action.
|
|
*/
|
|
@@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
|
|
rve->mr_route->route_priv,
|
|
route_action);
|
|
if (err)
|
|
- return err;
|
|
- }
|
|
-
|
|
- /* Add the eRIF */
|
|
- if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
|
|
- erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
|
|
- err = mr->mr_ops->route_erif_add(mlxsw_sp,
|
|
- rve->mr_route->route_priv,
|
|
- erif_index);
|
|
- if (err)
|
|
- goto err_route_erif_add;
|
|
+ goto err_route_action_update;
|
|
}
|
|
|
|
/* Update the minimum MTU */
|
|
@@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
|
|
return 0;
|
|
|
|
err_route_min_mtu_update:
|
|
- if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
|
|
- mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
|
|
- erif_index);
|
|
-err_route_erif_add:
|
|
if (route_action != rve->mr_route->route_action)
|
|
mr->mr_ops->route_action_update(mlxsw_sp,
|
|
rve->mr_route->route_priv,
|
|
rve->mr_route->route_action);
|
|
+err_route_action_update:
|
|
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
|
|
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
|
|
+ erif_index);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
|
|
index d75cf5ff5686a..49df02ecee912 100644
|
|
--- a/drivers/net/ethernet/sfc/farch.c
|
|
+++ b/drivers/net/ethernet/sfc/farch.c
|
|
@@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
|
|
/* Transmit completion */
|
|
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
|
|
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
|
|
- tx_queue = efx_channel_get_tx_queue(
|
|
- channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
|
|
+ tx_queue = channel->tx_queue +
|
|
+ (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
|
|
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
|
|
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
|
|
/* Rewrite the FIFO write pointer */
|
|
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
|
|
- tx_queue = efx_channel_get_tx_queue(
|
|
- channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
|
|
+ tx_queue = channel->tx_queue +
|
|
+ (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
|
|
|
|
netif_tx_lock(efx->net_dev);
|
|
efx_farch_notify_tx_desc(tx_queue);
|
|
@@ -1081,16 +1081,16 @@ static void
|
|
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
|
|
{
|
|
struct efx_tx_queue *tx_queue;
|
|
+ struct efx_channel *channel;
|
|
int qid;
|
|
|
|
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
|
|
if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
|
|
- tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
|
|
- qid % EFX_MAX_TXQ_PER_CHANNEL);
|
|
- if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
|
|
+ channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
|
|
+ tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
|
|
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
|
|
efx_farch_magic_event(tx_queue->channel,
|
|
EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
|
|
- }
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
|
|
index 592e9dadcb556..3a243c5326471 100644
|
|
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
|
|
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
|
|
@@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
|
|
}
|
|
static const struct dev_pm_ops rsi_pm_ops = {
|
|
.suspend = rsi_suspend,
|
|
- .resume = rsi_resume,
|
|
+ .resume_noirq = rsi_resume,
|
|
.freeze = rsi_freeze,
|
|
.thaw = rsi_thaw,
|
|
.restore = rsi_restore,
|
|
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
|
|
index 682854e0e079d..4845d12e374ac 100644
|
|
--- a/drivers/nvme/target/discovery.c
|
|
+++ b/drivers/nvme/target/discovery.c
|
|
@@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
|
|
if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
|
|
req->error_loc =
|
|
offsetof(struct nvme_get_log_page_command, lid);
|
|
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
|
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
|
goto out;
|
|
}
|
|
|
|
/* Spec requires dword aligned offsets */
|
|
if (offset & 0x3) {
|
|
+ req->error_loc =
|
|
+ offsetof(struct nvme_get_log_page_command, lpo);
|
|
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
|
goto out;
|
|
}
|
|
@@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
|
|
|
|
if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
|
|
req->error_loc = offsetof(struct nvme_identify, cns);
|
|
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
|
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
|
|
index bcd1cd9ba8c80..fcf935bf6f5e2 100644
|
|
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
|
|
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
|
|
@@ -707,6 +707,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
|
|
}
|
|
}
|
|
|
|
+ dw_pcie_iatu_detect(pci);
|
|
+
|
|
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
|
|
if (!res)
|
|
return -EINVAL;
|
|
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
index 8a84c005f32bd..e14e6d8661d3f 100644
|
|
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
@@ -421,6 +421,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
|
|
if (ret)
|
|
goto err_free_msi;
|
|
}
|
|
+ dw_pcie_iatu_detect(pci);
|
|
|
|
dw_pcie_setup_rc(pp);
|
|
dw_pcie_msi_init(pp);
|
|
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
|
|
index 645fa18923751..6d709dbd9deb8 100644
|
|
--- a/drivers/pci/controller/dwc/pcie-designware.c
|
|
+++ b/drivers/pci/controller/dwc/pcie-designware.c
|
|
@@ -610,11 +610,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
|
|
pci->num_ob_windows = ob;
|
|
}
|
|
|
|
-void dw_pcie_setup(struct dw_pcie *pci)
|
|
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
|
|
{
|
|
- u32 val;
|
|
struct device *dev = pci->dev;
|
|
- struct device_node *np = dev->of_node;
|
|
struct platform_device *pdev = to_platform_device(dev);
|
|
|
|
if (pci->version >= 0x480A || (!pci->version &&
|
|
@@ -643,6 +641,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
|
|
|
|
dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
|
|
pci->num_ob_windows, pci->num_ib_windows);
|
|
+}
|
|
+
|
|
+void dw_pcie_setup(struct dw_pcie *pci)
|
|
+{
|
|
+ u32 val;
|
|
+ struct device *dev = pci->dev;
|
|
+ struct device_node *np = dev->of_node;
|
|
|
|
if (pci->link_gen > 0)
|
|
dw_pcie_link_set_max_speed(pci, pci->link_gen);
|
|
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
|
|
index 0207840756c47..ba27494602ac8 100644
|
|
--- a/drivers/pci/controller/dwc/pcie-designware.h
|
|
+++ b/drivers/pci/controller/dwc/pcie-designware.h
|
|
@@ -304,6 +304,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
|
|
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
|
|
enum dw_pcie_region_type type);
|
|
void dw_pcie_setup(struct dw_pcie *pci);
|
|
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
|
|
|
|
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
|
|
{
|
|
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
|
|
index 9449dfde2841e..5ddc27d9a275e 100644
|
|
--- a/drivers/pci/pci.c
|
|
+++ b/drivers/pci/pci.c
|
|
@@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
|
|
int err;
|
|
int i, bars = 0;
|
|
|
|
- /*
|
|
- * Power state could be unknown at this point, either due to a fresh
|
|
- * boot or a device removal call. So get the current power state
|
|
- * so that things like MSI message writing will behave as expected
|
|
- * (e.g. if the device really is in D0 at enable time).
|
|
- */
|
|
- if (dev->pm_cap) {
|
|
- u16 pmcsr;
|
|
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
|
|
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
|
|
- }
|
|
-
|
|
- if (atomic_inc_return(&dev->enable_cnt) > 1)
|
|
+ if (atomic_inc_return(&dev->enable_cnt) > 1) {
|
|
+ pci_update_current_state(dev, dev->current_state);
|
|
return 0; /* already enabled */
|
|
+ }
|
|
|
|
bridge = pci_upstream_bridge(dev);
|
|
if (bridge)
|
|
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
|
|
index 933bd8410fc2a..ef9676418c9f4 100644
|
|
--- a/drivers/perf/arm_pmu_platform.c
|
|
+++ b/drivers/perf/arm_pmu_platform.c
|
|
@@ -6,6 +6,7 @@
|
|
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
|
|
*/
|
|
#define pr_fmt(fmt) "hw perfevents: " fmt
|
|
+#define dev_fmt pr_fmt
|
|
|
|
#include <linux/bug.h>
|
|
#include <linux/cpumask.h>
|
|
@@ -100,10 +101,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
|
|
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
|
|
|
|
num_irqs = platform_irq_count(pdev);
|
|
- if (num_irqs < 0) {
|
|
- pr_err("unable to count PMU IRQs\n");
|
|
- return num_irqs;
|
|
- }
|
|
+ if (num_irqs < 0)
|
|
+ return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
|
|
|
|
/*
|
|
* In this case we have no idea which CPUs are covered by the PMU.
|
|
@@ -236,7 +235,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
|
|
|
|
ret = armpmu_register(pmu);
|
|
if (ret)
|
|
- goto out_free;
|
|
+ goto out_free_irqs;
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
|
|
index 9887f908f5401..812e5409d3595 100644
|
|
--- a/drivers/phy/ti/phy-twl4030-usb.c
|
|
+++ b/drivers/phy/ti/phy-twl4030-usb.c
|
|
@@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
|
|
|
|
usb_remove_phy(&twl->phy);
|
|
pm_runtime_get_sync(twl->dev);
|
|
- cancel_delayed_work(&twl->id_workaround_work);
|
|
+ cancel_delayed_work_sync(&twl->id_workaround_work);
|
|
device_remove_file(twl->dev, &dev_attr_vbus);
|
|
|
|
/* set transceiver mode to power on defaults */
|
|
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
|
|
index 3ea163498647f..135af21b46613 100644
|
|
--- a/drivers/pinctrl/pinctrl-ingenic.c
|
|
+++ b/drivers/pinctrl/pinctrl-ingenic.c
|
|
@@ -2089,26 +2089,48 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
|
|
enum pin_config_param param = pinconf_to_config_param(*config);
|
|
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
|
|
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
|
|
- bool pull;
|
|
+ unsigned int bias;
|
|
+ bool pull, pullup, pulldown;
|
|
|
|
- if (jzpc->info->version >= ID_JZ4770)
|
|
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
|
|
- else
|
|
- pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
|
|
+ if (jzpc->info->version >= ID_X1830) {
|
|
+ unsigned int half = PINS_PER_GPIO_CHIP / 2;
|
|
+ unsigned int idxh = (pin % half) * 2;
|
|
+
|
|
+ if (idx < half)
|
|
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
|
|
+ X1830_GPIO_PEL, &bias);
|
|
+ else
|
|
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
|
|
+ X1830_GPIO_PEH, &bias);
|
|
+
|
|
+ bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
|
|
+
|
|
+ pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
|
|
+ pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
|
|
+
|
|
+ } else {
|
|
+ if (jzpc->info->version >= ID_JZ4770)
|
|
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
|
|
+ else
|
|
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
|
|
+
|
|
+ pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
|
|
+ pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
|
|
+ }
|
|
|
|
switch (param) {
|
|
case PIN_CONFIG_BIAS_DISABLE:
|
|
- if (pull)
|
|
+ if (pullup || pulldown)
|
|
return -EINVAL;
|
|
break;
|
|
|
|
case PIN_CONFIG_BIAS_PULL_UP:
|
|
- if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
|
|
+ if (!pullup)
|
|
return -EINVAL;
|
|
break;
|
|
|
|
case PIN_CONFIG_BIAS_PULL_DOWN:
|
|
- if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
|
|
+ if (!pulldown)
|
|
return -EINVAL;
|
|
break;
|
|
|
|
@@ -2126,7 +2148,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
|
|
if (jzpc->info->version >= ID_X1830) {
|
|
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
|
|
unsigned int half = PINS_PER_GPIO_CHIP / 2;
|
|
- unsigned int idxh = pin % half * 2;
|
|
+ unsigned int idxh = (pin % half) * 2;
|
|
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
|
|
|
|
if (idx < half) {
|
|
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
|
|
index b5888aeb4bcff..260d49dca1ad1 100644
|
|
--- a/drivers/platform/x86/intel_pmc_core.c
|
|
+++ b/drivers/platform/x86/intel_pmc_core.c
|
|
@@ -1186,9 +1186,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
|
|
* the platform BIOS enforces 24Mhz crystal to shutdown
|
|
* before PMC can assert SLP_S0#.
|
|
*/
|
|
+static bool xtal_ignore;
|
|
static int quirk_xtal_ignore(const struct dmi_system_id *id)
|
|
{
|
|
- struct pmc_dev *pmcdev = &pmc;
|
|
+ xtal_ignore = true;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
|
|
+{
|
|
u32 value;
|
|
|
|
value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
|
|
@@ -1197,7 +1203,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
|
|
/* Low Voltage Mode Enable */
|
|
value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
|
|
pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
|
|
- return 0;
|
|
}
|
|
|
|
static const struct dmi_system_id pmc_core_dmi_table[] = {
|
|
@@ -1212,6 +1217,14 @@ static const struct dmi_system_id pmc_core_dmi_table[] = {
|
|
{}
|
|
};
|
|
|
|
+static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
|
|
+{
|
|
+ dmi_check_system(pmc_core_dmi_table);
|
|
+
|
|
+ if (xtal_ignore)
|
|
+ pmc_core_xtal_ignore(pmcdev);
|
|
+}
|
|
+
|
|
static int pmc_core_probe(struct platform_device *pdev)
|
|
{
|
|
static bool device_initialized;
|
|
@@ -1253,7 +1266,7 @@ static int pmc_core_probe(struct platform_device *pdev)
|
|
mutex_init(&pmcdev->lock);
|
|
platform_set_drvdata(pdev, pmcdev);
|
|
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
|
|
- dmi_check_system(pmc_core_dmi_table);
|
|
+ pmc_core_do_dmi_quirks(pmcdev);
|
|
|
|
/*
|
|
* On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
|
|
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
|
|
index a2a2d923e60cb..df1fc6c719f32 100644
|
|
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
|
|
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
|
|
@@ -21,12 +21,16 @@
|
|
#define PUNIT_MAILBOX_BUSY_BIT 31
|
|
|
|
/*
|
|
- * The average time to complete some commands is about 40us. The current
|
|
- * count is enough to satisfy 40us. But when the firmware is very busy, this
|
|
- * causes timeout occasionally. So increase to deal with some worst case
|
|
- * scenarios. Most of the command still complete in few us.
|
|
+ * The average time to complete mailbox commands is less than 40us. Most of
|
|
+ * the commands complete in a few microseconds. But the same firmware handles
|
|
+ * requests from all power management features.
|
|
+ * If we flood the firmware with requests, the mailbox response can be
|
|
+ * delayed for hundreds of microseconds. So define two timeouts: one for
|
|
+ * the average case and one for the worst case.
|
|
+ * If the firmware is taking more than average, just call cond_resched().
|
|
*/
|
|
-#define OS_MAILBOX_RETRY_COUNT 100
|
|
+#define OS_MAILBOX_TIMEOUT_AVG_US 40
|
|
+#define OS_MAILBOX_TIMEOUT_MAX_US 1000
|
|
|
|
struct isst_if_device {
|
|
struct mutex mutex;
|
|
@@ -35,11 +39,13 @@ struct isst_if_device {
|
|
static int isst_if_mbox_cmd(struct pci_dev *pdev,
|
|
struct isst_if_mbox_cmd *mbox_cmd)
|
|
{
|
|
- u32 retries, data;
|
|
+ s64 tm_delta = 0;
|
|
+ ktime_t tm;
|
|
+ u32 data;
|
|
int ret;
|
|
|
|
/* Poll for rb bit == 0 */
|
|
- retries = OS_MAILBOX_RETRY_COUNT;
|
|
+ tm = ktime_get();
|
|
do {
|
|
ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
|
|
&data);
|
|
@@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
|
|
|
|
if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
|
|
ret = -EBUSY;
|
|
+ tm_delta = ktime_us_delta(ktime_get(), tm);
|
|
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
|
|
+ cond_resched();
|
|
continue;
|
|
}
|
|
ret = 0;
|
|
break;
|
|
- } while (--retries);
|
|
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
|
|
|
|
if (ret)
|
|
return ret;
|
|
@@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
|
|
return ret;
|
|
|
|
/* Poll for rb bit == 0 */
|
|
- retries = OS_MAILBOX_RETRY_COUNT;
|
|
+ tm_delta = 0;
|
|
+ tm = ktime_get();
|
|
do {
|
|
ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
|
|
&data);
|
|
@@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
|
|
|
|
if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
|
|
ret = -EBUSY;
|
|
+ tm_delta = ktime_us_delta(ktime_get(), tm);
|
|
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
|
|
+ cond_resched();
|
|
continue;
|
|
}
|
|
|
|
@@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
|
|
mbox_cmd->resp_data = data;
|
|
ret = 0;
|
|
break;
|
|
- } while (--retries);
|
|
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
|
|
index 315e0909e6a48..72a2bcf3ab32b 100644
|
|
--- a/drivers/power/supply/bq27xxx_battery.c
|
|
+++ b/drivers/power/supply/bq27xxx_battery.c
|
|
@@ -1631,27 +1631,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
|
|
return tval * 60;
|
|
}
|
|
|
|
-/*
|
|
- * Read an average power register.
|
|
- * Return < 0 if something fails.
|
|
- */
|
|
-static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
|
|
-{
|
|
- int tval;
|
|
-
|
|
- tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
|
|
- if (tval < 0) {
|
|
- dev_err(di->dev, "error reading average power register %02x: %d\n",
|
|
- BQ27XXX_REG_AP, tval);
|
|
- return tval;
|
|
- }
|
|
-
|
|
- if (di->opts & BQ27XXX_O_ZERO)
|
|
- return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
|
|
- else
|
|
- return tval;
|
|
-}
|
|
-
|
|
/*
|
|
* Returns true if a battery over temperature condition is detected
|
|
*/
|
|
@@ -1739,8 +1718,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
|
|
}
|
|
if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
|
|
cache.cycle_count = bq27xxx_battery_read_cyct(di);
|
|
- if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
|
|
- cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
|
|
|
|
/* We only have to read charge design full once */
|
|
if (di->charge_design_full <= 0)
|
|
@@ -1803,6 +1780,32 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
|
|
return 0;
|
|
}
|
|
|
|
+/*
|
|
+ * Get the average power in µW
|
|
+ * Return < 0 if something fails.
|
|
+ */
|
|
+static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
|
|
+ union power_supply_propval *val)
|
|
+{
|
|
+ int power;
|
|
+
|
|
+ power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
|
|
+ if (power < 0) {
|
|
+ dev_err(di->dev,
|
|
+ "error reading average power register %02x: %d\n",
|
|
+ BQ27XXX_REG_AP, power);
|
|
+ return power;
|
|
+ }
|
|
+
|
|
+ if (di->opts & BQ27XXX_O_ZERO)
|
|
+ val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
|
|
+ else
|
|
+ /* Other gauges return a signed value in units of 10mW */
|
|
+ val->intval = (int)((s16)power) * 10000;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
|
|
union power_supply_propval *val)
|
|
{
|
|
@@ -1987,7 +1990,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
|
|
ret = bq27xxx_simple_value(di->cache.energy, val);
|
|
break;
|
|
case POWER_SUPPLY_PROP_POWER_AVG:
|
|
- ret = bq27xxx_simple_value(di->cache.power_avg, val);
|
|
+ ret = bq27xxx_battery_pwr_avg(di, val);
|
|
break;
|
|
case POWER_SUPPLY_PROP_HEALTH:
|
|
ret = bq27xxx_simple_value(di->cache.health, val);
|
|
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
|
|
index cebc5c8fda1b5..793d4ca52f8a1 100644
|
|
--- a/drivers/power/supply/cpcap-battery.c
|
|
+++ b/drivers/power/supply/cpcap-battery.c
|
|
@@ -626,7 +626,7 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
|
|
break;
|
|
}
|
|
|
|
- if (!d)
|
|
+ if (list_entry_is_head(d, &ddata->irq_list, node))
|
|
return IRQ_NONE;
|
|
|
|
latest = cpcap_battery_latest(ddata);
|
|
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
|
|
index 22fff01425d63..891e1eb8e39d5 100644
|
|
--- a/drivers/power/supply/cpcap-charger.c
|
|
+++ b/drivers/power/supply/cpcap-charger.c
|
|
@@ -633,6 +633,9 @@ static void cpcap_usb_detect(struct work_struct *work)
|
|
return;
|
|
}
|
|
|
|
+ /* Delay for 80ms to avoid vbus bouncing when usb cable is plugged in */
|
|
+ usleep_range(80000, 120000);
|
|
+
|
|
/* Throttle chrgcurr2 interrupt for charger done and retry */
|
|
switch (ddata->state) {
|
|
case CPCAP_CHARGER_CHARGING:
|
|
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
|
|
index 0032069fbc2bb..66039c665dd1e 100644
|
|
--- a/drivers/power/supply/generic-adc-battery.c
|
|
+++ b/drivers/power/supply/generic-adc-battery.c
|
|
@@ -373,7 +373,7 @@ static int gab_remove(struct platform_device *pdev)
|
|
}
|
|
|
|
kfree(adc_bat->psy_desc.properties);
|
|
- cancel_delayed_work(&adc_bat->bat_work);
|
|
+ cancel_delayed_work_sync(&adc_bat->bat_work);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
|
|
index e7931ffb7151d..397e5a03b7d9a 100644
|
|
--- a/drivers/power/supply/lp8788-charger.c
|
|
+++ b/drivers/power/supply/lp8788-charger.c
|
|
@@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
|
|
|
|
ret = request_threaded_irq(virq, NULL,
|
|
lp8788_charger_irq_thread,
|
|
- 0, name, pchg);
|
|
+ IRQF_ONESHOT, name, pchg);
|
|
if (ret)
|
|
break;
|
|
}
|
|
diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
|
|
index ac06ecf7fc9ca..a3bfb9612b174 100644
|
|
--- a/drivers/power/supply/pm2301_charger.c
|
|
+++ b/drivers/power/supply/pm2301_charger.c
|
|
@@ -1089,7 +1089,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
|
|
ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
|
|
NULL,
|
|
pm2xxx_charger_irq[0].isr,
|
|
- pm2->pdata->irq_type,
|
|
+ pm2->pdata->irq_type | IRQF_ONESHOT,
|
|
pm2xxx_charger_irq[0].name, pm2);
|
|
|
|
if (ret != 0) {
|
|
diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
|
|
index a2addc24ee8b8..3e3a598f114d1 100644
|
|
--- a/drivers/power/supply/s3c_adc_battery.c
|
|
+++ b/drivers/power/supply/s3c_adc_battery.c
|
|
@@ -395,7 +395,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
|
|
if (main_bat.charge_finished)
|
|
free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
|
|
|
|
- cancel_delayed_work(&bat_work);
|
|
+ cancel_delayed_work_sync(&bat_work);
|
|
|
|
if (pdata->exit)
|
|
pdata->exit();
|
|
diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
|
|
index 6b0098e5a88b5..0990b2fa6cd8d 100644
|
|
--- a/drivers/power/supply/tps65090-charger.c
|
|
+++ b/drivers/power/supply/tps65090-charger.c
|
|
@@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
|
|
|
|
if (irq != -ENXIO) {
|
|
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
|
|
- tps65090_charger_isr, 0, "tps65090-charger", cdata);
|
|
+ tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
|
|
if (ret) {
|
|
dev_err(cdata->dev,
|
|
"Unable to register irq %d err %d\n", irq,
|
|
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
|
|
index 814c2b81fdfec..ba33d1617e0b6 100644
|
|
--- a/drivers/power/supply/tps65217_charger.c
|
|
+++ b/drivers/power/supply/tps65217_charger.c
|
|
@@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
|
|
for (i = 0; i < NUM_CHARGER_IRQS; i++) {
|
|
ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
|
|
tps65217_charger_irq,
|
|
- 0, "tps65217-charger",
|
|
+ IRQF_ONESHOT, "tps65217-charger",
|
|
charger);
|
|
if (ret) {
|
|
dev_err(charger->dev,
|
|
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
|
|
index a2ede7d7897eb..08cbf688e14d3 100644
|
|
--- a/drivers/regulator/da9121-regulator.c
|
|
+++ b/drivers/regulator/da9121-regulator.c
|
|
@@ -40,6 +40,7 @@ struct da9121 {
|
|
unsigned int passive_delay;
|
|
int chip_irq;
|
|
int variant_id;
|
|
+ int subvariant_id;
|
|
};
|
|
|
|
/* Define ranges for different variants, enabling translation to/from
|
|
@@ -812,7 +813,6 @@ static struct regmap_config da9121_2ch_regmap_config = {
|
|
static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
|
|
{
|
|
u32 device_id;
|
|
- u8 chip_id = chip->variant_id;
|
|
u32 variant_id;
|
|
u8 variant_mrc, variant_vrc;
|
|
char *type;
|
|
@@ -839,22 +839,34 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
|
|
|
|
variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
|
|
|
|
- switch (variant_vrc) {
|
|
- case DA9121_VARIANT_VRC:
|
|
- type = "DA9121/DA9130";
|
|
- config_match = (chip_id == DA9121_TYPE_DA9121_DA9130);
|
|
+ switch (chip->subvariant_id) {
|
|
+ case DA9121_SUBTYPE_DA9121:
|
|
+ type = "DA9121";
|
|
+ config_match = (variant_vrc == DA9121_VARIANT_VRC);
|
|
break;
|
|
- case DA9220_VARIANT_VRC:
|
|
- type = "DA9220/DA9132";
|
|
- config_match = (chip_id == DA9121_TYPE_DA9220_DA9132);
|
|
+ case DA9121_SUBTYPE_DA9130:
|
|
+ type = "DA9130";
|
|
+ config_match = (variant_vrc == DA9130_VARIANT_VRC);
|
|
break;
|
|
- case DA9122_VARIANT_VRC:
|
|
- type = "DA9122/DA9131";
|
|
- config_match = (chip_id == DA9121_TYPE_DA9122_DA9131);
|
|
+ case DA9121_SUBTYPE_DA9220:
|
|
+ type = "DA9220";
|
|
+ config_match = (variant_vrc == DA9220_VARIANT_VRC);
|
|
break;
|
|
- case DA9217_VARIANT_VRC:
|
|
+ case DA9121_SUBTYPE_DA9132:
|
|
+ type = "DA9132";
|
|
+ config_match = (variant_vrc == DA9132_VARIANT_VRC);
|
|
+ break;
|
|
+ case DA9121_SUBTYPE_DA9122:
|
|
+ type = "DA9122";
|
|
+ config_match = (variant_vrc == DA9122_VARIANT_VRC);
|
|
+ break;
|
|
+ case DA9121_SUBTYPE_DA9131:
|
|
+ type = "DA9131";
|
|
+ config_match = (variant_vrc == DA9131_VARIANT_VRC);
|
|
+ break;
|
|
+ case DA9121_SUBTYPE_DA9217:
|
|
type = "DA9217";
|
|
- config_match = (chip_id == DA9121_TYPE_DA9217);
|
|
+ config_match = (variant_vrc == DA9217_VARIANT_VRC);
|
|
break;
|
|
default:
|
|
type = "Unknown";
|
|
@@ -892,15 +904,27 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
|
|
|
|
chip->dev = &i2c->dev;
|
|
|
|
- switch (chip->variant_id) {
|
|
- case DA9121_TYPE_DA9121_DA9130:
|
|
- fallthrough;
|
|
- case DA9121_TYPE_DA9217:
|
|
+ /* Use configured subtype to select the regulator descriptor index and
|
|
+ * register map, common to both consumer and automotive grade variants
|
|
+ */
|
|
+ switch (chip->subvariant_id) {
|
|
+ case DA9121_SUBTYPE_DA9121:
|
|
+ case DA9121_SUBTYPE_DA9130:
|
|
+ chip->variant_id = DA9121_TYPE_DA9121_DA9130;
|
|
regmap = &da9121_1ch_regmap_config;
|
|
break;
|
|
- case DA9121_TYPE_DA9122_DA9131:
|
|
- fallthrough;
|
|
- case DA9121_TYPE_DA9220_DA9132:
|
|
+ case DA9121_SUBTYPE_DA9217:
|
|
+ chip->variant_id = DA9121_TYPE_DA9217;
|
|
+ regmap = &da9121_1ch_regmap_config;
|
|
+ break;
|
|
+ case DA9121_SUBTYPE_DA9122:
|
|
+ case DA9121_SUBTYPE_DA9131:
|
|
+ chip->variant_id = DA9121_TYPE_DA9122_DA9131;
|
|
+ regmap = &da9121_2ch_regmap_config;
|
|
+ break;
|
|
+ case DA9121_SUBTYPE_DA9220:
|
|
+ case DA9121_SUBTYPE_DA9132:
|
|
+ chip->variant_id = DA9121_TYPE_DA9220_DA9132;
|
|
regmap = &da9121_2ch_regmap_config;
|
|
break;
|
|
}
|
|
@@ -975,13 +999,13 @@ regmap_error:
|
|
}
|
|
|
|
static const struct of_device_id da9121_dt_ids[] = {
|
|
- { .compatible = "dlg,da9121", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
|
|
- { .compatible = "dlg,da9130", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
|
|
- { .compatible = "dlg,da9217", .data = (void *) DA9121_TYPE_DA9217 },
|
|
- { .compatible = "dlg,da9122", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
|
|
- { .compatible = "dlg,da9131", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
|
|
- { .compatible = "dlg,da9220", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
|
|
- { .compatible = "dlg,da9132", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
|
|
+ { .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
|
|
+ { .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
|
|
+ { .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
|
|
+ { .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
|
|
+ { .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
|
|
+ { .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
|
|
+ { .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
|
|
{ }
|
|
};
|
|
MODULE_DEVICE_TABLE(of, da9121_dt_ids);
|
|
@@ -1011,7 +1035,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c,
|
|
}
|
|
|
|
chip->pdata = i2c->dev.platform_data;
|
|
- chip->variant_id = da9121_of_get_id(&i2c->dev);
|
|
+ chip->subvariant_id = da9121_of_get_id(&i2c->dev);
|
|
|
|
ret = da9121_assign_chip_model(i2c, chip);
|
|
if (ret < 0)
|
|
diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
|
|
index 3c34cb889ca87..357f416e17c1d 100644
|
|
--- a/drivers/regulator/da9121-regulator.h
|
|
+++ b/drivers/regulator/da9121-regulator.h
|
|
@@ -29,6 +29,16 @@ enum da9121_variant {
|
|
DA9121_TYPE_DA9217
|
|
};
|
|
|
|
+enum da9121_subvariant {
|
|
+ DA9121_SUBTYPE_DA9121,
|
|
+ DA9121_SUBTYPE_DA9130,
|
|
+ DA9121_SUBTYPE_DA9220,
|
|
+ DA9121_SUBTYPE_DA9132,
|
|
+ DA9121_SUBTYPE_DA9122,
|
|
+ DA9121_SUBTYPE_DA9131,
|
|
+ DA9121_SUBTYPE_DA9217
|
|
+};
|
|
+
|
|
/* Minimum, maximum and default polling millisecond periods are provided
|
|
* here as an example. It is expected that any final implementation will
|
|
* include a modification of these settings to match the required
|
|
@@ -279,6 +289,9 @@ enum da9121_variant {
|
|
#define DA9220_VARIANT_VRC 0x0
|
|
#define DA9122_VARIANT_VRC 0x2
|
|
#define DA9217_VARIANT_VRC 0x7
|
|
+#define DA9130_VARIANT_VRC 0x0
|
|
+#define DA9131_VARIANT_VRC 0x1
|
|
+#define DA9132_VARIANT_VRC 0x2
|
|
|
|
/* DA9121_REG_OTP_CUSTOMER_ID */
|
|
|
|
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
|
|
index 4b0a7cbb20962..f6def83c2d264 100644
|
|
--- a/drivers/s390/cio/device.c
|
|
+++ b/drivers/s390/cio/device.c
|
|
@@ -1525,8 +1525,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
|
|
switch (action) {
|
|
case IO_SCH_ORPH_UNREG:
|
|
case IO_SCH_UNREG:
|
|
- if (!cdev)
|
|
- css_sch_device_unregister(sch);
|
|
+ css_sch_device_unregister(sch);
|
|
break;
|
|
case IO_SCH_ORPH_ATTACH:
|
|
case IO_SCH_UNREG_ATTACH:
|
|
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
|
|
index 1ffdd411201cd..6946a7e26eff7 100644
|
|
--- a/drivers/s390/crypto/vfio_ap_ops.c
|
|
+++ b/drivers/s390/crypto/vfio_ap_ops.c
|
|
@@ -294,6 +294,19 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
|
|
matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
|
|
struct ap_matrix_mdev, pqap_hook);
|
|
|
|
+ /*
|
|
+ * If the KVM pointer is in the process of being set, wait until the
|
|
+ * process has completed.
|
|
+ */
|
|
+ wait_event_cmd(matrix_mdev->wait_for_kvm,
|
|
+ !matrix_mdev->kvm_busy,
|
|
+ mutex_unlock(&matrix_dev->lock),
|
|
+ mutex_lock(&matrix_dev->lock));
|
|
+
|
|
+ /* If there is no guest using the mdev, there is nothing to do */
|
|
+ if (!matrix_mdev->kvm)
|
|
+ goto out_unlock;
|
|
+
|
|
q = vfio_ap_get_queue(matrix_mdev, apqn);
|
|
if (!q)
|
|
goto out_unlock;
|
|
@@ -337,6 +350,7 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
|
|
|
|
matrix_mdev->mdev = mdev;
|
|
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
|
|
+ init_waitqueue_head(&matrix_mdev->wait_for_kvm);
|
|
mdev_set_drvdata(mdev, matrix_mdev);
|
|
matrix_mdev->pqap_hook.hook = handle_pqap;
|
|
matrix_mdev->pqap_hook.owner = THIS_MODULE;
|
|
@@ -351,17 +365,23 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
|
|
{
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
|
|
- if (matrix_mdev->kvm)
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in flux or the guest is running, disallow
|
|
+ * removal of the mediated device.
|
|
+ */
|
|
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
|
|
+ mutex_unlock(&matrix_dev->lock);
|
|
return -EBUSY;
|
|
+ }
|
|
|
|
- mutex_lock(&matrix_dev->lock);
|
|
vfio_ap_mdev_reset_queues(mdev);
|
|
list_del(&matrix_mdev->node);
|
|
- mutex_unlock(&matrix_dev->lock);
|
|
-
|
|
kfree(matrix_mdev);
|
|
mdev_set_drvdata(mdev, NULL);
|
|
atomic_inc(&matrix_dev->available_instances);
|
|
+ mutex_unlock(&matrix_dev->lock);
|
|
|
|
return 0;
|
|
}
|
|
@@ -606,24 +626,31 @@ static ssize_t assign_adapter_store(struct device *dev,
|
|
struct mdev_device *mdev = mdev_from_dev(dev);
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
|
|
- /* If the guest is running, disallow assignment of adapter */
|
|
- if (matrix_mdev->kvm)
|
|
- return -EBUSY;
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in flux or the guest is running, disallow
|
|
+ * assignment of adapter
|
|
+ */
|
|
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
|
|
+ ret = -EBUSY;
|
|
+ goto done;
|
|
+ }
|
|
|
|
ret = kstrtoul(buf, 0, &apid);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto done;
|
|
|
|
- if (apid > matrix_mdev->matrix.apm_max)
|
|
- return -ENODEV;
|
|
+ if (apid > matrix_mdev->matrix.apm_max) {
|
|
+ ret = -ENODEV;
|
|
+ goto done;
|
|
+ }
|
|
|
|
/*
|
|
* Set the bit in the AP mask (APM) corresponding to the AP adapter
|
|
* number (APID). The bits in the mask, from most significant to least
|
|
* significant bit, correspond to APIDs 0-255.
|
|
*/
|
|
- mutex_lock(&matrix_dev->lock);
|
|
-
|
|
ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
|
|
if (ret)
|
|
goto done;
|
|
@@ -672,22 +699,31 @@ static ssize_t unassign_adapter_store(struct device *dev,
|
|
struct mdev_device *mdev = mdev_from_dev(dev);
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
|
|
- /* If the guest is running, disallow un-assignment of adapter */
|
|
- if (matrix_mdev->kvm)
|
|
- return -EBUSY;
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in flux or the guest is running, disallow
|
|
+ * un-assignment of adapter
|
|
+ */
|
|
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
|
|
+ ret = -EBUSY;
|
|
+ goto done;
|
|
+ }
|
|
|
|
ret = kstrtoul(buf, 0, &apid);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto done;
|
|
|
|
- if (apid > matrix_mdev->matrix.apm_max)
|
|
- return -ENODEV;
|
|
+ if (apid > matrix_mdev->matrix.apm_max) {
|
|
+ ret = -ENODEV;
|
|
+ goto done;
|
|
+ }
|
|
|
|
- mutex_lock(&matrix_dev->lock);
|
|
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
|
|
+ ret = count;
|
|
+done:
|
|
mutex_unlock(&matrix_dev->lock);
|
|
-
|
|
- return count;
|
|
+ return ret;
|
|
}
|
|
static DEVICE_ATTR_WO(unassign_adapter);
|
|
|
|
@@ -753,17 +789,24 @@ static ssize_t assign_domain_store(struct device *dev,
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
|
|
|
|
- /* If the guest is running, disallow assignment of domain */
|
|
- if (matrix_mdev->kvm)
|
|
- return -EBUSY;
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in flux or the guest is running, disallow
|
|
+ * assignment of domain
|
|
+ */
|
|
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
|
|
+ ret = -EBUSY;
|
|
+ goto done;
|
|
+ }
|
|
|
|
ret = kstrtoul(buf, 0, &apqi);
|
|
if (ret)
|
|
- return ret;
|
|
- if (apqi > max_apqi)
|
|
- return -ENODEV;
|
|
-
|
|
- mutex_lock(&matrix_dev->lock);
|
|
+ goto done;
|
|
+ if (apqi > max_apqi) {
|
|
+ ret = -ENODEV;
|
|
+ goto done;
|
|
+ }
|
|
|
|
ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
|
|
if (ret)
|
|
@@ -814,22 +857,32 @@ static ssize_t unassign_domain_store(struct device *dev,
|
|
struct mdev_device *mdev = mdev_from_dev(dev);
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
|
|
- /* If the guest is running, disallow un-assignment of domain */
|
|
- if (matrix_mdev->kvm)
|
|
- return -EBUSY;
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in flux or the guest is running, disallow
|
|
+ * un-assignment of domain
|
|
+ */
|
|
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
|
|
+ ret = -EBUSY;
|
|
+ goto done;
|
|
+ }
|
|
|
|
ret = kstrtoul(buf, 0, &apqi);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto done;
|
|
|
|
- if (apqi > matrix_mdev->matrix.aqm_max)
|
|
- return -ENODEV;
|
|
+ if (apqi > matrix_mdev->matrix.aqm_max) {
|
|
+ ret = -ENODEV;
|
|
+ goto done;
|
|
+ }
|
|
|
|
- mutex_lock(&matrix_dev->lock);
|
|
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
|
|
- mutex_unlock(&matrix_dev->lock);
|
|
+ ret = count;
|
|
|
|
- return count;
|
|
+done:
|
|
+ mutex_unlock(&matrix_dev->lock);
|
|
+ return ret;
|
|
}
|
|
static DEVICE_ATTR_WO(unassign_domain);
|
|
|
|
@@ -858,27 +911,36 @@ static ssize_t assign_control_domain_store(struct device *dev,
|
|
struct mdev_device *mdev = mdev_from_dev(dev);
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
|
|
- /* If the guest is running, disallow assignment of control domain */
|
|
- if (matrix_mdev->kvm)
|
|
- return -EBUSY;
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in flux or the guest is running, disallow
|
|
+ * assignment of control domain.
|
|
+ */
|
|
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
|
|
+ ret = -EBUSY;
|
|
+ goto done;
|
|
+ }
|
|
|
|
ret = kstrtoul(buf, 0, &id);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto done;
|
|
|
|
- if (id > matrix_mdev->matrix.adm_max)
|
|
- return -ENODEV;
|
|
+ if (id > matrix_mdev->matrix.adm_max) {
|
|
+ ret = -ENODEV;
|
|
+ goto done;
|
|
+ }
|
|
|
|
/* Set the bit in the ADM (bitmask) corresponding to the AP control
|
|
* domain number (id). The bits in the mask, from most significant to
|
|
* least significant, correspond to IDs 0 up to the one less than the
|
|
* number of control domains that can be assigned.
|
|
*/
|
|
- mutex_lock(&matrix_dev->lock);
|
|
set_bit_inv(id, matrix_mdev->matrix.adm);
|
|
+ ret = count;
|
|
+done:
|
|
mutex_unlock(&matrix_dev->lock);
|
|
-
|
|
- return count;
|
|
+ return ret;
|
|
}
|
|
static DEVICE_ATTR_WO(assign_control_domain);
|
|
|
|
@@ -908,21 +970,30 @@ static ssize_t unassign_control_domain_store(struct device *dev,
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
unsigned long max_domid = matrix_mdev->matrix.adm_max;
|
|
|
|
- /* If the guest is running, disallow un-assignment of control domain */
|
|
- if (matrix_mdev->kvm)
|
|
- return -EBUSY;
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in flux or the guest is running, disallow
|
|
+ * un-assignment of control domain.
|
|
+ */
|
|
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
|
|
+ ret = -EBUSY;
|
|
+ goto done;
|
|
+ }
|
|
|
|
ret = kstrtoul(buf, 0, &domid);
|
|
if (ret)
|
|
- return ret;
|
|
- if (domid > max_domid)
|
|
- return -ENODEV;
|
|
+ goto done;
|
|
+ if (domid > max_domid) {
|
|
+ ret = -ENODEV;
|
|
+ goto done;
|
|
+ }
|
|
|
|
- mutex_lock(&matrix_dev->lock);
|
|
clear_bit_inv(domid, matrix_mdev->matrix.adm);
|
|
+ ret = count;
|
|
+done:
|
|
mutex_unlock(&matrix_dev->lock);
|
|
-
|
|
- return count;
|
|
+ return ret;
|
|
}
|
|
static DEVICE_ATTR_WO(unassign_control_domain);
|
|
|
|
@@ -1027,8 +1098,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
|
|
* @matrix_mdev: a mediated matrix device
|
|
* @kvm: reference to KVM instance
|
|
*
|
|
- * Verifies no other mediated matrix device has @kvm and sets a reference to
|
|
- * it in @matrix_mdev->kvm.
|
|
+ * Sets all data for @matrix_mdev that are needed to manage AP resources
|
|
+ * for the guest whose state is represented by @kvm.
|
|
+ *
|
|
+ * Note: The matrix_dev->lock must be taken prior to calling
|
|
+ * this function; however, the lock will be temporarily released while the
|
|
+ * guest's AP configuration is set to avoid a potential lockdep splat.
|
|
+ * The kvm->lock is taken to set the guest's AP configuration which, under
|
|
+ * certain circumstances, will result in a circular lock dependency if this is
|
|
+ * done under the matrix_dev->lock.
|
|
*
|
|
* Return 0 if no other mediated matrix device has a reference to @kvm;
|
|
* otherwise, returns an -EPERM.
|
|
@@ -1038,14 +1116,25 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
|
|
{
|
|
struct ap_matrix_mdev *m;
|
|
|
|
- list_for_each_entry(m, &matrix_dev->mdev_list, node) {
|
|
- if ((m != matrix_mdev) && (m->kvm == kvm))
|
|
- return -EPERM;
|
|
- }
|
|
+ if (kvm->arch.crypto.crycbd) {
|
|
+ list_for_each_entry(m, &matrix_dev->mdev_list, node) {
|
|
+ if (m != matrix_mdev && m->kvm == kvm)
|
|
+ return -EPERM;
|
|
+ }
|
|
|
|
- matrix_mdev->kvm = kvm;
|
|
- kvm_get_kvm(kvm);
|
|
- kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
|
|
+ kvm_get_kvm(kvm);
|
|
+ matrix_mdev->kvm_busy = true;
|
|
+ mutex_unlock(&matrix_dev->lock);
|
|
+ kvm_arch_crypto_set_masks(kvm,
|
|
+ matrix_mdev->matrix.apm,
|
|
+ matrix_mdev->matrix.aqm,
|
|
+ matrix_mdev->matrix.adm);
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
|
|
+ matrix_mdev->kvm = kvm;
|
|
+ matrix_mdev->kvm_busy = false;
|
|
+ wake_up_all(&matrix_mdev->wait_for_kvm);
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
@@ -1079,51 +1168,65 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
|
|
return NOTIFY_DONE;
|
|
}
|
|
|
|
+/**
|
|
+ * vfio_ap_mdev_unset_kvm
|
|
+ *
|
|
+ * @matrix_mdev: a matrix mediated device
|
|
+ *
|
|
+ * Performs clean-up of resources no longer needed by @matrix_mdev.
|
|
+ *
|
|
+ * Note: The matrix_dev->lock must be taken prior to calling
|
|
+ * this function; however, the lock will be temporarily released while the
|
|
+ * guest's AP configuration is cleared to avoid a potential lockdep splat.
|
|
+ * The kvm->lock is taken to clear the guest's AP configuration which, under
|
|
+ * certain circumstances, will result in a circular lock dependency if this is
|
|
+ * done under the matrix_dev->lock.
|
|
+ *
|
|
+ */
|
|
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
|
|
{
|
|
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
|
|
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
|
|
- vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
|
|
- kvm_put_kvm(matrix_mdev->kvm);
|
|
- matrix_mdev->kvm = NULL;
|
|
+ /*
|
|
+ * If the KVM pointer is in the process of being set, wait until the
|
|
+ * process has completed.
|
|
+ */
|
|
+ wait_event_cmd(matrix_mdev->wait_for_kvm,
|
|
+ !matrix_mdev->kvm_busy,
|
|
+ mutex_unlock(&matrix_dev->lock),
|
|
+ mutex_lock(&matrix_dev->lock));
|
|
+
|
|
+ if (matrix_mdev->kvm) {
|
|
+ matrix_mdev->kvm_busy = true;
|
|
+ mutex_unlock(&matrix_dev->lock);
|
|
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
|
|
+ mutex_lock(&matrix_dev->lock);
|
|
+ vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
|
|
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
|
|
+ kvm_put_kvm(matrix_mdev->kvm);
|
|
+ matrix_mdev->kvm = NULL;
|
|
+ matrix_mdev->kvm_busy = false;
|
|
+ wake_up_all(&matrix_mdev->wait_for_kvm);
|
|
+ }
|
|
}
|
|
|
|
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
|
|
unsigned long action, void *data)
|
|
{
|
|
- int ret, notify_rc = NOTIFY_OK;
|
|
+ int notify_rc = NOTIFY_OK;
|
|
struct ap_matrix_mdev *matrix_mdev;
|
|
|
|
if (action != VFIO_GROUP_NOTIFY_SET_KVM)
|
|
return NOTIFY_OK;
|
|
|
|
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
|
|
mutex_lock(&matrix_dev->lock);
|
|
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
|
|
|
|
- if (!data) {
|
|
- if (matrix_mdev->kvm)
|
|
- vfio_ap_mdev_unset_kvm(matrix_mdev);
|
|
- goto notify_done;
|
|
- }
|
|
-
|
|
- ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
|
|
- if (ret) {
|
|
- notify_rc = NOTIFY_DONE;
|
|
- goto notify_done;
|
|
- }
|
|
-
|
|
- /* If there is no CRYCB pointer, then we can't copy the masks */
|
|
- if (!matrix_mdev->kvm->arch.crypto.crycbd) {
|
|
+ if (!data)
|
|
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
|
|
+ else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
|
|
notify_rc = NOTIFY_DONE;
|
|
- goto notify_done;
|
|
- }
|
|
-
|
|
- kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
|
|
- matrix_mdev->matrix.aqm,
|
|
- matrix_mdev->matrix.adm);
|
|
|
|
-notify_done:
|
|
mutex_unlock(&matrix_dev->lock);
|
|
+
|
|
return notify_rc;
|
|
}
|
|
|
|
@@ -1258,8 +1361,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
|
|
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
|
|
|
|
mutex_lock(&matrix_dev->lock);
|
|
- if (matrix_mdev->kvm)
|
|
- vfio_ap_mdev_unset_kvm(matrix_mdev);
|
|
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
|
|
mutex_unlock(&matrix_dev->lock);
|
|
|
|
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
|
|
@@ -1293,6 +1395,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
|
|
unsigned int cmd, unsigned long arg)
|
|
{
|
|
int ret;
|
|
+ struct ap_matrix_mdev *matrix_mdev;
|
|
|
|
mutex_lock(&matrix_dev->lock);
|
|
switch (cmd) {
|
|
@@ -1300,6 +1403,21 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
|
|
ret = vfio_ap_mdev_get_device_info(arg);
|
|
break;
|
|
case VFIO_DEVICE_RESET:
|
|
+ matrix_mdev = mdev_get_drvdata(mdev);
|
|
+ if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If the KVM pointer is in the process of being set, wait until
|
|
+ * the process has completed.
|
|
+ */
|
|
+ wait_event_cmd(matrix_mdev->wait_for_kvm,
|
|
+ !matrix_mdev->kvm_busy,
|
|
+ mutex_unlock(&matrix_dev->lock),
|
|
+ mutex_lock(&matrix_dev->lock));
|
|
+
|
|
ret = vfio_ap_mdev_reset_queues(mdev);
|
|
break;
|
|
default:
|
|
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
|
|
index 28e9d99897682..f82a6396acae7 100644
|
|
--- a/drivers/s390/crypto/vfio_ap_private.h
|
|
+++ b/drivers/s390/crypto/vfio_ap_private.h
|
|
@@ -83,6 +83,8 @@ struct ap_matrix_mdev {
|
|
struct ap_matrix matrix;
|
|
struct notifier_block group_notifier;
|
|
struct notifier_block iommu_notifier;
|
|
+ bool kvm_busy;
|
|
+ wait_queue_head_t wait_for_kvm;
|
|
struct kvm *kvm;
|
|
struct kvm_s390_module_hook pqap_hook;
|
|
struct mdev_device *mdev;
|
|
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
|
|
index 33b23884b133f..09fe6bb8880bc 100644
|
|
--- a/drivers/s390/crypto/zcrypt_card.c
|
|
+++ b/drivers/s390/crypto/zcrypt_card.c
|
|
@@ -192,5 +192,6 @@ void zcrypt_card_unregister(struct zcrypt_card *zc)
|
|
spin_unlock(&zcrypt_list_lock);
|
|
sysfs_remove_group(&zc->card->ap_dev.device.kobj,
|
|
&zcrypt_card_attr_group);
|
|
+ zcrypt_card_put(zc);
|
|
}
|
|
EXPORT_SYMBOL(zcrypt_card_unregister);
|
|
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
|
|
index 5062eae73d4aa..c3ffbd26b73ff 100644
|
|
--- a/drivers/s390/crypto/zcrypt_queue.c
|
|
+++ b/drivers/s390/crypto/zcrypt_queue.c
|
|
@@ -223,5 +223,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
|
|
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
|
|
&zcrypt_queue_attr_group);
|
|
zcrypt_card_put(zc);
|
|
+ zcrypt_queue_put(zq);
|
|
}
|
|
EXPORT_SYMBOL(zcrypt_queue_unregister);
|
|
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
|
|
index ea436a14087f1..5eff3368143d3 100644
|
|
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
|
|
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
|
|
@@ -573,10 +573,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
|
|
* even though it shouldn't according to T10.
|
|
* The retry without rtpg_ext_hdr_req set
|
|
* handles this.
|
|
+ * Note: some arrays return a sense key of ILLEGAL_REQUEST
|
|
+ * with ASC 00h if they don't support the extended header.
|
|
*/
|
|
if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
|
|
- sense_hdr.sense_key == ILLEGAL_REQUEST &&
|
|
- sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
|
|
+ sense_hdr.sense_key == ILLEGAL_REQUEST) {
|
|
pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
|
|
goto retry;
|
|
}
|
|
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
|
|
index 22826544da7e7..9989669beec3c 100644
|
|
--- a/drivers/scsi/libfc/fc_lport.c
|
|
+++ b/drivers/scsi/libfc/fc_lport.c
|
|
@@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
|
|
|
|
if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
|
|
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
|
|
- "lport->mfs:%hu\n", mfs, lport->mfs);
|
|
+ "lport->mfs:%u\n", mfs, lport->mfs);
|
|
fc_lport_error(lport, fp);
|
|
goto out;
|
|
}
|
|
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
|
|
index 4528166dee36e..243513925e90a 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_attr.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_attr.c
|
|
@@ -1687,8 +1687,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
|
|
"0071 Set trunk mode failed with status: %d",
|
|
rc);
|
|
- if (rc != MBX_TIMEOUT)
|
|
- mempool_free(mbox, phba->mbox_mem_pool);
|
|
+ mempool_free(mbox, phba->mbox_mem_pool);
|
|
|
|
return 0;
|
|
}
|
|
@@ -6794,15 +6793,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
|
|
pmboxq->ctx_buf = NULL;
|
|
pmboxq->vport = vport;
|
|
|
|
- if (vport->fc_flag & FC_OFFLINE_MODE)
|
|
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
|
|
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
|
|
- else
|
|
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
|
|
-
|
|
- if (rc != MBX_SUCCESS) {
|
|
- if (rc != MBX_TIMEOUT)
|
|
+ if (rc != MBX_SUCCESS) {
|
|
mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
- return NULL;
|
|
+ return NULL;
|
|
+ }
|
|
+ } else {
|
|
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
|
|
+ if (rc != MBX_SUCCESS) {
|
|
+ if (rc != MBX_TIMEOUT)
|
|
+ mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
+ return NULL;
|
|
+ }
|
|
}
|
|
|
|
memset(hs, 0, sizeof (struct fc_host_statistics));
|
|
@@ -6826,15 +6829,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
|
|
pmboxq->ctx_buf = NULL;
|
|
pmboxq->vport = vport;
|
|
|
|
- if (vport->fc_flag & FC_OFFLINE_MODE)
|
|
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
|
|
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
|
|
- else
|
|
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
|
|
-
|
|
- if (rc != MBX_SUCCESS) {
|
|
- if (rc != MBX_TIMEOUT)
|
|
+ if (rc != MBX_SUCCESS) {
|
|
mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
- return NULL;
|
|
+ return NULL;
|
|
+ }
|
|
+ } else {
|
|
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
|
|
+ if (rc != MBX_SUCCESS) {
|
|
+ if (rc != MBX_TIMEOUT)
|
|
+ mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
+ return NULL;
|
|
+ }
|
|
}
|
|
|
|
hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
|
|
@@ -6907,15 +6914,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
|
|
pmboxq->vport = vport;
|
|
|
|
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
|
|
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
|
|
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
|
|
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
|
|
- else
|
|
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
|
|
-
|
|
- if (rc != MBX_SUCCESS) {
|
|
- if (rc != MBX_TIMEOUT)
|
|
+ if (rc != MBX_SUCCESS) {
|
|
mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
- return;
|
|
+ return;
|
|
+ }
|
|
+ } else {
|
|
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
|
|
+ if (rc != MBX_SUCCESS) {
|
|
+ if (rc != MBX_TIMEOUT)
|
|
+ mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
+ return;
|
|
+ }
|
|
}
|
|
|
|
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
|
|
@@ -6925,15 +6936,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
|
|
pmboxq->vport = vport;
|
|
|
|
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
|
|
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
|
|
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
|
|
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
|
|
- else
|
|
+ if (rc != MBX_SUCCESS) {
|
|
+ mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
+ return;
|
|
+ }
|
|
+ } else {
|
|
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
|
|
-
|
|
- if (rc != MBX_SUCCESS) {
|
|
- if (rc != MBX_TIMEOUT)
|
|
- mempool_free( pmboxq, phba->mbox_mem_pool);
|
|
- return;
|
|
+ if (rc != MBX_SUCCESS) {
|
|
+ if (rc != MBX_TIMEOUT)
|
|
+ mempool_free(pmboxq, phba->mbox_mem_pool);
|
|
+ return;
|
|
+ }
|
|
}
|
|
|
|
lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
|
|
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
|
|
index f78e52a18b0bf..823f9a074ba2a 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_crtn.h
|
|
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
|
|
@@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
|
|
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
|
|
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
|
|
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
|
|
-void lpfc_supported_pages(struct lpfcMboxq *);
|
|
-void lpfc_pc_sli4_params(struct lpfcMboxq *);
|
|
-int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
|
int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
|
|
uint16_t, uint16_t, bool);
|
|
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
|
@@ -352,8 +349,8 @@ int lpfc_sli_hbq_size(void);
|
|
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
|
|
struct lpfc_iocbq *, void *);
|
|
int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
|
|
-int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
|
|
- uint64_t, lpfc_ctx_cmd);
|
|
+int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
|
|
+ lpfc_ctx_cmd abort_cmd);
|
|
int
|
|
lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
|
|
uint16_t, uint64_t, lpfc_ctx_cmd);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
|
|
index 96c087b8b4744..2dce17827504f 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_els.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_els.c
|
|
@@ -1597,7 +1597,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
|
|
struct lpfc_nodelist *new_ndlp;
|
|
struct serv_parm *sp;
|
|
uint8_t name[sizeof(struct lpfc_name)];
|
|
- uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
|
|
+ uint32_t keepDID = 0, keep_nlp_flag = 0;
|
|
uint32_t keep_new_nlp_flag = 0;
|
|
uint16_t keep_nlp_state;
|
|
u32 keep_nlp_fc4_type = 0;
|
|
@@ -1619,7 +1619,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
|
|
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
|
|
|
|
/* return immediately if the WWPN matches ndlp */
|
|
- if (new_ndlp == ndlp)
|
|
+ if (!new_ndlp || (new_ndlp == ndlp))
|
|
return ndlp;
|
|
|
|
if (phba->sli_rev == LPFC_SLI_REV4) {
|
|
@@ -1638,30 +1638,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
|
|
(new_ndlp ? new_ndlp->nlp_flag : 0),
|
|
(new_ndlp ? new_ndlp->nlp_fc4_type : 0));
|
|
|
|
- if (!new_ndlp) {
|
|
- rc = memcmp(&ndlp->nlp_portname, name,
|
|
- sizeof(struct lpfc_name));
|
|
- if (!rc) {
|
|
- if (active_rrqs_xri_bitmap)
|
|
- mempool_free(active_rrqs_xri_bitmap,
|
|
- phba->active_rrq_pool);
|
|
- return ndlp;
|
|
- }
|
|
- new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
|
|
- if (!new_ndlp) {
|
|
- if (active_rrqs_xri_bitmap)
|
|
- mempool_free(active_rrqs_xri_bitmap,
|
|
- phba->active_rrq_pool);
|
|
- return ndlp;
|
|
- }
|
|
- } else {
|
|
- keepDID = new_ndlp->nlp_DID;
|
|
- if (phba->sli_rev == LPFC_SLI_REV4 &&
|
|
- active_rrqs_xri_bitmap)
|
|
- memcpy(active_rrqs_xri_bitmap,
|
|
- new_ndlp->active_rrqs_xri_bitmap,
|
|
- phba->cfg_rrq_xri_bitmap_sz);
|
|
- }
|
|
+ keepDID = new_ndlp->nlp_DID;
|
|
+
|
|
+ if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
|
|
+ memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
|
|
+ phba->cfg_rrq_xri_bitmap_sz);
|
|
|
|
/* At this point in this routine, we know new_ndlp will be
|
|
* returned. however, any previous GID_FTs that were done
|
|
@@ -3840,7 +3821,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
did = irsp->un.elsreq64.remoteID;
|
|
ndlp = lpfc_findnode_did(vport, did);
|
|
if (!ndlp && (cmd != ELS_CMD_PLOGI))
|
|
- return 1;
|
|
+ return 0;
|
|
}
|
|
|
|
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
|
|
@@ -4484,10 +4465,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|
* nlp_flag bitmap in the ndlp data structure, if the mbox command reference
|
|
* field in the command IOCB is not NULL, the referred mailbox command will
|
|
* be send out, and then invokes the lpfc_els_free_iocb() routine to release
|
|
- * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
|
|
- * link down event occurred during the discovery, the lpfc_nlp_not_used()
|
|
- * routine shall be invoked trying to release the ndlp if no other threads
|
|
- * are currently referring it.
|
|
+ * the IOCB.
|
|
**/
|
|
static void
|
|
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
@@ -4497,10 +4475,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
|
|
struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
|
|
IOCB_t *irsp;
|
|
- uint8_t *pcmd;
|
|
LPFC_MBOXQ_t *mbox = NULL;
|
|
struct lpfc_dmabuf *mp = NULL;
|
|
- uint32_t ls_rjt = 0;
|
|
|
|
irsp = &rspiocb->iocb;
|
|
|
|
@@ -4512,18 +4488,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
if (cmdiocb->context_un.mbox)
|
|
mbox = cmdiocb->context_un.mbox;
|
|
|
|
- /* First determine if this is a LS_RJT cmpl. Note, this callback
|
|
- * function can have cmdiocb->contest1 (ndlp) field set to NULL.
|
|
- */
|
|
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
|
|
- if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
|
|
- /* A LS_RJT associated with Default RPI cleanup has its own
|
|
- * separate code path.
|
|
- */
|
|
- if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
|
|
- ls_rjt = 1;
|
|
- }
|
|
-
|
|
/* Check to see if link went down during discovery */
|
|
if (!ndlp || lpfc_els_chk_latt(vport)) {
|
|
if (mbox) {
|
|
@@ -4534,15 +4498,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
}
|
|
mempool_free(mbox, phba->mbox_mem_pool);
|
|
}
|
|
- if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
|
|
- if (lpfc_nlp_not_used(ndlp)) {
|
|
- ndlp = NULL;
|
|
- /* Indicate the node has already released,
|
|
- * should not reference to it from within
|
|
- * the routine lpfc_els_free_iocb.
|
|
- */
|
|
- cmdiocb->context1 = NULL;
|
|
- }
|
|
goto out;
|
|
}
|
|
|
|
@@ -4620,29 +4575,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
"Data: x%x x%x x%x\n",
|
|
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
|
|
ndlp->nlp_rpi);
|
|
-
|
|
- if (lpfc_nlp_not_used(ndlp)) {
|
|
- ndlp = NULL;
|
|
- /* Indicate node has already been released,
|
|
- * should not reference to it from within
|
|
- * the routine lpfc_els_free_iocb.
|
|
- */
|
|
- cmdiocb->context1 = NULL;
|
|
- }
|
|
- } else {
|
|
- /* Do not drop node for lpfc_els_abort'ed ELS cmds */
|
|
- if (!lpfc_error_lost_link(irsp) &&
|
|
- ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
|
|
- if (lpfc_nlp_not_used(ndlp)) {
|
|
- ndlp = NULL;
|
|
- /* Indicate node has already been
|
|
- * released, should not reference
|
|
- * to it from within the routine
|
|
- * lpfc_els_free_iocb.
|
|
- */
|
|
- cmdiocb->context1 = NULL;
|
|
- }
|
|
- }
|
|
}
|
|
mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
|
|
if (mp) {
|
|
@@ -4658,19 +4590,6 @@ out:
|
|
ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
|
|
ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
|
|
spin_unlock_irq(&ndlp->lock);
|
|
-
|
|
- /* If the node is not being used by another discovery thread,
|
|
- * and we are sending a reject, we are done with it.
|
|
- * Release driver reference count here and free associated
|
|
- * resources.
|
|
- */
|
|
- if (ls_rjt)
|
|
- if (lpfc_nlp_not_used(ndlp))
|
|
- /* Indicate node has already been released,
|
|
- * should not reference to it from within
|
|
- * the routine lpfc_els_free_iocb.
|
|
- */
|
|
- cmdiocb->context1 = NULL;
|
|
}
|
|
|
|
/* Release the originating I/O reference. */
|
|
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
index e5ace4a4f432a..c482a564a14dd 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
@@ -130,11 +130,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
|
|
"rport terminate: sid:x%x did:x%x flg:x%x",
|
|
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
|
|
|
|
- if (ndlp->nlp_sid != NLP_NO_SID) {
|
|
- lpfc_sli_abort_iocb(vport,
|
|
- &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
|
|
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
|
|
- }
|
|
+ if (ndlp->nlp_sid != NLP_NO_SID)
|
|
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
|
|
}
|
|
|
|
/*
|
|
@@ -289,8 +286,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
|
|
|
if (ndlp->nlp_sid != NLP_NO_SID) {
|
|
warn_on = 1;
|
|
- lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
|
|
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
|
|
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
|
|
}
|
|
|
|
if (warn_on) {
|
|
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
|
|
index 541b9aef6bfec..f5bc2c32a8179 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_hw4.h
|
|
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
|
|
@@ -124,6 +124,7 @@ struct lpfc_sli_intf {
|
|
/* Define SLI4 Alignment requirements. */
|
|
#define LPFC_ALIGN_16_BYTE 16
|
|
#define LPFC_ALIGN_64_BYTE 64
|
|
+#define SLI4_PAGE_SIZE 4096
|
|
|
|
/* Define SLI4 specific definitions. */
|
|
#define LPFC_MQ_CQE_BYTE_OFFSET 256
|
|
@@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
|
|
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
|
|
};
|
|
|
|
-struct lpfc_mbx_supp_pages {
|
|
- uint32_t word1;
|
|
-#define qs_SHIFT 0
|
|
-#define qs_MASK 0x00000001
|
|
-#define qs_WORD word1
|
|
-#define wr_SHIFT 1
|
|
-#define wr_MASK 0x00000001
|
|
-#define wr_WORD word1
|
|
-#define pf_SHIFT 8
|
|
-#define pf_MASK 0x000000ff
|
|
-#define pf_WORD word1
|
|
-#define cpn_SHIFT 16
|
|
-#define cpn_MASK 0x000000ff
|
|
-#define cpn_WORD word1
|
|
- uint32_t word2;
|
|
-#define list_offset_SHIFT 0
|
|
-#define list_offset_MASK 0x000000ff
|
|
-#define list_offset_WORD word2
|
|
-#define next_offset_SHIFT 8
|
|
-#define next_offset_MASK 0x000000ff
|
|
-#define next_offset_WORD word2
|
|
-#define elem_cnt_SHIFT 16
|
|
-#define elem_cnt_MASK 0x000000ff
|
|
-#define elem_cnt_WORD word2
|
|
- uint32_t word3;
|
|
-#define pn_0_SHIFT 24
|
|
-#define pn_0_MASK 0x000000ff
|
|
-#define pn_0_WORD word3
|
|
-#define pn_1_SHIFT 16
|
|
-#define pn_1_MASK 0x000000ff
|
|
-#define pn_1_WORD word3
|
|
-#define pn_2_SHIFT 8
|
|
-#define pn_2_MASK 0x000000ff
|
|
-#define pn_2_WORD word3
|
|
-#define pn_3_SHIFT 0
|
|
-#define pn_3_MASK 0x000000ff
|
|
-#define pn_3_WORD word3
|
|
- uint32_t word4;
|
|
-#define pn_4_SHIFT 24
|
|
-#define pn_4_MASK 0x000000ff
|
|
-#define pn_4_WORD word4
|
|
-#define pn_5_SHIFT 16
|
|
-#define pn_5_MASK 0x000000ff
|
|
-#define pn_5_WORD word4
|
|
-#define pn_6_SHIFT 8
|
|
-#define pn_6_MASK 0x000000ff
|
|
-#define pn_6_WORD word4
|
|
-#define pn_7_SHIFT 0
|
|
-#define pn_7_MASK 0x000000ff
|
|
-#define pn_7_WORD word4
|
|
- uint32_t rsvd[27];
|
|
-#define LPFC_SUPP_PAGES 0
|
|
-#define LPFC_BLOCK_GUARD_PROFILES 1
|
|
-#define LPFC_SLI4_PARAMETERS 2
|
|
-};
|
|
-
|
|
struct lpfc_mbx_memory_dump_type3 {
|
|
uint32_t word1;
|
|
#define lpfc_mbx_memory_dump_type3_type_SHIFT 0
|
|
@@ -3248,121 +3193,6 @@ struct user_eeprom {
|
|
uint8_t reserved191[57];
|
|
};
|
|
|
|
-struct lpfc_mbx_pc_sli4_params {
|
|
- uint32_t word1;
|
|
-#define qs_SHIFT 0
|
|
-#define qs_MASK 0x00000001
|
|
-#define qs_WORD word1
|
|
-#define wr_SHIFT 1
|
|
-#define wr_MASK 0x00000001
|
|
-#define wr_WORD word1
|
|
-#define pf_SHIFT 8
|
|
-#define pf_MASK 0x000000ff
|
|
-#define pf_WORD word1
|
|
-#define cpn_SHIFT 16
|
|
-#define cpn_MASK 0x000000ff
|
|
-#define cpn_WORD word1
|
|
- uint32_t word2;
|
|
-#define if_type_SHIFT 0
|
|
-#define if_type_MASK 0x00000007
|
|
-#define if_type_WORD word2
|
|
-#define sli_rev_SHIFT 4
|
|
-#define sli_rev_MASK 0x0000000f
|
|
-#define sli_rev_WORD word2
|
|
-#define sli_family_SHIFT 8
|
|
-#define sli_family_MASK 0x000000ff
|
|
-#define sli_family_WORD word2
|
|
-#define featurelevel_1_SHIFT 16
|
|
-#define featurelevel_1_MASK 0x000000ff
|
|
-#define featurelevel_1_WORD word2
|
|
-#define featurelevel_2_SHIFT 24
|
|
-#define featurelevel_2_MASK 0x0000001f
|
|
-#define featurelevel_2_WORD word2
|
|
- uint32_t word3;
|
|
-#define fcoe_SHIFT 0
|
|
-#define fcoe_MASK 0x00000001
|
|
-#define fcoe_WORD word3
|
|
-#define fc_SHIFT 1
|
|
-#define fc_MASK 0x00000001
|
|
-#define fc_WORD word3
|
|
-#define nic_SHIFT 2
|
|
-#define nic_MASK 0x00000001
|
|
-#define nic_WORD word3
|
|
-#define iscsi_SHIFT 3
|
|
-#define iscsi_MASK 0x00000001
|
|
-#define iscsi_WORD word3
|
|
-#define rdma_SHIFT 4
|
|
-#define rdma_MASK 0x00000001
|
|
-#define rdma_WORD word3
|
|
- uint32_t sge_supp_len;
|
|
-#define SLI4_PAGE_SIZE 4096
|
|
- uint32_t word5;
|
|
-#define if_page_sz_SHIFT 0
|
|
-#define if_page_sz_MASK 0x0000ffff
|
|
-#define if_page_sz_WORD word5
|
|
-#define loopbk_scope_SHIFT 24
|
|
-#define loopbk_scope_MASK 0x0000000f
|
|
-#define loopbk_scope_WORD word5
|
|
-#define rq_db_window_SHIFT 28
|
|
-#define rq_db_window_MASK 0x0000000f
|
|
-#define rq_db_window_WORD word5
|
|
- uint32_t word6;
|
|
-#define eq_pages_SHIFT 0
|
|
-#define eq_pages_MASK 0x0000000f
|
|
-#define eq_pages_WORD word6
|
|
-#define eqe_size_SHIFT 8
|
|
-#define eqe_size_MASK 0x000000ff
|
|
-#define eqe_size_WORD word6
|
|
- uint32_t word7;
|
|
-#define cq_pages_SHIFT 0
|
|
-#define cq_pages_MASK 0x0000000f
|
|
-#define cq_pages_WORD word7
|
|
-#define cqe_size_SHIFT 8
|
|
-#define cqe_size_MASK 0x000000ff
|
|
-#define cqe_size_WORD word7
|
|
- uint32_t word8;
|
|
-#define mq_pages_SHIFT 0
|
|
-#define mq_pages_MASK 0x0000000f
|
|
-#define mq_pages_WORD word8
|
|
-#define mqe_size_SHIFT 8
|
|
-#define mqe_size_MASK 0x000000ff
|
|
-#define mqe_size_WORD word8
|
|
-#define mq_elem_cnt_SHIFT 16
|
|
-#define mq_elem_cnt_MASK 0x000000ff
|
|
-#define mq_elem_cnt_WORD word8
|
|
- uint32_t word9;
|
|
-#define wq_pages_SHIFT 0
|
|
-#define wq_pages_MASK 0x0000ffff
|
|
-#define wq_pages_WORD word9
|
|
-#define wqe_size_SHIFT 8
|
|
-#define wqe_size_MASK 0x000000ff
|
|
-#define wqe_size_WORD word9
|
|
- uint32_t word10;
|
|
-#define rq_pages_SHIFT 0
|
|
-#define rq_pages_MASK 0x0000ffff
|
|
-#define rq_pages_WORD word10
|
|
-#define rqe_size_SHIFT 8
|
|
-#define rqe_size_MASK 0x000000ff
|
|
-#define rqe_size_WORD word10
|
|
- uint32_t word11;
|
|
-#define hdr_pages_SHIFT 0
|
|
-#define hdr_pages_MASK 0x0000000f
|
|
-#define hdr_pages_WORD word11
|
|
-#define hdr_size_SHIFT 8
|
|
-#define hdr_size_MASK 0x0000000f
|
|
-#define hdr_size_WORD word11
|
|
-#define hdr_pp_align_SHIFT 16
|
|
-#define hdr_pp_align_MASK 0x0000ffff
|
|
-#define hdr_pp_align_WORD word11
|
|
- uint32_t word12;
|
|
-#define sgl_pages_SHIFT 0
|
|
-#define sgl_pages_MASK 0x0000000f
|
|
-#define sgl_pages_WORD word12
|
|
-#define sgl_pp_align_SHIFT 16
|
|
-#define sgl_pp_align_MASK 0x0000ffff
|
|
-#define sgl_pp_align_WORD word12
|
|
- uint32_t rsvd_13_63[51];
|
|
-};
|
|
#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
|
|
&(~((SLI4_PAGE_SIZE)-1)))
|
|
|
|
@@ -3994,8 +3824,6 @@ struct lpfc_mqe {
|
|
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
|
|
struct lpfc_mbx_query_fw_config query_fw_cfg;
|
|
struct lpfc_mbx_set_beacon_config beacon_config;
|
|
- struct lpfc_mbx_supp_pages supp_pages;
|
|
- struct lpfc_mbx_pc_sli4_params sli4_params;
|
|
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
|
|
struct lpfc_mbx_set_link_diag_state link_diag_state;
|
|
struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
|
|
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
|
|
index ac67f420ec264..971bbadda8491 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_init.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_init.c
|
|
@@ -6520,8 +6520,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|
LPFC_MBOXQ_t *mboxq;
|
|
MAILBOX_t *mb;
|
|
int rc, i, max_buf_size;
|
|
- uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
|
|
- struct lpfc_mqe *mqe;
|
|
int longs;
|
|
int extra;
|
|
uint64_t wwn;
|
|
@@ -6755,32 +6753,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|
|
|
lpfc_nvme_mod_param_dep(phba);
|
|
|
|
- /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
|
|
- lpfc_supported_pages(mboxq);
|
|
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
|
- if (!rc) {
|
|
- mqe = &mboxq->u.mqe;
|
|
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
|
|
- LPFC_MAX_SUPPORTED_PAGES);
|
|
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
|
|
- switch (pn_page[i]) {
|
|
- case LPFC_SLI4_PARAMETERS:
|
|
- phba->sli4_hba.pc_sli4_params.supported = 1;
|
|
- break;
|
|
- default:
|
|
- break;
|
|
- }
|
|
- }
|
|
- /* Read the port's SLI4 Parameters capabilities if supported. */
|
|
- if (phba->sli4_hba.pc_sli4_params.supported)
|
|
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
|
|
- if (rc) {
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
- rc = -EIO;
|
|
- goto out_free_bsmbx;
|
|
- }
|
|
- }
|
|
-
|
|
/*
|
|
* Get sli4 parameters that override parameters from Port capabilities.
|
|
* If this call fails, it isn't critical unless the SLI4 parameters come
|
|
@@ -9607,8 +9579,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|
"3250 QUERY_FW_CFG mailbox failed with status "
|
|
"x%x add_status x%x, mbx status x%x\n",
|
|
shdr_status, shdr_add_status, rc);
|
|
- if (rc != MBX_TIMEOUT)
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
+ mempool_free(mboxq, phba->mbox_mem_pool);
|
|
rc = -ENXIO;
|
|
goto out_error;
|
|
}
|
|
@@ -9624,8 +9595,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|
"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
|
|
phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
|
|
|
|
- if (rc != MBX_TIMEOUT)
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
+ mempool_free(mboxq, phba->mbox_mem_pool);
|
|
|
|
/*
|
|
* Set up HBA Event Queues (EQs)
|
|
@@ -10223,8 +10193,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
|
|
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
|
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
|
|
&shdr->response);
|
|
- if (rc != MBX_TIMEOUT)
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
+ mempool_free(mboxq, phba->mbox_mem_pool);
|
|
if (shdr_status || shdr_add_status || rc) {
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
"0495 SLI_FUNCTION_RESET mailbox "
|
|
@@ -12020,78 +11989,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
|
|
phba->pport->work_port_events = 0;
|
|
}
|
|
|
|
- /**
|
|
- * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
|
|
- * @phba: Pointer to HBA context object.
|
|
- * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
|
|
- *
|
|
- * This function is called in the SLI4 code path to read the port's
|
|
- * sli4 capabilities.
|
|
- *
|
|
- * This function may be be called from any context that can block-wait
|
|
- * for the completion. The expectation is that this routine is called
|
|
- * typically from probe_one or from the online routine.
|
|
- **/
|
|
-int
|
|
-lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|
-{
|
|
- int rc;
|
|
- struct lpfc_mqe *mqe;
|
|
- struct lpfc_pc_sli4_params *sli4_params;
|
|
- uint32_t mbox_tmo;
|
|
-
|
|
- rc = 0;
|
|
- mqe = &mboxq->u.mqe;
|
|
-
|
|
- /* Read the port's SLI4 Parameters port capabilities */
|
|
- lpfc_pc_sli4_params(mboxq);
|
|
- if (!phba->sli4_hba.intr_enable)
|
|
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
|
- else {
|
|
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
|
|
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
|
|
- }
|
|
-
|
|
- if (unlikely(rc))
|
|
- return 1;
|
|
-
|
|
- sli4_params = &phba->sli4_hba.pc_sli4_params;
|
|
- sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
|
|
- sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
|
|
- sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
|
|
- sli4_params->featurelevel_1 = bf_get(featurelevel_1,
|
|
- &mqe->un.sli4_params);
|
|
- sli4_params->featurelevel_2 = bf_get(featurelevel_2,
|
|
- &mqe->un.sli4_params);
|
|
- sli4_params->proto_types = mqe->un.sli4_params.word3;
|
|
- sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
|
|
- sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
|
|
- sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
|
|
- sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
|
|
- sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
|
|
- sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
|
|
- sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
|
|
- sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
|
|
- sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
|
|
- sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
|
|
- sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
|
|
- sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
|
|
- sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
|
|
- sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
|
|
- sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
|
|
- sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
|
|
- sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
|
|
- sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
|
|
- sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
|
|
- sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
|
|
-
|
|
- /* Make sure that sge_supp_len can be handled by the driver */
|
|
- if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
|
|
- sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
|
|
-
|
|
- return rc;
|
|
-}
|
|
-
|
|
/**
|
|
* lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
|
|
* @phba: Pointer to HBA context object.
|
|
@@ -12150,7 +12047,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|
else
|
|
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
|
|
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
|
|
- sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
|
|
+ sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
|
|
+ mbx_sli4_parameters);
|
|
sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
|
|
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
|
|
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
|
|
index 3414ffcb26fed..8764fdfc41d49 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_mbox.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
|
|
@@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
|
|
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
|
|
}
|
|
|
|
-/**
|
|
- * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
|
|
- * mailbox command.
|
|
- * @mbox: pointer to lpfc mbox command to initialize.
|
|
- *
|
|
- * The PORT_CAPABILITIES supported pages mailbox command is issued to
|
|
- * retrieve the particular feature pages supported by the port.
|
|
- **/
|
|
-void
|
|
-lpfc_supported_pages(struct lpfcMboxq *mbox)
|
|
-{
|
|
- struct lpfc_mbx_supp_pages *supp_pages;
|
|
-
|
|
- memset(mbox, 0, sizeof(*mbox));
|
|
- supp_pages = &mbox->u.mqe.un.supp_pages;
|
|
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
|
|
- bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
|
|
-}
|
|
-
|
|
-/**
|
|
- * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
|
|
- * @mbox: pointer to lpfc mbox command to initialize.
|
|
- *
|
|
- * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
|
|
- * retrieve the particular SLI4 features supported by the port.
|
|
- **/
|
|
-void
|
|
-lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
|
|
-{
|
|
- struct lpfc_mbx_pc_sli4_params *sli4_params;
|
|
-
|
|
- memset(mbox, 0, sizeof(*mbox));
|
|
- sli4_params = &mbox->u.mqe.un.sli4_params;
|
|
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
|
|
- bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
|
|
-}
|
|
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
index 1ac855640fc5d..b414c4210ce6e 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
@@ -277,106 +277,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
|
|
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
|
|
}
|
|
|
|
-/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
|
|
+/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
|
|
* @phba: pointer to lpfc hba data structure.
|
|
- * @link_mbox: pointer to CONFIG_LINK mailbox object
|
|
+ * @login_mbox: pointer to REG_RPI mailbox object
|
|
*
|
|
- * This routine is only called if we are SLI3, direct connect pt2pt
|
|
- * mode and the remote NPort issues the PLOGI after link up.
|
|
+ * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
|
|
*/
|
|
static void
|
|
-lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
|
|
+lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
|
|
{
|
|
- LPFC_MBOXQ_t *login_mbox;
|
|
- MAILBOX_t *mb = &link_mbox->u.mb;
|
|
struct lpfc_iocbq *save_iocb;
|
|
struct lpfc_nodelist *ndlp;
|
|
+ MAILBOX_t *mb = &login_mbox->u.mb;
|
|
+
|
|
int rc;
|
|
|
|
- ndlp = link_mbox->ctx_ndlp;
|
|
- login_mbox = link_mbox->context3;
|
|
+ ndlp = login_mbox->ctx_ndlp;
|
|
save_iocb = login_mbox->context3;
|
|
- link_mbox->context3 = NULL;
|
|
- login_mbox->context3 = NULL;
|
|
-
|
|
- /* Check for CONFIG_LINK error */
|
|
- if (mb->mbxStatus) {
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
- "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
|
|
- mb->mbxStatus);
|
|
- mempool_free(login_mbox, phba->mbox_mem_pool);
|
|
- mempool_free(link_mbox, phba->mbox_mem_pool);
|
|
- kfree(save_iocb);
|
|
- return;
|
|
- }
|
|
|
|
- /* Now that CONFIG_LINK completed, and our SID is configured,
|
|
- * we can now proceed with sending the PLOGI ACC.
|
|
- */
|
|
- rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
|
|
- save_iocb, ndlp, login_mbox);
|
|
- if (rc) {
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
- "4576 PLOGI ACC fails pt2pt discovery: %x\n",
|
|
- rc);
|
|
- mempool_free(login_mbox, phba->mbox_mem_pool);
|
|
+ if (mb->mbxStatus == MBX_SUCCESS) {
|
|
+ /* Now that REG_RPI completed successfully,
|
|
+ * we can now proceed with sending the PLOGI ACC.
|
|
+ */
|
|
+ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
|
|
+ save_iocb, ndlp, NULL);
|
|
+ if (rc) {
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
+ "4576 PLOGI ACC fails pt2pt discovery: "
|
|
+ "DID %x Data: %x\n", ndlp->nlp_DID, rc);
|
|
+ }
|
|
}
|
|
|
|
- mempool_free(link_mbox, phba->mbox_mem_pool);
|
|
+ /* Now process the REG_RPI cmpl */
|
|
+ lpfc_mbx_cmpl_reg_login(phba, login_mbox);
|
|
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
|
|
kfree(save_iocb);
|
|
}
|
|
|
|
-/**
|
|
- * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
|
|
- * @phba: Pointer to HBA context object.
|
|
- * @pmb: Pointer to mailbox object.
|
|
- *
|
|
- * This function provides the unreg rpi mailbox completion handler for a tgt.
|
|
- * The routine frees the memory resources associated with the completed
|
|
- * mailbox command and transmits the ELS ACC.
|
|
- *
|
|
- * This routine is only called if we are SLI4, acting in target
|
|
- * mode and the remote NPort issues the PLOGI after link up.
|
|
- **/
|
|
-static void
|
|
-lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|
-{
|
|
- struct lpfc_vport *vport = pmb->vport;
|
|
- struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
|
|
- LPFC_MBOXQ_t *mbox = pmb->context3;
|
|
- struct lpfc_iocbq *piocb = NULL;
|
|
- int rc;
|
|
-
|
|
- if (mbox) {
|
|
- pmb->context3 = NULL;
|
|
- piocb = mbox->context3;
|
|
- mbox->context3 = NULL;
|
|
- }
|
|
-
|
|
- /*
|
|
- * Complete the unreg rpi mbx request, and update flags.
|
|
- * This will also restart any deferred events.
|
|
- */
|
|
- lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
|
|
-
|
|
- if (!piocb) {
|
|
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
|
|
- "4578 PLOGI ACC fail\n");
|
|
- if (mbox)
|
|
- mempool_free(mbox, phba->mbox_mem_pool);
|
|
- return;
|
|
- }
|
|
-
|
|
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
|
|
- if (rc) {
|
|
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
|
|
- "4579 PLOGI ACC fail %x\n", rc);
|
|
- if (mbox)
|
|
- mempool_free(mbox, phba->mbox_mem_pool);
|
|
- }
|
|
- kfree(piocb);
|
|
-}
|
|
-
|
|
static int
|
|
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
struct lpfc_iocbq *cmdiocb)
|
|
@@ -393,8 +330,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
struct lpfc_iocbq *save_iocb;
|
|
struct ls_rjt stat;
|
|
uint32_t vid, flag;
|
|
- u16 rpi;
|
|
- int rc, defer_acc;
|
|
+ int rc;
|
|
|
|
memset(&stat, 0, sizeof (struct ls_rjt));
|
|
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
|
|
@@ -443,7 +379,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
else
|
|
ndlp->nlp_fcp_info |= CLASS3;
|
|
|
|
- defer_acc = 0;
|
|
ndlp->nlp_class_sup = 0;
|
|
if (sp->cls1.classValid)
|
|
ndlp->nlp_class_sup |= FC_COS_CLASS1;
|
|
@@ -527,27 +462,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
|
|
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
|
|
|
|
- /* Issue config_link / reg_vfi to account for updated TOV's */
|
|
-
|
|
+ /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
|
|
+ * to account for updated TOV's / parameters
|
|
+ */
|
|
if (phba->sli_rev == LPFC_SLI_REV4)
|
|
lpfc_issue_reg_vfi(vport);
|
|
else {
|
|
- defer_acc = 1;
|
|
link_mbox = mempool_alloc(phba->mbox_mem_pool,
|
|
GFP_KERNEL);
|
|
if (!link_mbox)
|
|
goto out;
|
|
lpfc_config_link(phba, link_mbox);
|
|
- link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
|
|
+ link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
|
link_mbox->vport = vport;
|
|
link_mbox->ctx_ndlp = ndlp;
|
|
|
|
- save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
|
|
- if (!save_iocb)
|
|
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
|
|
+ if (rc == MBX_NOT_FINISHED) {
|
|
+ mempool_free(link_mbox, phba->mbox_mem_pool);
|
|
goto out;
|
|
- /* Save info from cmd IOCB used in rsp */
|
|
- memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
|
|
- sizeof(struct lpfc_iocbq));
|
|
+ }
|
|
}
|
|
|
|
lpfc_can_disctmo(vport);
|
|
@@ -566,59 +500,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
if (!login_mbox)
|
|
goto out;
|
|
|
|
- /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
|
|
- if (phba->nvmet_support && !defer_acc) {
|
|
- link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
- if (!link_mbox)
|
|
- goto out;
|
|
-
|
|
- /* As unique identifiers such as iotag would be overwritten
|
|
- * with those from the cmdiocb, allocate separate temporary
|
|
- * storage for the copy.
|
|
- */
|
|
- save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
|
|
- if (!save_iocb)
|
|
- goto out;
|
|
-
|
|
- /* Unreg RPI is required for SLI4. */
|
|
- rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
|
- lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
|
|
- link_mbox->vport = vport;
|
|
- link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
|
|
- if (!link_mbox->ctx_ndlp)
|
|
- goto out;
|
|
-
|
|
- link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
|
|
-
|
|
- if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
|
|
- (!(vport->fc_flag & FC_OFFLINE_MODE)))
|
|
- ndlp->nlp_flag |= NLP_UNREG_INP;
|
|
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
|
|
+ if (!save_iocb)
|
|
+ goto out;
|
|
|
|
- /* Save info from cmd IOCB used in rsp */
|
|
- memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
|
|
+ /* Save info from cmd IOCB to be used in rsp after all mbox completes */
|
|
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
|
|
+ sizeof(struct lpfc_iocbq));
|
|
|
|
- /* Delay sending ACC till unreg RPI completes. */
|
|
- defer_acc = 1;
|
|
- } else if (phba->sli_rev == LPFC_SLI_REV4)
|
|
+ /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
|
|
+ if (phba->sli_rev == LPFC_SLI_REV4)
|
|
lpfc_unreg_rpi(vport, ndlp);
|
|
|
|
+ /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
|
|
+ * always be deferring the ACC.
|
|
+ */
|
|
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
|
|
(uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
|
|
if (rc)
|
|
goto out;
|
|
|
|
- /* ACC PLOGI rsp command needs to execute first,
|
|
- * queue this login_mbox command to be processed later.
|
|
- */
|
|
login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
|
|
- /*
|
|
- * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
|
|
- * command issued in lpfc_cmpl_els_acc().
|
|
- */
|
|
login_mbox->vport = vport;
|
|
- spin_lock_irq(&ndlp->lock);
|
|
- ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
|
|
- spin_unlock_irq(&ndlp->lock);
|
|
|
|
/*
|
|
* If there is an outstanding PLOGI issued, abort it before
|
|
@@ -648,7 +551,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
* to register, then unregister the RPI.
|
|
*/
|
|
spin_lock_irq(&ndlp->lock);
|
|
- ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
|
|
+ ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
|
|
+ NLP_RCV_PLOGI);
|
|
spin_unlock_irq(&ndlp->lock);
|
|
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
|
|
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
|
|
@@ -658,42 +562,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
mempool_free(login_mbox, phba->mbox_mem_pool);
|
|
return 1;
|
|
}
|
|
- if (defer_acc) {
|
|
- /* So the order here should be:
|
|
- * SLI3 pt2pt
|
|
- * Issue CONFIG_LINK mbox
|
|
- * CONFIG_LINK cmpl
|
|
- * SLI4 tgt
|
|
- * Issue UNREG RPI mbx
|
|
- * UNREG RPI cmpl
|
|
- * Issue PLOGI ACC
|
|
- * PLOGI ACC cmpl
|
|
- * Issue REG_LOGIN mbox
|
|
- */
|
|
|
|
- /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
|
|
- link_mbox->context3 = login_mbox;
|
|
- login_mbox->context3 = save_iocb;
|
|
+ /* So the order here should be:
|
|
+ * SLI3 pt2pt
|
|
+ * Issue CONFIG_LINK mbox
|
|
+ * CONFIG_LINK cmpl
|
|
+ * SLI4 pt2pt
|
|
+ * Issue REG_VFI mbox
|
|
+ * REG_VFI cmpl
|
|
+ * SLI4
|
|
+ * Issue UNREG RPI mbx
|
|
+ * UNREG RPI cmpl
|
|
+ * Issue REG_RPI mbox
|
|
+ * REG RPI cmpl
|
|
+ * Issue PLOGI ACC
|
|
+ * PLOGI ACC cmpl
|
|
+ */
|
|
+ login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
|
|
+ login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
|
|
+ login_mbox->context3 = save_iocb; /* For PLOGI ACC */
|
|
|
|
- /* Start the ball rolling by issuing CONFIG_LINK here */
|
|
- rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
|
|
- if (rc == MBX_NOT_FINISHED)
|
|
- goto out;
|
|
- return 1;
|
|
- }
|
|
+ spin_lock_irq(&ndlp->lock);
|
|
+ ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
|
|
+ spin_unlock_irq(&ndlp->lock);
|
|
+
|
|
+ /* Start the ball rolling by issuing REG_LOGIN here */
|
|
+ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
|
|
+ if (rc == MBX_NOT_FINISHED)
|
|
+ goto out;
|
|
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
|
|
|
|
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
|
|
- if (rc)
|
|
- mempool_free(login_mbox, phba->mbox_mem_pool);
|
|
return 1;
|
|
out:
|
|
- if (defer_acc)
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
- "4577 discovery failure: %p %p %p\n",
|
|
- save_iocb, link_mbox, login_mbox);
|
|
kfree(save_iocb);
|
|
- if (link_mbox)
|
|
- mempool_free(link_mbox, phba->mbox_mem_pool);
|
|
if (login_mbox)
|
|
mempool_free(login_mbox, phba->mbox_mem_pool);
|
|
|
|
@@ -901,9 +802,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
}
|
|
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
|
|
((ndlp->nlp_type & NLP_FCP_TARGET) ||
|
|
- !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
|
|
+ (ndlp->nlp_type & NLP_NVME_TARGET) ||
|
|
+ (vport->fc_flag & FC_PT2PT))) ||
|
|
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
|
|
- /* Only try to re-login if this is NOT a Fabric Node */
|
|
+ /* Only try to re-login if this is NOT a Fabric Node
|
|
+ * AND the remote NPORT is a FCP/NVME Target or we
|
|
+ * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
|
|
+ * case for LOGO as a response to ADISC behavior.
|
|
+ */
|
|
mod_timer(&ndlp->nlp_delayfunc,
|
|
jiffies + msecs_to_jiffies(1000 * 1));
|
|
spin_lock_irq(&ndlp->lock);
|
|
@@ -1968,8 +1874,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
|
|
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
|
|
|
|
lpfc_issue_els_logo(vport, ndlp, 0);
|
|
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
|
|
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
|
|
return ndlp->nlp_state;
|
|
}
|
|
|
|
@@ -2614,12 +2518,10 @@ static uint32_t
|
|
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
void *arg, uint32_t evt)
|
|
{
|
|
- struct lpfc_hba *phba = vport->phba;
|
|
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
|
|
|
|
/* flush the target */
|
|
- lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
|
|
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
|
|
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
|
|
|
|
/* Treat like rcv logo */
|
|
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
|
|
index a71df8788fff3..0dbe1d3993781 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
|
|
@@ -3299,7 +3299,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
|
|
bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
|
|
|
|
/* Word 10 */
|
|
- bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
|
|
bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
|
|
bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
|
|
LPFC_WQE_LENLOC_WORD12);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
|
|
index 95caad764fb7b..3037a928eefce 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_sli.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_sli.c
|
|
@@ -5678,12 +5678,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
|
|
phba->sli4_hba.lnk_info.lnk_no,
|
|
phba->BIOSVersion);
|
|
out_free_mboxq:
|
|
- if (rc != MBX_TIMEOUT) {
|
|
- if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
|
|
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
|
- else
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
- }
|
|
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
|
|
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
|
+ else
|
|
+ mempool_free(mboxq, phba->mbox_mem_pool);
|
|
return rc;
|
|
}
|
|
|
|
@@ -5784,12 +5782,10 @@ retrieve_ppname:
|
|
}
|
|
|
|
out_free_mboxq:
|
|
- if (rc != MBX_TIMEOUT) {
|
|
- if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
|
|
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
|
- else
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
- }
|
|
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
|
|
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
|
|
+ else
|
|
+ mempool_free(mboxq, phba->mbox_mem_pool);
|
|
return rc;
|
|
}
|
|
|
|
@@ -11639,7 +11635,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|
icmd = &cmdiocb->iocb;
|
|
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
|
|
icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
|
|
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
|
|
+ cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
|
|
return IOCB_ABORTING;
|
|
|
|
if (!pring) {
|
|
@@ -11937,7 +11933,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
/**
|
|
* lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
|
|
* @vport: Pointer to virtual port.
|
|
- * @pring: Pointer to driver SLI ring object.
|
|
* @tgt_id: SCSI ID of the target.
|
|
* @lun_id: LUN ID of the scsi device.
|
|
* @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
|
|
@@ -11952,18 +11947,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
* FCP iocbs associated with SCSI target specified by tgt_id parameter.
|
|
* When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
|
|
* FCP iocbs associated with virtual port.
|
|
+ * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
|
|
+ * lpfc_sli4_calc_ring is used.
|
|
* This function returns number of iocbs it failed to abort.
|
|
* This function is called with no locks held.
|
|
**/
|
|
int
|
|
-lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
|
|
- uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
|
|
+lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
|
|
+ lpfc_ctx_cmd abort_cmd)
|
|
{
|
|
struct lpfc_hba *phba = vport->phba;
|
|
+ struct lpfc_sli_ring *pring = NULL;
|
|
struct lpfc_iocbq *iocbq;
|
|
int errcnt = 0, ret_val = 0;
|
|
unsigned long iflags;
|
|
int i;
|
|
+ void *fcp_cmpl = NULL;
|
|
|
|
/* all I/Os are in process of being flushed */
|
|
if (phba->hba_flag & HBA_IOQ_FLUSH)
|
|
@@ -11977,8 +11976,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
|
|
continue;
|
|
|
|
spin_lock_irqsave(&phba->hbalock, iflags);
|
|
+ if (phba->sli_rev == LPFC_SLI_REV3) {
|
|
+ pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
|
|
+ fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
|
|
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
|
|
+ pring = lpfc_sli4_calc_ring(phba, iocbq);
|
|
+ fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
|
|
+ }
|
|
ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
|
|
- lpfc_sli_abort_fcp_cmpl);
|
|
+ fcp_cmpl);
|
|
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
if (ret_val != IOCB_SUCCESS)
|
|
errcnt++;
|
|
@@ -17031,8 +17037,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|
"2509 RQ_DESTROY mailbox failed with "
|
|
"status x%x add_status x%x, mbx status x%x\n",
|
|
shdr_status, shdr_add_status, rc);
|
|
- if (rc != MBX_TIMEOUT)
|
|
- mempool_free(mbox, hrq->phba->mbox_mem_pool);
|
|
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
|
|
return -ENXIO;
|
|
}
|
|
bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
|
|
@@ -17129,7 +17134,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
|
|
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
|
|
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
|
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
|
|
- if (rc != MBX_TIMEOUT)
|
|
+ if (!phba->sli4_hba.intr_enable)
|
|
+ mempool_free(mbox, phba->mbox_mem_pool);
|
|
+ else if (rc != MBX_TIMEOUT)
|
|
mempool_free(mbox, phba->mbox_mem_pool);
|
|
if (shdr_status || shdr_add_status || rc) {
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
@@ -17326,7 +17333,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
|
|
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
|
|
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
|
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
|
|
- if (rc != MBX_TIMEOUT)
|
|
+ if (!phba->sli4_hba.intr_enable)
|
|
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
|
|
+ else if (rc != MBX_TIMEOUT)
|
|
lpfc_sli4_mbox_cmd_free(phba, mbox);
|
|
if (shdr_status || shdr_add_status || rc) {
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
@@ -17439,7 +17448,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
|
|
shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
|
|
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
|
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
|
|
- if (rc != MBX_TIMEOUT)
|
|
+ if (!phba->sli4_hba.intr_enable)
|
|
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
|
|
+ else if (rc != MBX_TIMEOUT)
|
|
lpfc_sli4_mbox_cmd_free(phba, mbox);
|
|
if (shdr_status || shdr_add_status || rc) {
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
@@ -18023,7 +18034,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
|
|
if (cmd_iocbq) {
|
|
ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
|
|
lpfc_nlp_put(ndlp);
|
|
- lpfc_nlp_not_used(ndlp);
|
|
lpfc_sli_release_iocbq(phba, cmd_iocbq);
|
|
}
|
|
|
|
@@ -18790,8 +18800,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
|
|
shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
|
|
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
|
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
|
|
- if (rc != MBX_TIMEOUT)
|
|
- mempool_free(mboxq, phba->mbox_mem_pool);
|
|
+ mempool_free(mboxq, phba->mbox_mem_pool);
|
|
if (shdr_status || shdr_add_status || rc) {
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
"2514 POST_RPI_HDR mailbox failed with "
|
|
@@ -20035,7 +20044,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
|
|
break;
|
|
}
|
|
}
|
|
- if (rc != MBX_TIMEOUT)
|
|
+ if (!phba->sli4_hba.intr_enable)
|
|
+ mempool_free(mbox, phba->mbox_mem_pool);
|
|
+ else if (rc != MBX_TIMEOUT)
|
|
mempool_free(mbox, phba->mbox_mem_pool);
|
|
if (shdr_status || shdr_add_status || rc) {
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
index 340d435ac0ce3..4ee29cccf0ead 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
@@ -7235,6 +7235,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
|
|
|
|
ioc_info(ioc, "sending diag reset !!\n");
|
|
|
|
+ pci_cfg_access_lock(ioc->pdev);
|
|
+
|
|
drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
|
|
|
|
count = 0;
|
|
@@ -7325,10 +7327,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
|
|
goto out;
|
|
}
|
|
|
|
+ pci_cfg_access_unlock(ioc->pdev);
|
|
ioc_info(ioc, "diag reset: SUCCESS\n");
|
|
return 0;
|
|
|
|
out:
|
|
+ pci_cfg_access_unlock(ioc->pdev);
|
|
ioc_err(ioc, "diag reset: FAILED\n");
|
|
return -EFAULT;
|
|
}
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
index 72439d6aa0578..712a6ee2fafbb 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
@@ -6475,6 +6475,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
|
|
if (!vphy)
|
|
return NULL;
|
|
|
|
+ if (!port->vphys_mask)
|
|
+ INIT_LIST_HEAD(&port->vphys_list);
|
|
+
|
|
/*
|
|
* Enable bit corresponding to HBA phy number on its
|
|
* parent hba_port object's vphys_mask field.
|
|
@@ -6482,7 +6485,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
|
|
port->vphys_mask |= (1 << phy_num);
|
|
vphy->phy_mask |= (1 << phy_num);
|
|
|
|
- INIT_LIST_HEAD(&port->vphys_list);
|
|
list_add_tail(&vphy->list, &port->vphys_list);
|
|
|
|
ioc_info(ioc,
|
|
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
|
|
index ab45ac1e5a72c..6a2c4a6fcded8 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_attr.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_attr.c
|
|
@@ -2855,6 +2855,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
|
|
vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
|
|
|
|
if (IS_FWI2_CAPABLE(ha)) {
|
|
+ int rval;
|
|
+
|
|
stats = dma_alloc_coherent(&ha->pdev->dev,
|
|
sizeof(*stats), &stats_dma, GFP_KERNEL);
|
|
if (!stats) {
|
|
@@ -2864,7 +2866,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
|
|
}
|
|
|
|
/* reset firmware statistics */
|
|
- qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
|
|
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
|
|
+ if (rval != QLA_SUCCESS)
|
|
+ ql_log(ql_log_warn, vha, 0x70de,
|
|
+ "Resetting ISP statistics failed: rval = %d\n",
|
|
+ rval);
|
|
|
|
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
|
|
stats, stats_dma);
|
|
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
|
|
index 23b604832a54d..7fa085969a63a 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_bsg.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
|
|
@@ -24,10 +24,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
|
|
struct bsg_job *bsg_job = sp->u.bsg_job;
|
|
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
|
|
|
|
+ sp->free(sp);
|
|
+
|
|
bsg_reply->result = res;
|
|
bsg_job_done(bsg_job, bsg_reply->result,
|
|
bsg_reply->reply_payload_rcv_len);
|
|
- sp->free(sp);
|
|
}
|
|
|
|
void qla2x00_bsg_sp_free(srb_t *sp)
|
|
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
|
|
index f9142dbec112c..4a3809a8da4b0 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_isr.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_isr.c
|
|
@@ -3978,11 +3978,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
|
|
if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
|
|
/* user wants to control IRQ setting for target mode */
|
|
ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
|
|
- min((u16)ha->msix_count, (u16)num_online_cpus()),
|
|
+ min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
|
|
PCI_IRQ_MSIX);
|
|
} else
|
|
ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
|
|
- min((u16)ha->msix_count, (u16)num_online_cpus()),
|
|
+ min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
|
|
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
|
|
&desc);
|
|
|
|
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
|
|
index 0e0fe5b094966..b22114ef962a6 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_os.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_os.c
|
|
@@ -1008,8 +1008,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
|
|
if (rval != QLA_SUCCESS) {
|
|
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
|
|
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
|
|
- if (rval == QLA_INTERFACE_ERROR)
|
|
- goto qc24_free_sp_fail_command;
|
|
goto qc24_host_busy_free_sp;
|
|
}
|
|
|
|
@@ -1021,11 +1019,6 @@ qc24_host_busy_free_sp:
|
|
qc24_target_busy:
|
|
return SCSI_MLQUEUE_TARGET_BUSY;
|
|
|
|
-qc24_free_sp_fail_command:
|
|
- sp->free(sp);
|
|
- CMD_SP(cmd) = NULL;
|
|
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
|
|
-
|
|
qc24_fail_command:
|
|
cmd->scsi_done(cmd);
|
|
|
|
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
index c53f456fbd094..5ff14b409c23d 100644
|
|
--- a/drivers/scsi/smartpqi/smartpqi_init.c
|
|
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
@@ -5489,6 +5489,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
|
|
|
|
list_del(&io_request->request_list_entry);
|
|
set_host_byte(scmd, DID_RESET);
|
|
+ pqi_free_io_request(io_request);
|
|
+ scsi_dma_unmap(scmd);
|
|
pqi_scsi_done(scmd);
|
|
}
|
|
|
|
@@ -5525,6 +5527,8 @@ static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
|
|
|
|
list_del(&io_request->request_list_entry);
|
|
set_host_byte(scmd, DID_RESET);
|
|
+ pqi_free_io_request(io_request);
|
|
+ scsi_dma_unmap(scmd);
|
|
pqi_scsi_done(scmd);
|
|
}
|
|
|
|
@@ -6599,6 +6603,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
|
|
shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
|
|
shost->unique_id = shost->irq;
|
|
shost->nr_hw_queues = ctrl_info->num_queue_groups;
|
|
+ shost->host_tagset = 1;
|
|
shost->hostdata[0] = (unsigned long)ctrl_info;
|
|
|
|
rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
|
|
@@ -8217,6 +8222,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
0x152d, 0x8a37)
|
|
},
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x193d, 0x8460)
|
|
+ },
|
|
{
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
0x193d, 0x1104)
|
|
@@ -8289,6 +8298,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
0x1bd4, 0x004f)
|
|
},
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1bd4, 0x0051)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1bd4, 0x0052)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1bd4, 0x0053)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1bd4, 0x0054)
|
|
+ },
|
|
{
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
0x19e5, 0xd227)
|
|
@@ -8449,6 +8474,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
|
|
},
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1400)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1402)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1410)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1411)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1412)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1420)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1430)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1440)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1441)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1450)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1452)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1460)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1461)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1462)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1470)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1471)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1472)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1480)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1490)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x1491)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
|
|
+ },
|
|
{
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
PCI_VENDOR_ID_ADVANTECH, 0x8312)
|
|
@@ -8513,6 +8654,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
PCI_VENDOR_ID_HP, 0x1001)
|
|
},
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ PCI_VENDOR_ID_HP, 0x1002)
|
|
+ },
|
|
{
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
PCI_VENDOR_ID_HP, 0x1100)
|
|
@@ -8521,6 +8666,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
PCI_VENDOR_ID_HP, 0x1101)
|
|
},
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1590, 0x0294)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1590, 0x02db)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1590, 0x02dc)
|
|
+ },
|
|
+ {
|
|
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
+ 0x1590, 0x032e)
|
|
+ },
|
|
{
|
|
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
|
|
0x1d8d, 0x0800)
|
|
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
|
|
index df9a5ca8c99c4..0118bd986f902 100644
|
|
--- a/drivers/soc/tegra/pmc.c
|
|
+++ b/drivers/soc/tegra/pmc.c
|
|
@@ -317,6 +317,8 @@ struct tegra_pmc_soc {
|
|
bool invert);
|
|
int (*irq_set_wake)(struct irq_data *data, unsigned int on);
|
|
int (*irq_set_type)(struct irq_data *data, unsigned int type);
|
|
+ int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
|
|
+ bool new_state);
|
|
|
|
const char * const *reset_sources;
|
|
unsigned int num_reset_sources;
|
|
@@ -517,6 +519,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
|
|
return -ENODEV;
|
|
}
|
|
|
|
+static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
|
|
+ bool new_state)
|
|
+{
|
|
+ unsigned int retries = 100;
|
|
+ bool status;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * As per TRM documentation, the toggle command will be dropped by PMC
|
|
+ * if there is contention with a HW-initiated toggling (i.e. CPU core
|
|
+ * power-gated), the command should be retried in that case.
|
|
+ */
|
|
+ do {
|
|
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
|
|
+
|
|
+ /* wait for PMC to execute the command */
|
|
+ ret = readx_poll_timeout(tegra_powergate_state, id, status,
|
|
+ status == new_state, 1, 10);
|
|
+ } while (ret == -ETIMEDOUT && retries--);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
|
|
+{
|
|
+ return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
|
|
+}
|
|
+
|
|
+static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
|
|
+ bool new_state)
|
|
+{
|
|
+ bool status;
|
|
+ int err;
|
|
+
|
|
+ /* wait while PMC power gating is contended */
|
|
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
|
|
+ status == true, 1, 100);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
|
|
+
|
|
+ /* wait for PMC to accept the command */
|
|
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
|
|
+ status == true, 1, 100);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ /* wait for PMC to execute the command */
|
|
+ err = readx_poll_timeout(tegra_powergate_state, id, status,
|
|
+ status == new_state, 10, 100000);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/**
|
|
* tegra_powergate_set() - set the state of a partition
|
|
* @pmc: power management controller
|
|
@@ -526,7 +585,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
|
|
static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
|
|
bool new_state)
|
|
{
|
|
- bool status;
|
|
int err;
|
|
|
|
if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
|
|
@@ -539,10 +597,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
|
|
return 0;
|
|
}
|
|
|
|
- tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
|
|
-
|
|
- err = readx_poll_timeout(tegra_powergate_state, id, status,
|
|
- status == new_state, 10, 100000);
|
|
+ err = pmc->soc->powergate_set(pmc, id, new_state);
|
|
|
|
mutex_unlock(&pmc->powergates_lock);
|
|
|
|
@@ -2699,6 +2754,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
|
|
.regs = &tegra20_pmc_regs,
|
|
.init = tegra20_pmc_init,
|
|
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
|
|
+ .powergate_set = tegra20_powergate_set,
|
|
.reset_sources = NULL,
|
|
.num_reset_sources = 0,
|
|
.reset_levels = NULL,
|
|
@@ -2757,6 +2813,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
|
|
.regs = &tegra20_pmc_regs,
|
|
.init = tegra20_pmc_init,
|
|
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
|
|
+ .powergate_set = tegra20_powergate_set,
|
|
.reset_sources = tegra30_reset_sources,
|
|
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
|
|
.reset_levels = NULL,
|
|
@@ -2811,6 +2868,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
|
|
.regs = &tegra20_pmc_regs,
|
|
.init = tegra20_pmc_init,
|
|
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
|
|
+ .powergate_set = tegra114_powergate_set,
|
|
.reset_sources = tegra30_reset_sources,
|
|
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
|
|
.reset_levels = NULL,
|
|
@@ -2925,6 +2983,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
|
|
.regs = &tegra20_pmc_regs,
|
|
.init = tegra20_pmc_init,
|
|
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
|
|
+ .powergate_set = tegra114_powergate_set,
|
|
.reset_sources = tegra30_reset_sources,
|
|
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
|
|
.reset_levels = NULL,
|
|
@@ -3048,6 +3107,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
|
|
.regs = &tegra20_pmc_regs,
|
|
.init = tegra20_pmc_init,
|
|
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
|
|
+ .powergate_set = tegra114_powergate_set,
|
|
.irq_set_wake = tegra210_pmc_irq_set_wake,
|
|
.irq_set_type = tegra210_pmc_irq_set_type,
|
|
.reset_sources = tegra210_reset_sources,
|
|
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
|
|
index 580660599f461..c6d421a4b91b6 100644
|
|
--- a/drivers/soundwire/cadence_master.c
|
|
+++ b/drivers/soundwire/cadence_master.c
|
|
@@ -1449,10 +1449,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
|
|
}
|
|
|
|
/* Prepare slaves for clock stop */
|
|
- ret = sdw_bus_prep_clk_stop(&cdns->bus);
|
|
- if (ret < 0) {
|
|
- dev_err(cdns->dev, "prepare clock stop failed %d", ret);
|
|
- return ret;
|
|
+ if (slave_present) {
|
|
+ ret = sdw_bus_prep_clk_stop(&cdns->bus);
|
|
+ if (ret < 0 && ret != -ENODATA) {
|
|
+ dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
|
|
index eb9a243e95265..98ace748cd986 100644
|
|
--- a/drivers/spi/spi-ath79.c
|
|
+++ b/drivers/spi/spi-ath79.c
|
|
@@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
|
|
|
|
master->use_gpio_descriptors = true;
|
|
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
|
|
- master->setup = spi_bitbang_setup;
|
|
- master->cleanup = spi_bitbang_cleanup;
|
|
+ master->flags = SPI_MASTER_GPIO_SS;
|
|
if (pdata) {
|
|
master->bus_num = pdata->bus_num;
|
|
master->num_chipselect = pdata->num_chipselect;
|
|
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
|
|
index 75b33d7d14b04..9a4d942fafcf5 100644
|
|
--- a/drivers/spi/spi-dln2.c
|
|
+++ b/drivers/spi/spi-dln2.c
|
|
@@ -780,7 +780,7 @@ exit_free_master:
|
|
|
|
static int dln2_spi_remove(struct platform_device *pdev)
|
|
{
|
|
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
struct dln2_spi *dln2 = spi_master_get_devdata(master);
|
|
|
|
pm_runtime_disable(&pdev->dev);
|
|
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
|
|
index 36a4922a134a1..ccd817ee4917b 100644
|
|
--- a/drivers/spi/spi-omap-100k.c
|
|
+++ b/drivers/spi/spi-omap-100k.c
|
|
@@ -424,7 +424,7 @@ err:
|
|
|
|
static int omap1_spi100k_remove(struct platform_device *pdev)
|
|
{
|
|
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
|
|
|
|
pm_runtime_disable(&pdev->dev);
|
|
@@ -438,7 +438,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
|
|
#ifdef CONFIG_PM
|
|
static int omap1_spi100k_runtime_suspend(struct device *dev)
|
|
{
|
|
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
|
|
+ struct spi_master *master = dev_get_drvdata(dev);
|
|
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
|
|
|
|
clk_disable_unprepare(spi100k->ick);
|
|
@@ -449,7 +449,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
|
|
|
|
static int omap1_spi100k_runtime_resume(struct device *dev)
|
|
{
|
|
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
|
|
+ struct spi_master *master = dev_get_drvdata(dev);
|
|
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
|
|
int ret;
|
|
|
|
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
|
|
index 8dcb2e70735c9..d39dec6d1c91e 100644
|
|
--- a/drivers/spi/spi-qup.c
|
|
+++ b/drivers/spi/spi-qup.c
|
|
@@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct platform_device *pdev)
|
|
struct spi_qup *controller = spi_master_get_devdata(master);
|
|
int ret;
|
|
|
|
- ret = pm_runtime_get_sync(&pdev->dev);
|
|
+ ret = pm_runtime_resume_and_get(&pdev->dev);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
|
|
index 947e6b9dc9f4d..2786470a52011 100644
|
|
--- a/drivers/spi/spi-stm32-qspi.c
|
|
+++ b/drivers/spi/spi-stm32-qspi.c
|
|
@@ -727,21 +727,31 @@ static int __maybe_unused stm32_qspi_suspend(struct device *dev)
|
|
{
|
|
pinctrl_pm_select_sleep_state(dev);
|
|
|
|
- return 0;
|
|
+ return pm_runtime_force_suspend(dev);
|
|
}
|
|
|
|
static int __maybe_unused stm32_qspi_resume(struct device *dev)
|
|
{
|
|
struct stm32_qspi *qspi = dev_get_drvdata(dev);
|
|
+ int ret;
|
|
+
|
|
+ ret = pm_runtime_force_resume(dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
|
|
pinctrl_pm_select_default_state(dev);
|
|
- clk_prepare_enable(qspi->clk);
|
|
+
|
|
+ ret = pm_runtime_get_sync(dev);
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_noidle(dev);
|
|
+ return ret;
|
|
+ }
|
|
|
|
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
|
|
writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
|
|
|
|
- pm_runtime_mark_last_busy(qspi->dev);
|
|
- pm_runtime_put_autosuspend(qspi->dev);
|
|
+ pm_runtime_mark_last_busy(dev);
|
|
+ pm_runtime_put_autosuspend(dev);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
|
|
index 9417385c09217..e06aafe169e0c 100644
|
|
--- a/drivers/spi/spi-ti-qspi.c
|
|
+++ b/drivers/spi/spi-ti-qspi.c
|
|
@@ -733,6 +733,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
|
|
return 0;
|
|
}
|
|
|
|
+static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
|
|
+{
|
|
+ if (qspi->rx_bb_addr)
|
|
+ dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
|
|
+ qspi->rx_bb_addr,
|
|
+ qspi->rx_bb_dma_addr);
|
|
+
|
|
+ if (qspi->rx_chan)
|
|
+ dma_release_channel(qspi->rx_chan);
|
|
+}
|
|
+
|
|
static const struct of_device_id ti_qspi_match[] = {
|
|
{.compatible = "ti,dra7xxx-qspi" },
|
|
{.compatible = "ti,am4372-qspi" },
|
|
@@ -886,6 +897,8 @@ no_dma:
|
|
if (!ret)
|
|
return 0;
|
|
|
|
+ ti_qspi_dma_cleanup(qspi);
|
|
+
|
|
pm_runtime_disable(&pdev->dev);
|
|
free_master:
|
|
spi_master_put(master);
|
|
@@ -904,12 +917,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
|
|
pm_runtime_put_sync(&pdev->dev);
|
|
pm_runtime_disable(&pdev->dev);
|
|
|
|
- if (qspi->rx_bb_addr)
|
|
- dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
|
|
- qspi->rx_bb_addr,
|
|
- qspi->rx_bb_dma_addr);
|
|
- if (qspi->rx_chan)
|
|
- dma_release_channel(qspi->rx_chan);
|
|
+ ti_qspi_dma_cleanup(qspi);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
|
|
index ccca3a7409fac..6f81a3c4c7e04 100644
|
|
--- a/drivers/spi/spi.c
|
|
+++ b/drivers/spi/spi.c
|
|
@@ -795,7 +795,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
-static void spi_set_cs(struct spi_device *spi, bool enable)
|
|
+static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
|
|
{
|
|
bool enable1 = enable;
|
|
|
|
@@ -803,7 +803,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
|
|
* Avoid calling into the driver (or doing delays) if the chip select
|
|
* isn't actually changing from the last time this was called.
|
|
*/
|
|
- if ((spi->controller->last_cs_enable == enable) &&
|
|
+ if (!force && (spi->controller->last_cs_enable == enable) &&
|
|
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
|
|
return;
|
|
|
|
@@ -1251,7 +1251,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
|
|
struct spi_statistics *statm = &ctlr->statistics;
|
|
struct spi_statistics *stats = &msg->spi->statistics;
|
|
|
|
- spi_set_cs(msg->spi, true);
|
|
+ spi_set_cs(msg->spi, true, false);
|
|
|
|
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
|
|
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
|
|
@@ -1319,9 +1319,9 @@ fallback_pio:
|
|
&msg->transfers)) {
|
|
keep_cs = true;
|
|
} else {
|
|
- spi_set_cs(msg->spi, false);
|
|
+ spi_set_cs(msg->spi, false, false);
|
|
_spi_transfer_cs_change_delay(msg, xfer);
|
|
- spi_set_cs(msg->spi, true);
|
|
+ spi_set_cs(msg->spi, true, false);
|
|
}
|
|
}
|
|
|
|
@@ -1330,7 +1330,7 @@ fallback_pio:
|
|
|
|
out:
|
|
if (ret != 0 || !keep_cs)
|
|
- spi_set_cs(msg->spi, false);
|
|
+ spi_set_cs(msg->spi, false, false);
|
|
|
|
if (msg->status == -EINPROGRESS)
|
|
msg->status = ret;
|
|
@@ -3410,11 +3410,11 @@ int spi_setup(struct spi_device *spi)
|
|
*/
|
|
status = 0;
|
|
|
|
- spi_set_cs(spi, false);
|
|
+ spi_set_cs(spi, false, true);
|
|
pm_runtime_mark_last_busy(spi->controller->dev.parent);
|
|
pm_runtime_put_autosuspend(spi->controller->dev.parent);
|
|
} else {
|
|
- spi_set_cs(spi, false);
|
|
+ spi_set_cs(spi, false, true);
|
|
}
|
|
|
|
mutex_unlock(&spi->controller->io_mutex);
|
|
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
|
|
index 453bb69135505..f1e6b25978534 100644
|
|
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
|
|
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
|
|
@@ -221,6 +221,9 @@ int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
|
|
unsigned long irqflags;
|
|
int err = 0;
|
|
|
|
+ if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
|
|
+ return -EINVAL;
|
|
+
|
|
while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
|
|
struct videobuf_buffer *vb;
|
|
|
|
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
|
|
index c1931eb2540e3..b2f2cb3d6a609 100644
|
|
--- a/drivers/staging/media/imx/imx-media-capture.c
|
|
+++ b/drivers/staging/media/imx/imx-media-capture.c
|
|
@@ -557,7 +557,7 @@ static int capture_validate_fmt(struct capture_priv *priv)
|
|
priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
|
|
priv->vdev.cc->cs != cc->cs ||
|
|
priv->vdev.compose.width != compose.width ||
|
|
- priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
|
|
+ priv->vdev.compose.height != compose.height) ? -EPIPE : 0;
|
|
}
|
|
|
|
static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
|
|
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
|
|
index 4dc8d9165f634..e0179616a29cf 100644
|
|
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
|
|
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
|
|
@@ -686,6 +686,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
|
|
|
|
dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
|
|
|
|
+ css_q = imgu_node_to_queue(node);
|
|
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
|
|
unsigned int inode = imgu_map_node(imgu, i);
|
|
|
|
@@ -693,6 +694,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
|
|
if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
|
|
continue;
|
|
|
|
+ /* CSS expects some format on OUT queue */
|
|
+ if (i != IPU3_CSS_QUEUE_OUT &&
|
|
+ !imgu_pipe->nodes[inode].enabled) {
|
|
+ fmts[i] = NULL;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (i == css_q) {
|
|
+ fmts[i] = &f->fmt.pix_mp;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
if (try) {
|
|
fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
|
|
sizeof(struct v4l2_pix_format_mplane),
|
|
@@ -705,10 +718,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
|
|
fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
|
|
}
|
|
|
|
- /* CSS expects some format on OUT queue */
|
|
- if (i != IPU3_CSS_QUEUE_OUT &&
|
|
- !imgu_pipe->nodes[inode].enabled)
|
|
- fmts[i] = NULL;
|
|
}
|
|
|
|
if (!try) {
|
|
@@ -725,16 +734,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
|
|
rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
|
|
}
|
|
|
|
- /*
|
|
- * imgu doesn't set the node to the value given by user
|
|
- * before we return success from this function, so set it here.
|
|
- */
|
|
- css_q = imgu_node_to_queue(node);
|
|
if (!fmts[css_q]) {
|
|
ret = -EINVAL;
|
|
goto out;
|
|
}
|
|
- *fmts[css_q] = f->fmt.pix_mp;
|
|
|
|
if (try)
|
|
ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
|
|
@@ -745,15 +748,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
|
|
if (ret < 0)
|
|
goto out;
|
|
|
|
- if (try)
|
|
- f->fmt.pix_mp = *fmts[css_q];
|
|
- else
|
|
- f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
|
|
+ /*
|
|
+ * imgu doesn't set the node to the value given by user
|
|
+ * before we return success from this function, so set it here.
|
|
+ */
|
|
+ if (!try)
|
|
+ imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
|
|
|
|
out:
|
|
if (try) {
|
|
for (i = 0; i < IPU3_CSS_QUEUES; i++)
|
|
- kfree(fmts[i]);
|
|
+ if (i != css_q)
|
|
+ kfree(fmts[i]);
|
|
}
|
|
|
|
return ret;
|
|
diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
|
|
index fbddf2e18c142..44698a1aae87a 100644
|
|
--- a/drivers/staging/wimax/i2400m/op-rfkill.c
|
|
+++ b/drivers/staging/wimax/i2400m/op-rfkill.c
|
|
@@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
|
|
if (cmd == NULL)
|
|
goto error_alloc;
|
|
cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
|
|
- cmd->hdr.length = sizeof(cmd->sw_rf);
|
|
+ cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
|
|
cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
|
|
cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
|
|
cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
|
|
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
|
|
index 0689d550c37ab..328ed12e2d59c 100644
|
|
--- a/drivers/target/target_core_pscsi.c
|
|
+++ b/drivers/target/target_core_pscsi.c
|
|
@@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
|
|
unsigned char *buf;
|
|
|
|
buf = transport_kmap_data_sg(cmd);
|
|
- if (!buf)
|
|
+ if (!buf) {
|
|
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
|
|
+ }
|
|
|
|
if (cdb[0] == MODE_SENSE_10) {
|
|
if (!(buf[3] & 0x80))
|
|
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
|
|
index cf4718c6d35da..63542c1cc2914 100644
|
|
--- a/drivers/tee/optee/core.c
|
|
+++ b/drivers/tee/optee/core.c
|
|
@@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
|
|
return rc;
|
|
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
|
|
p->u.memref.shm = shm;
|
|
-
|
|
- /* Check that the memref is covered by the shm object */
|
|
- if (p->u.memref.size) {
|
|
- size_t o = p->u.memref.shm_offs +
|
|
- p->u.memref.size - 1;
|
|
-
|
|
- rc = tee_shm_get_pa(shm, o, NULL);
|
|
- if (rc)
|
|
- return rc;
|
|
- }
|
|
break;
|
|
case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
|
|
case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
|
|
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
|
|
index ddc166e3a93eb..3f6a69ccc1737 100644
|
|
--- a/drivers/thermal/cpufreq_cooling.c
|
|
+++ b/drivers/thermal/cpufreq_cooling.c
|
|
@@ -123,7 +123,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
|
|
{
|
|
int i;
|
|
|
|
- for (i = cpufreq_cdev->max_level; i >= 0; i--) {
|
|
+ for (i = cpufreq_cdev->max_level; i > 0; i--) {
|
|
if (power >= cpufreq_cdev->em->table[i].power)
|
|
break;
|
|
}
|
|
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
|
|
index aaa07180ab482..645432ce63659 100644
|
|
--- a/drivers/thermal/gov_fair_share.c
|
|
+++ b/drivers/thermal/gov_fair_share.c
|
|
@@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
|
|
int total_instance = 0;
|
|
int cur_trip_level = get_trip_level(tz);
|
|
|
|
+ mutex_lock(&tz->lock);
|
|
+
|
|
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
|
|
if (instance->trip != trip)
|
|
continue;
|
|
@@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
|
|
mutex_unlock(&instance->cdev->lock);
|
|
thermal_cdev_update(cdev);
|
|
}
|
|
+
|
|
+ mutex_unlock(&tz->lock);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
|
|
index 51dafc06f5414..2406653d38b78 100644
|
|
--- a/drivers/tty/n_gsm.c
|
|
+++ b/drivers/tty/n_gsm.c
|
|
@@ -2384,8 +2384,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
|
|
/* Don't register device 0 - this is the control channel and not
|
|
a usable tty interface */
|
|
base = mux_num_to_base(gsm); /* Base for this MUX */
|
|
- for (i = 1; i < NUM_DLCI; i++)
|
|
- tty_register_device(gsm_tty_driver, base + i, NULL);
|
|
+ for (i = 1; i < NUM_DLCI; i++) {
|
|
+ struct device *dev;
|
|
+
|
|
+ dev = tty_register_device(gsm_tty_driver,
|
|
+ base + i, NULL);
|
|
+ if (IS_ERR(dev)) {
|
|
+ for (i--; i >= 1; i--)
|
|
+ tty_unregister_device(gsm_tty_driver,
|
|
+ base + i);
|
|
+ return PTR_ERR(dev);
|
|
+ }
|
|
+ }
|
|
}
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
|
|
index d04a162939a4d..8f88ee2a2c8d0 100644
|
|
--- a/drivers/tty/vt/vt.c
|
|
+++ b/drivers/tty/vt/vt.c
|
|
@@ -1382,6 +1382,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
|
|
atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, ¶m);
|
|
vcs_remove_sysfs(currcons);
|
|
visual_deinit(vc);
|
|
+ con_free_unimap(vc);
|
|
put_pid(vc->vt_pid);
|
|
vc_uniscr_set(vc, NULL);
|
|
kfree(vc->vc_screenbuf);
|
|
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
|
|
index 7f71218cc1e54..404507d1b76f1 100644
|
|
--- a/drivers/usb/core/hub.c
|
|
+++ b/drivers/usb/core/hub.c
|
|
@@ -3556,7 +3556,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
|
|
u16 portchange, portstatus;
|
|
|
|
if (!test_and_set_bit(port1, hub->child_usage_bits)) {
|
|
- status = pm_runtime_get_sync(&port_dev->dev);
|
|
+ status = pm_runtime_resume_and_get(&port_dev->dev);
|
|
if (status < 0) {
|
|
dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
|
|
status);
|
|
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
|
|
index 55f1d14fc4148..800c8b6c55ff1 100644
|
|
--- a/drivers/usb/dwc2/core_intr.c
|
|
+++ b/drivers/usb/dwc2/core_intr.c
|
|
@@ -307,6 +307,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
|
|
static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
|
|
{
|
|
int ret;
|
|
+ u32 hprt0;
|
|
|
|
/* Clear interrupt */
|
|
dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
|
|
@@ -327,6 +328,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
|
|
* established
|
|
*/
|
|
dwc2_hsotg_disconnect(hsotg);
|
|
+ } else {
|
|
+ /* Turn on the port power bit. */
|
|
+ hprt0 = dwc2_read_hprt0(hsotg);
|
|
+ hprt0 |= HPRT0_PWR;
|
|
+ dwc2_writel(hsotg, hprt0, HPRT0);
|
|
+ /* Connect hcd after port power is set. */
|
|
+ dwc2_hcd_connect(hsotg);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
|
|
index 3101f0dcf6ae8..e07fd5ee8ed95 100644
|
|
--- a/drivers/usb/dwc3/core.c
|
|
+++ b/drivers/usb/dwc3/core.c
|
|
@@ -114,6 +114,8 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
|
|
dwc->current_dr_role = mode;
|
|
}
|
|
|
|
+static int dwc3_core_soft_reset(struct dwc3 *dwc);
|
|
+
|
|
static void __dwc3_set_mode(struct work_struct *work)
|
|
{
|
|
struct dwc3 *dwc = work_to_dwc(work);
|
|
@@ -121,6 +123,8 @@ static void __dwc3_set_mode(struct work_struct *work)
|
|
int ret;
|
|
u32 reg;
|
|
|
|
+ mutex_lock(&dwc->mutex);
|
|
+
|
|
pm_runtime_get_sync(dwc->dev);
|
|
|
|
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
|
|
@@ -154,6 +158,25 @@ static void __dwc3_set_mode(struct work_struct *work)
|
|
break;
|
|
}
|
|
|
|
+ /* For DRD host or device mode only */
|
|
+ if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
|
|
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
|
|
+ reg |= DWC3_GCTL_CORESOFTRESET;
|
|
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
|
|
+
|
|
+ /*
|
|
+ * Wait for internal clocks to synchronized. DWC_usb31 and
|
|
+ * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
|
|
+ * keep it consistent across different IPs, let's wait up to
|
|
+ * 100ms before clearing GCTL.CORESOFTRESET.
|
|
+ */
|
|
+ msleep(100);
|
|
+
|
|
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
|
|
+ reg &= ~DWC3_GCTL_CORESOFTRESET;
|
|
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
|
|
+ }
|
|
+
|
|
spin_lock_irqsave(&dwc->lock, flags);
|
|
|
|
dwc3_set_prtcap(dwc, dwc->desired_dr_role);
|
|
@@ -178,6 +201,8 @@ static void __dwc3_set_mode(struct work_struct *work)
|
|
}
|
|
break;
|
|
case DWC3_GCTL_PRTCAP_DEVICE:
|
|
+ dwc3_core_soft_reset(dwc);
|
|
+
|
|
dwc3_event_buffers_setup(dwc);
|
|
|
|
if (dwc->usb2_phy)
|
|
@@ -200,6 +225,7 @@ static void __dwc3_set_mode(struct work_struct *work)
|
|
out:
|
|
pm_runtime_mark_last_busy(dwc->dev);
|
|
pm_runtime_put_autosuspend(dwc->dev);
|
|
+ mutex_unlock(&dwc->mutex);
|
|
}
|
|
|
|
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
|
|
@@ -1297,6 +1323,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
|
|
"snps,usb3_lpm_capable");
|
|
dwc->usb2_lpm_disable = device_property_read_bool(dev,
|
|
"snps,usb2-lpm-disable");
|
|
+ dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
|
|
+ "snps,usb2-gadget-lpm-disable");
|
|
device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
|
|
&rx_thr_num_pkt_prd);
|
|
device_property_read_u8(dev, "snps,rx-max-burst-prd",
|
|
@@ -1527,6 +1555,7 @@ static int dwc3_probe(struct platform_device *pdev)
|
|
dwc3_cache_hwparams(dwc);
|
|
|
|
spin_lock_init(&dwc->lock);
|
|
+ mutex_init(&dwc->mutex);
|
|
|
|
pm_runtime_set_active(dev);
|
|
pm_runtime_use_autosuspend(dev);
|
|
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
|
|
index 1b241f937d8f4..79e1b82e5e057 100644
|
|
--- a/drivers/usb/dwc3/core.h
|
|
+++ b/drivers/usb/dwc3/core.h
|
|
@@ -13,6 +13,7 @@
|
|
|
|
#include <linux/device.h>
|
|
#include <linux/spinlock.h>
|
|
+#include <linux/mutex.h>
|
|
#include <linux/ioport.h>
|
|
#include <linux/list.h>
|
|
#include <linux/bitops.h>
|
|
@@ -942,6 +943,7 @@ struct dwc3_scratchpad_array {
|
|
* @scratch_addr: dma address of scratchbuf
|
|
* @ep0_in_setup: one control transfer is completed and enter setup phase
|
|
* @lock: for synchronizing
|
|
+ * @mutex: for mode switching
|
|
* @dev: pointer to our struct device
|
|
* @sysdev: pointer to the DMA-capable device
|
|
* @xhci: pointer to our xHCI child
|
|
@@ -1026,7 +1028,8 @@ struct dwc3_scratchpad_array {
|
|
* @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
|
|
* not needed for DWC_usb31 version 1.70a-ea06 and below
|
|
* @usb3_lpm_capable: set if hadrware supports Link Power Management
|
|
- * @usb2_lpm_disable: set to disable usb2 lpm
|
|
+ * @usb2_lpm_disable: set to disable usb2 lpm for host
|
|
+ * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
|
|
* @disable_scramble_quirk: set if we enable the disable scramble quirk
|
|
* @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
|
|
* @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
|
|
@@ -1077,6 +1080,9 @@ struct dwc3 {
|
|
/* device lock */
|
|
spinlock_t lock;
|
|
|
|
+ /* mode switching lock */
|
|
+ struct mutex mutex;
|
|
+
|
|
struct device *dev;
|
|
struct device *sysdev;
|
|
|
|
@@ -1227,6 +1233,7 @@ struct dwc3 {
|
|
unsigned dis_start_transfer_quirk:1;
|
|
unsigned usb3_lpm_capable:1;
|
|
unsigned usb2_lpm_disable:1;
|
|
+ unsigned usb2_gadget_lpm_disable:1;
|
|
|
|
unsigned disable_scramble_quirk:1;
|
|
unsigned u2exit_lfps_quirk:1;
|
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
|
index 65ff41e3a18eb..84d1487e9f060 100644
|
|
--- a/drivers/usb/dwc3/gadget.c
|
|
+++ b/drivers/usb/dwc3/gadget.c
|
|
@@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
|
|
}
|
|
|
|
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
|
|
- int needs_wakeup;
|
|
+ int link_state;
|
|
|
|
- needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
|
|
- dwc->link_state == DWC3_LINK_STATE_U2 ||
|
|
- dwc->link_state == DWC3_LINK_STATE_U3);
|
|
-
|
|
- if (unlikely(needs_wakeup)) {
|
|
+ link_state = dwc3_gadget_get_link_state(dwc);
|
|
+ if (link_state == DWC3_LINK_STATE_U1 ||
|
|
+ link_state == DWC3_LINK_STATE_U2 ||
|
|
+ link_state == DWC3_LINK_STATE_U3) {
|
|
ret = __dwc3_gadget_wakeup(dwc);
|
|
dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
|
|
ret);
|
|
@@ -608,12 +607,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
|
|
u8 bInterval_m1;
|
|
|
|
/*
|
|
- * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
|
|
- * must be set to 0 when the controller operates in full-speed.
|
|
+ * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
|
|
+ *
|
|
+ * NOTE: The programming guide incorrectly stated bInterval_m1
|
|
+ * must be set to 0 when operating in fullspeed. Internally the
|
|
+ * controller does not have this limitation. See DWC_usb3x
|
|
+ * programming guide section 3.2.2.1.
|
|
*/
|
|
bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
|
|
- if (dwc->gadget->speed == USB_SPEED_FULL)
|
|
- bInterval_m1 = 0;
|
|
|
|
if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
|
|
dwc->gadget->speed == USB_SPEED_FULL)
|
|
@@ -1973,6 +1974,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
|
|
case DWC3_LINK_STATE_RESET:
|
|
case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
|
|
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
|
|
+ case DWC3_LINK_STATE_U2: /* in HS, means Sleep (L1) */
|
|
+ case DWC3_LINK_STATE_U1:
|
|
case DWC3_LINK_STATE_RESUME:
|
|
break;
|
|
default:
|
|
@@ -3267,6 +3270,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
|
|
{
|
|
u32 reg;
|
|
|
|
+ /*
|
|
+ * Ideally, dwc3_reset_gadget() would trigger the function
|
|
+ * drivers to stop any active transfers through ep disable.
|
|
+ * However, for functions which defer ep disable, such as mass
|
|
+ * storage, we will need to rely on the call to stop active
|
|
+ * transfers here, and avoid allowing of request queuing.
|
|
+ */
|
|
+ dwc->connected = false;
|
|
+
|
|
/*
|
|
* WORKAROUND: DWC3 revisions <1.88a have an issue which
|
|
* would cause a missing Disconnect Event if there's a
|
|
@@ -3389,6 +3401,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
|
|
/* Enable USB2 LPM Capability */
|
|
|
|
if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
|
|
+ !dwc->usb2_gadget_lpm_disable &&
|
|
(speed != DWC3_DSTS_SUPERSPEED) &&
|
|
(speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
|
|
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
|
|
@@ -3415,6 +3428,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
|
|
|
|
dwc3_gadget_dctl_write_safe(dwc, reg);
|
|
} else {
|
|
+ if (dwc->usb2_gadget_lpm_disable) {
|
|
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
|
|
+ reg &= ~DWC3_DCFG_LPM_CAP;
|
|
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
|
|
+ }
|
|
+
|
|
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
|
|
reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
|
|
dwc3_gadget_dctl_write_safe(dwc, reg);
|
|
@@ -3862,7 +3881,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
|
|
dwc->gadget->speed = USB_SPEED_UNKNOWN;
|
|
dwc->gadget->sg_supported = true;
|
|
dwc->gadget->name = "dwc3-gadget";
|
|
- dwc->gadget->lpm_capable = true;
|
|
+ dwc->gadget->lpm_capable = !dwc->usb2_gadget_lpm_disable;
|
|
|
|
/*
|
|
* FIXME We might be setting max_speed to <SUPER, however versions
|
|
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
|
|
index 2d115353424c2..8bb25773b61e9 100644
|
|
--- a/drivers/usb/gadget/config.c
|
|
+++ b/drivers/usb/gadget/config.c
|
|
@@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
|
|
void usb_free_all_descriptors(struct usb_function *f)
|
|
{
|
|
usb_free_descriptors(f->fs_descriptors);
|
|
+ f->fs_descriptors = NULL;
|
|
usb_free_descriptors(f->hs_descriptors);
|
|
+ f->hs_descriptors = NULL;
|
|
usb_free_descriptors(f->ss_descriptors);
|
|
+ f->ss_descriptors = NULL;
|
|
usb_free_descriptors(f->ssp_descriptors);
|
|
+ f->ssp_descriptors = NULL;
|
|
}
|
|
EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
|
|
|
|
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
|
|
index 801a8b668a35a..10a5d9f0f2b90 100644
|
|
--- a/drivers/usb/gadget/function/f_fs.c
|
|
+++ b/drivers/usb/gadget/function/f_fs.c
|
|
@@ -2640,6 +2640,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
|
|
|
|
do { /* lang_count > 0 so we can use do-while */
|
|
unsigned needed = needed_count;
|
|
+ u32 str_per_lang = str_count;
|
|
|
|
if (len < 3)
|
|
goto error_free;
|
|
@@ -2675,7 +2676,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
|
|
|
|
data += length + 1;
|
|
len -= length + 1;
|
|
- } while (--str_count);
|
|
+ } while (--str_per_lang);
|
|
|
|
s->id = 0; /* terminator */
|
|
s->s = NULL;
|
|
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
|
|
index 560382e0a8f38..e65f474ad7b3b 100644
|
|
--- a/drivers/usb/gadget/function/f_uac1.c
|
|
+++ b/drivers/usb/gadget/function/f_uac1.c
|
|
@@ -19,6 +19,9 @@
|
|
#include "u_audio.h"
|
|
#include "u_uac1.h"
|
|
|
|
+/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
|
|
+#define UAC1_CHANNEL_MASK 0x0FFF
|
|
+
|
|
struct f_uac1 {
|
|
struct g_audio g_audio;
|
|
u8 ac_intf, as_in_intf, as_out_intf;
|
|
@@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
|
|
return container_of(f, struct f_uac1, g_audio.func);
|
|
}
|
|
|
|
+static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
|
|
+{
|
|
+ return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
|
|
+}
|
|
+
|
|
/*
|
|
* DESCRIPTORS ... most are static, but strings and full
|
|
* configuration descriptors are built on demand.
|
|
@@ -505,11 +513,42 @@ static void f_audio_disable(struct usb_function *f)
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
+static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
|
|
+{
|
|
+ struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
|
|
+
|
|
+ if (!opts->p_chmask && !opts->c_chmask) {
|
|
+ dev_err(dev, "Error: no playback and capture channels\n");
|
|
+ return -EINVAL;
|
|
+ } else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
|
|
+ dev_err(dev, "Error: unsupported playback channels mask\n");
|
|
+ return -EINVAL;
|
|
+ } else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
|
|
+ dev_err(dev, "Error: unsupported capture channels mask\n");
|
|
+ return -EINVAL;
|
|
+ } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
|
|
+ dev_err(dev, "Error: incorrect playback sample size\n");
|
|
+ return -EINVAL;
|
|
+ } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
|
|
+ dev_err(dev, "Error: incorrect capture sample size\n");
|
|
+ return -EINVAL;
|
|
+ } else if (!opts->p_srate) {
|
|
+ dev_err(dev, "Error: incorrect playback sampling rate\n");
|
|
+ return -EINVAL;
|
|
+ } else if (!opts->c_srate) {
|
|
+ dev_err(dev, "Error: incorrect capture sampling rate\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* audio function driver setup/binding */
|
|
static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
|
|
{
|
|
struct usb_composite_dev *cdev = c->cdev;
|
|
struct usb_gadget *gadget = cdev->gadget;
|
|
+ struct device *dev = &gadget->dev;
|
|
struct f_uac1 *uac1 = func_to_uac1(f);
|
|
struct g_audio *audio = func_to_g_audio(f);
|
|
struct f_uac1_opts *audio_opts;
|
|
@@ -519,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
|
|
int rate;
|
|
int status;
|
|
|
|
+ status = f_audio_validate_opts(audio, dev);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
|
|
|
|
us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
|
|
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
|
|
index 6f03e944e0e31..dd960cea642f3 100644
|
|
--- a/drivers/usb/gadget/function/f_uac2.c
|
|
+++ b/drivers/usb/gadget/function/f_uac2.c
|
|
@@ -14,6 +14,9 @@
|
|
#include "u_audio.h"
|
|
#include "u_uac2.h"
|
|
|
|
+/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
|
|
+#define UAC2_CHANNEL_MASK 0x07FFFFFF
|
|
+
|
|
/*
|
|
* The driver implements a simple UAC_2 topology.
|
|
* USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
|
|
@@ -604,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts)
|
|
hs_audio_desc[i] = NULL;
|
|
}
|
|
|
|
+static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
|
|
+{
|
|
+ struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
|
|
+
|
|
+ if (!opts->p_chmask && !opts->c_chmask) {
|
|
+ dev_err(dev, "Error: no playback and capture channels\n");
|
|
+ return -EINVAL;
|
|
+ } else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) {
|
|
+ dev_err(dev, "Error: unsupported playback channels mask\n");
|
|
+ return -EINVAL;
|
|
+ } else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) {
|
|
+ dev_err(dev, "Error: unsupported capture channels mask\n");
|
|
+ return -EINVAL;
|
|
+ } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
|
|
+ dev_err(dev, "Error: incorrect playback sample size\n");
|
|
+ return -EINVAL;
|
|
+ } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
|
|
+ dev_err(dev, "Error: incorrect capture sample size\n");
|
|
+ return -EINVAL;
|
|
+ } else if (!opts->p_srate) {
|
|
+ dev_err(dev, "Error: incorrect playback sampling rate\n");
|
|
+ return -EINVAL;
|
|
+ } else if (!opts->c_srate) {
|
|
+ dev_err(dev, "Error: incorrect capture sampling rate\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int
|
|
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
|
|
{
|
|
@@ -612,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
|
|
struct usb_composite_dev *cdev = cfg->cdev;
|
|
struct usb_gadget *gadget = cdev->gadget;
|
|
struct device *dev = &gadget->dev;
|
|
- struct f_uac2_opts *uac2_opts;
|
|
+ struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
|
|
struct usb_string *us;
|
|
int ret;
|
|
|
|
- uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst);
|
|
+ ret = afunc_validate_opts(agdev, dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
|
|
if (IS_ERR(us))
|
|
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
|
|
index 44b4352a26765..f48a00e497945 100644
|
|
--- a/drivers/usb/gadget/function/f_uvc.c
|
|
+++ b/drivers/usb/gadget/function/f_uvc.c
|
|
@@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
|
|
|
|
uvc_hs_streaming_ep.wMaxPacketSize =
|
|
cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
|
|
- uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
|
|
+
|
|
+ /* A high-bandwidth endpoint must specify a bInterval value of 1 */
|
|
+ if (max_packet_mult > 1)
|
|
+ uvc_hs_streaming_ep.bInterval = 1;
|
|
+ else
|
|
+ uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
|
|
|
|
uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
|
|
uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
|
|
@@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
|
|
pd->bmControls[0] = 1;
|
|
pd->bmControls[1] = 0;
|
|
pd->iProcessing = 0;
|
|
+ pd->bmVideoStandards = 0;
|
|
|
|
od = &opts->uvc_output_terminal;
|
|
od->bLength = UVC_DT_OUTPUT_TERMINAL_SIZE;
|
|
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
|
|
index a9f8eb8e1c767..2c9eab2b863d2 100644
|
|
--- a/drivers/usb/gadget/legacy/webcam.c
|
|
+++ b/drivers/usb/gadget/legacy/webcam.c
|
|
@@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = {
|
|
.bmControls[0] = 1,
|
|
.bmControls[1] = 0,
|
|
.iProcessing = 0,
|
|
+ .bmVideoStandards = 0,
|
|
};
|
|
|
|
static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
|
|
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
|
|
index 57067763b1005..5f474ffe2be1e 100644
|
|
--- a/drivers/usb/gadget/udc/dummy_hcd.c
|
|
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
|
|
@@ -903,6 +903,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
|
|
spin_lock_irqsave(&dum->lock, flags);
|
|
dum->pullup = (value != 0);
|
|
set_link_state(dum_hcd);
|
|
+ if (value == 0) {
|
|
+ /*
|
|
+ * Emulate synchronize_irq(): wait for callbacks to finish.
|
|
+ * This seems to be the best place to emulate the call to
|
|
+ * synchronize_irq() that's in usb_gadget_remove_driver().
|
|
+ * Doing it in dummy_udc_stop() would be too late since it
|
|
+ * is called after the unbind callback and unbind shouldn't
|
|
+ * be invoked until all the other callbacks are finished.
|
|
+ */
|
|
+ while (dum->callback_usage > 0) {
|
|
+ spin_unlock_irqrestore(&dum->lock, flags);
|
|
+ usleep_range(1000, 2000);
|
|
+ spin_lock_irqsave(&dum->lock, flags);
|
|
+ }
|
|
+ }
|
|
spin_unlock_irqrestore(&dum->lock, flags);
|
|
|
|
usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
|
|
@@ -1004,14 +1019,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
|
|
spin_lock_irq(&dum->lock);
|
|
dum->ints_enabled = 0;
|
|
stop_activity(dum);
|
|
-
|
|
- /* emulate synchronize_irq(): wait for callbacks to finish */
|
|
- while (dum->callback_usage > 0) {
|
|
- spin_unlock_irq(&dum->lock);
|
|
- usleep_range(1000, 2000);
|
|
- spin_lock_irq(&dum->lock);
|
|
- }
|
|
-
|
|
dum->driver = NULL;
|
|
spin_unlock_irq(&dum->lock);
|
|
|
|
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
|
|
index 580bef8eb4cbc..2319c9737c2bd 100644
|
|
--- a/drivers/usb/gadget/udc/tegra-xudc.c
|
|
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
|
|
@@ -3883,7 +3883,7 @@ static int tegra_xudc_remove(struct platform_device *pdev)
|
|
|
|
pm_runtime_get_sync(xudc->dev);
|
|
|
|
- cancel_delayed_work(&xudc->plc_reset_work);
|
|
+ cancel_delayed_work_sync(&xudc->plc_reset_work);
|
|
cancel_work_sync(&xudc->usb_role_sw_work);
|
|
|
|
usb_del_gadget_udc(&xudc->gadget);
|
|
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
|
|
index 3589b49b6c8b4..1df123db5ef8a 100644
|
|
--- a/drivers/usb/host/xhci-mem.c
|
|
+++ b/drivers/usb/host/xhci-mem.c
|
|
@@ -2142,6 +2142,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
|
|
|
|
if (major_revision == 0x03) {
|
|
rhub = &xhci->usb3_rhub;
|
|
+ /*
|
|
+ * Some hosts incorrectly use sub-minor version for minor
|
|
+ * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
|
|
+ * for bcdUSB 0x310). Since there is no USB release with sub
|
|
+ * minor version 0x301 to 0x309, we can assume that they are
|
|
+ * incorrect and fix it here.
|
|
+ */
|
|
+ if (minor_revision > 0x00 && minor_revision < 0x10)
|
|
+ minor_revision <<= 4;
|
|
} else if (major_revision <= 0x02) {
|
|
rhub = &xhci->usb2_rhub;
|
|
} else {
|
|
@@ -2253,6 +2262,9 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
|
|
return;
|
|
rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
|
|
flags, dev_to_node(dev));
|
|
+ if (!rhub->ports)
|
|
+ return;
|
|
+
|
|
for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
|
|
if (xhci->hw_ports[i].rhub != rhub ||
|
|
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
|
|
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
|
|
index 2f27dc0d9c6bd..1c331577fca92 100644
|
|
--- a/drivers/usb/host/xhci-mtk.c
|
|
+++ b/drivers/usb/host/xhci-mtk.c
|
|
@@ -397,6 +397,8 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
|
|
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
|
|
if (mtk->lpm_support)
|
|
xhci->quirks |= XHCI_LPM_SUPPORT;
|
|
+ if (mtk->u2_lpm_disable)
|
|
+ xhci->quirks |= XHCI_HW_LPM_DISABLE;
|
|
|
|
/*
|
|
* MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
|
|
@@ -469,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
|
|
return ret;
|
|
|
|
mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
|
|
+ mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
|
|
/* optional property, ignore the error if it does not exist */
|
|
of_property_read_u32(node, "mediatek,u3p-dis-msk",
|
|
&mtk->u3p_dis_msk);
|
|
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
|
|
index cbb09dfea62e0..080109012b9ac 100644
|
|
--- a/drivers/usb/host/xhci-mtk.h
|
|
+++ b/drivers/usb/host/xhci-mtk.h
|
|
@@ -150,6 +150,7 @@ struct xhci_hcd_mtk {
|
|
struct phy **phys;
|
|
int num_phys;
|
|
bool lpm_support;
|
|
+ bool u2_lpm_disable;
|
|
/* usb remote wakeup */
|
|
bool uwk_en;
|
|
struct regmap *uwk;
|
|
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
|
|
index fd84ca7534e0d..66147f9179e59 100644
|
|
--- a/drivers/usb/host/xhci.c
|
|
+++ b/drivers/usb/host/xhci.c
|
|
@@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
|
|
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
|
|
int err, i;
|
|
u64 val;
|
|
+ u32 intrs;
|
|
|
|
/*
|
|
* Some Renesas controllers get into a weird state if they are
|
|
@@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
|
|
if (upper_32_bits(val))
|
|
xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
|
|
|
|
- for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
|
|
+ intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
|
|
+ ARRAY_SIZE(xhci->run_regs->ir_set));
|
|
+
|
|
+ for (i = 0; i < intrs; i++) {
|
|
struct xhci_intr_reg __iomem *ir;
|
|
|
|
ir = &xhci->run_regs->ir_set[i];
|
|
@@ -3351,6 +3355,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
|
|
|
|
/* config ep command clears toggle if add and drop ep flags are set */
|
|
ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
|
|
+ if (!ctrl_ctx) {
|
|
+ spin_unlock_irqrestore(&xhci->lock, flags);
|
|
+ xhci_free_command(xhci, cfg_cmd);
|
|
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
|
|
+ __func__);
|
|
+ goto cleanup;
|
|
+ }
|
|
+
|
|
xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
|
|
ctrl_ctx, ep_flag, ep_flag);
|
|
xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
|
|
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
|
|
index fc0457db62e1a..8f09a387b7738 100644
|
|
--- a/drivers/usb/musb/musb_core.c
|
|
+++ b/drivers/usb/musb/musb_core.c
|
|
@@ -2070,7 +2070,7 @@ static void musb_irq_work(struct work_struct *data)
|
|
struct musb *musb = container_of(data, struct musb, irq_work.work);
|
|
int error;
|
|
|
|
- error = pm_runtime_get_sync(musb->controller);
|
|
+ error = pm_runtime_resume_and_get(musb->controller);
|
|
if (error < 0) {
|
|
dev_err(musb->controller, "Could not enable: %i\n", error);
|
|
|
|
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
|
|
index bfa4c6ef554e5..c79d2f2387aaa 100644
|
|
--- a/drivers/vhost/vdpa.c
|
|
+++ b/drivers/vhost/vdpa.c
|
|
@@ -993,6 +993,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
|
|
if (vma->vm_end - vma->vm_start != notify.size)
|
|
return -ENOTSUPP;
|
|
|
|
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
|
|
vma->vm_ops = &vhost_vdpa_vm_ops;
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
|
|
index 3bc7800eb0a93..cd11c57764381 100644
|
|
--- a/drivers/video/backlight/qcom-wled.c
|
|
+++ b/drivers/video/backlight/qcom-wled.c
|
|
@@ -336,19 +336,19 @@ static int wled3_sync_toggle(struct wled *wled)
|
|
unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
|
|
|
|
rc = regmap_update_bits(wled->regmap,
|
|
- wled->ctrl_addr + WLED3_SINK_REG_SYNC,
|
|
+ wled->sink_addr + WLED3_SINK_REG_SYNC,
|
|
mask, mask);
|
|
if (rc < 0)
|
|
return rc;
|
|
|
|
rc = regmap_update_bits(wled->regmap,
|
|
- wled->ctrl_addr + WLED3_SINK_REG_SYNC,
|
|
+ wled->sink_addr + WLED3_SINK_REG_SYNC,
|
|
mask, WLED3_SINK_REG_SYNC_CLEAR);
|
|
|
|
return rc;
|
|
}
|
|
|
|
-static int wled5_sync_toggle(struct wled *wled)
|
|
+static int wled5_mod_sync_toggle(struct wled *wled)
|
|
{
|
|
int rc;
|
|
u8 val;
|
|
@@ -445,10 +445,23 @@ static int wled_update_status(struct backlight_device *bl)
|
|
goto unlock_mutex;
|
|
}
|
|
|
|
- rc = wled->wled_sync_toggle(wled);
|
|
- if (rc < 0) {
|
|
- dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
|
|
- goto unlock_mutex;
|
|
+ if (wled->version < 5) {
|
|
+ rc = wled->wled_sync_toggle(wled);
|
|
+ if (rc < 0) {
|
|
+ dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
+ } else {
|
|
+ /*
|
|
+ * For WLED5 toggling the MOD_SYNC_BIT updates the
|
|
+ * brightness
|
|
+ */
|
|
+ rc = wled5_mod_sync_toggle(wled);
|
|
+ if (rc < 0) {
|
|
+ dev_err(wled->dev, "wled mod sync failed rc:%d\n",
|
|
+ rc);
|
|
+ goto unlock_mutex;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
@@ -1459,7 +1472,7 @@ static int wled_configure(struct wled *wled)
|
|
size = ARRAY_SIZE(wled5_opts);
|
|
*cfg = wled5_config_defaults;
|
|
wled->wled_set_brightness = wled5_set_brightness;
|
|
- wled->wled_sync_toggle = wled5_sync_toggle;
|
|
+ wled->wled_sync_toggle = wled3_sync_toggle;
|
|
wled->wled_cabc_config = wled5_cabc_config;
|
|
wled->wled_ovp_delay = wled5_ovp_delay;
|
|
wled->wled_auto_detection_required =
|
|
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
|
|
index 757d5c3f620b7..ff09e57f3c380 100644
|
|
--- a/drivers/video/fbdev/core/fbcmap.c
|
|
+++ b/drivers/video/fbdev/core/fbcmap.c
|
|
@@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
|
|
if (!len)
|
|
return 0;
|
|
|
|
- cmap->red = kmalloc(size, flags);
|
|
+ cmap->red = kzalloc(size, flags);
|
|
if (!cmap->red)
|
|
goto fail;
|
|
- cmap->green = kmalloc(size, flags);
|
|
+ cmap->green = kzalloc(size, flags);
|
|
if (!cmap->green)
|
|
goto fail;
|
|
- cmap->blue = kmalloc(size, flags);
|
|
+ cmap->blue = kzalloc(size, flags);
|
|
if (!cmap->blue)
|
|
goto fail;
|
|
if (transp) {
|
|
- cmap->transp = kmalloc(size, flags);
|
|
+ cmap->transp = kzalloc(size, flags);
|
|
if (!cmap->transp)
|
|
goto fail;
|
|
} else {
|
|
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
|
|
index f1964ea4b8269..e21e1e86ad15f 100644
|
|
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
|
|
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
|
|
@@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
|
|
* enclave file descriptor to be further used for enclave
|
|
* resources handling e.g. memory regions and CPUs.
|
|
* @ne_pci_dev : Private data associated with the PCI device.
|
|
- * @slot_uid: Generated unique slot id associated with an enclave.
|
|
+ * @slot_uid: User pointer to store the generated unique slot id
|
|
+ * associated with an enclave to.
|
|
*
|
|
* Context: Process context. This function is called with the ne_pci_dev enclave
|
|
* mutex held.
|
|
@@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
|
|
* * Enclave fd on success.
|
|
* * Negative return value on failure.
|
|
*/
|
|
-static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
|
|
+static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
|
|
{
|
|
struct ne_pci_dev_cmd_reply cmd_reply = {};
|
|
int enclave_fd = -1;
|
|
@@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
|
|
|
|
list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
|
|
|
|
- *slot_uid = ne_enclave->slot_uid;
|
|
+ if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
|
|
+ /*
|
|
+ * As we're holding the only reference to 'enclave_file', fput()
|
|
+ * will call ne_enclave_release() which will do a proper cleanup
|
|
+ * of all so far allocated resources, leaving only the unused fd
|
|
+ * for us to free.
|
|
+ */
|
|
+ fput(enclave_file);
|
|
+ put_unused_fd(enclave_fd);
|
|
+
|
|
+ return -EFAULT;
|
|
+ }
|
|
|
|
fd_install(enclave_fd, enclave_file);
|
|
|
|
@@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
switch (cmd) {
|
|
case NE_CREATE_VM: {
|
|
int enclave_fd = -1;
|
|
- struct file *enclave_file = NULL;
|
|
struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
|
|
- int rc = -EINVAL;
|
|
- u64 slot_uid = 0;
|
|
+ u64 __user *slot_uid = (void __user *)arg;
|
|
|
|
mutex_lock(&ne_pci_dev->enclaves_list_mutex);
|
|
-
|
|
- enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
|
|
- if (enclave_fd < 0) {
|
|
- rc = enclave_fd;
|
|
-
|
|
- mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
|
|
-
|
|
- return rc;
|
|
- }
|
|
-
|
|
+ enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
|
|
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
|
|
|
|
- if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
|
|
- enclave_file = fget(enclave_fd);
|
|
- /* Decrement file refs to have release() called. */
|
|
- fput(enclave_file);
|
|
- fput(enclave_file);
|
|
- put_unused_fd(enclave_fd);
|
|
-
|
|
- return -EFAULT;
|
|
- }
|
|
-
|
|
return enclave_fd;
|
|
}
|
|
|
|
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
|
|
index 5ae3fa0386b76..320aa87d26bf3 100644
|
|
--- a/fs/btrfs/compression.c
|
|
+++ b/fs/btrfs/compression.c
|
|
@@ -80,10 +80,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
|
|
case BTRFS_COMPRESS_NONE:
|
|
default:
|
|
/*
|
|
- * This can't happen, the type is validated several times
|
|
- * before we get here. As a sane fallback, return what the
|
|
- * callers will understand as 'no compression happened'.
|
|
+ * This can happen when compression races with remount setting
|
|
+ * it to 'no compress', while caller doesn't call
|
|
+ * inode_need_compress() to check if we really need to
|
|
+ * compress.
|
|
+ *
|
|
+ * Not a big deal, just need to inform caller that we
|
|
+ * haven't allocated any pages yet.
|
|
*/
|
|
+ *out_pages = 0;
|
|
return -E2BIG;
|
|
}
|
|
}
|
|
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
|
|
index 33fe5d839c110..e7c619eb55e7c 100644
|
|
--- a/fs/btrfs/ctree.c
|
|
+++ b/fs/btrfs/ctree.c
|
|
@@ -1365,10 +1365,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
|
|
"failed to read tree block %llu from get_old_root",
|
|
logical);
|
|
} else {
|
|
+ struct tree_mod_elem *tm2;
|
|
+
|
|
btrfs_tree_read_lock(old);
|
|
eb = btrfs_clone_extent_buffer(old);
|
|
+ /*
|
|
+ * After the lookup for the most recent tree mod operation
|
|
+ * above and before we locked and cloned the extent buffer
|
|
+ * 'old', a new tree mod log operation may have been added.
|
|
+ * So lookup for a more recent one to make sure the number
|
|
+ * of mod log operations we replay is consistent with the
|
|
+ * number of items we have in the cloned extent buffer,
|
|
+ * otherwise we can hit a BUG_ON when rewinding the extent
|
|
+ * buffer.
|
|
+ */
|
|
+ tm2 = tree_mod_log_search(fs_info, logical, time_seq);
|
|
btrfs_tree_read_unlock(old);
|
|
free_extent_buffer(old);
|
|
+ ASSERT(tm2);
|
|
+ ASSERT(tm2 == tm || tm2->seq > tm->seq);
|
|
+ if (!tm2 || tm2->seq < tm->seq) {
|
|
+ free_extent_buffer(eb);
|
|
+ return NULL;
|
|
+ }
|
|
+ tm = tm2;
|
|
}
|
|
} else if (old_root) {
|
|
eb_root_owner = btrfs_header_owner(eb_root);
|
|
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
|
|
index 0a4ab121c684b..d06ad9a9abb33 100644
|
|
--- a/fs/btrfs/ioctl.c
|
|
+++ b/fs/btrfs/ioctl.c
|
|
@@ -690,8 +690,6 @@ static noinline int create_subvol(struct inode *dir,
|
|
btrfs_set_root_otransid(root_item, trans->transid);
|
|
|
|
btrfs_tree_unlock(leaf);
|
|
- free_extent_buffer(leaf);
|
|
- leaf = NULL;
|
|
|
|
btrfs_set_root_dirid(root_item, new_dirid);
|
|
|
|
@@ -700,8 +698,22 @@ static noinline int create_subvol(struct inode *dir,
|
|
key.type = BTRFS_ROOT_ITEM_KEY;
|
|
ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
|
|
root_item);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ /*
|
|
+ * Since we don't abort the transaction in this case, free the
|
|
+ * tree block so that we don't leak space and leave the
|
|
+ * filesystem in an inconsistent state (an extent item in the
|
|
+ * extent tree without backreferences). Also no need to have
|
|
+ * the tree block locked since it is not in any tree at this
|
|
+ * point, so no other task can find it and use it.
|
|
+ */
|
|
+ btrfs_free_tree_block(trans, root, leaf, 0, 1);
|
|
+ free_extent_buffer(leaf);
|
|
goto fail;
|
|
+ }
|
|
+
|
|
+ free_extent_buffer(leaf);
|
|
+ leaf = NULL;
|
|
|
|
key.offset = (u64)-1;
|
|
new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
|
|
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
|
|
index c01e0d7bef2c9..efe3ce88b8efa 100644
|
|
--- a/fs/btrfs/relocation.c
|
|
+++ b/fs/btrfs/relocation.c
|
|
@@ -732,10 +732,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
|
|
struct extent_buffer *eb;
|
|
struct btrfs_root_item *root_item;
|
|
struct btrfs_key root_key;
|
|
- int ret;
|
|
+ int ret = 0;
|
|
+ bool must_abort = false;
|
|
|
|
root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
|
|
- BUG_ON(!root_item);
|
|
+ if (!root_item)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
|
root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
|
|
root_key.type = BTRFS_ROOT_ITEM_KEY;
|
|
@@ -747,7 +749,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
|
|
/* called by btrfs_init_reloc_root */
|
|
ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
|
|
BTRFS_TREE_RELOC_OBJECTID);
|
|
- BUG_ON(ret);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
/*
|
|
* Set the last_snapshot field to the generation of the commit
|
|
* root - like this ctree.c:btrfs_block_can_be_shared() behaves
|
|
@@ -768,9 +772,16 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
|
|
*/
|
|
ret = btrfs_copy_root(trans, root, root->node, &eb,
|
|
BTRFS_TREE_RELOC_OBJECTID);
|
|
- BUG_ON(ret);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
}
|
|
|
|
+ /*
|
|
+ * We have changed references at this point, we must abort the
|
|
+ * transaction if anything fails.
|
|
+ */
|
|
+ must_abort = true;
|
|
+
|
|
memcpy(root_item, &root->root_item, sizeof(*root_item));
|
|
btrfs_set_root_bytenr(root_item, eb->start);
|
|
btrfs_set_root_level(root_item, btrfs_header_level(eb));
|
|
@@ -788,14 +799,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
|
|
|
|
ret = btrfs_insert_root(trans, fs_info->tree_root,
|
|
&root_key, root_item);
|
|
- BUG_ON(ret);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
kfree(root_item);
|
|
|
|
reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
|
|
- BUG_ON(IS_ERR(reloc_root));
|
|
+ if (IS_ERR(reloc_root)) {
|
|
+ ret = PTR_ERR(reloc_root);
|
|
+ goto abort;
|
|
+ }
|
|
set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
|
|
reloc_root->last_trans = trans->transid;
|
|
return reloc_root;
|
|
+fail:
|
|
+ kfree(root_item);
|
|
+abort:
|
|
+ if (must_abort)
|
|
+ btrfs_abort_transaction(trans, ret);
|
|
+ return ERR_PTR(ret);
|
|
}
|
|
|
|
/*
|
|
@@ -874,7 +896,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
|
|
int ret;
|
|
|
|
if (!have_reloc_root(root))
|
|
- goto out;
|
|
+ return 0;
|
|
|
|
reloc_root = root->reloc_root;
|
|
root_item = &reloc_root->root_item;
|
|
@@ -907,10 +929,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
|
|
|
|
ret = btrfs_update_root(trans, fs_info->tree_root,
|
|
&reloc_root->root_key, root_item);
|
|
- BUG_ON(ret);
|
|
btrfs_put_root(reloc_root);
|
|
-out:
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
/*
|
|
@@ -1184,8 +1204,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
|
|
int ret;
|
|
int slot;
|
|
|
|
- BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
|
|
- BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
|
|
+ ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
|
|
+ ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
|
|
|
|
last_snapshot = btrfs_root_last_snapshot(&src->root_item);
|
|
again:
|
|
@@ -1216,7 +1236,7 @@ again:
|
|
parent = eb;
|
|
while (1) {
|
|
level = btrfs_header_level(parent);
|
|
- BUG_ON(level < lowest_level);
|
|
+ ASSERT(level >= lowest_level);
|
|
|
|
ret = btrfs_bin_search(parent, &key, &slot);
|
|
if (ret < 0)
|
|
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
|
|
index fbf93067642ac..127570543eb6b 100644
|
|
--- a/fs/btrfs/transaction.c
|
|
+++ b/fs/btrfs/transaction.c
|
|
@@ -1947,7 +1947,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
|
|
*/
|
|
BUG_ON(list_empty(&cur_trans->list));
|
|
|
|
- list_del_init(&cur_trans->list);
|
|
if (cur_trans == fs_info->running_transaction) {
|
|
cur_trans->state = TRANS_STATE_COMMIT_DOING;
|
|
spin_unlock(&fs_info->trans_lock);
|
|
@@ -1956,6 +1955,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
|
|
|
|
spin_lock(&fs_info->trans_lock);
|
|
}
|
|
+
|
|
+ /*
|
|
+ * Now that we know no one else is still using the transaction we can
|
|
+ * remove the transaction from the list of transactions. This avoids
|
|
+ * the transaction kthread from cleaning up the transaction while some
|
|
+ * other task is still using it, which could result in a use-after-free
|
|
+ * on things like log trees, as it forces the transaction kthread to
|
|
+ * wait for this transaction to be cleaned up by us.
|
|
+ */
|
|
+ list_del_init(&cur_trans->list);
|
|
+
|
|
spin_unlock(&fs_info->trans_lock);
|
|
|
|
btrfs_cleanup_one_transaction(trans->transaction, fs_info);
|
|
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
|
|
index 8fc877fb369e7..1cb803a55f3a5 100644
|
|
--- a/fs/cifs/cifsfs.c
|
|
+++ b/fs/cifs/cifsfs.c
|
|
@@ -823,7 +823,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
|
|
goto out;
|
|
}
|
|
|
|
- rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
|
|
+ rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
|
|
if (rc) {
|
|
root = ERR_PTR(rc);
|
|
goto out;
|
|
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
|
|
index 2b72b8893affa..ee8faaa9e69af 100644
|
|
--- a/fs/cifs/connect.c
|
|
+++ b/fs/cifs/connect.c
|
|
@@ -488,6 +488,7 @@ server_unresponsive(struct TCP_Server_Info *server)
|
|
*/
|
|
if ((server->tcpStatus == CifsGood ||
|
|
server->tcpStatus == CifsNeedNegotiate) &&
|
|
+ (!server->ops->can_echo || server->ops->can_echo(server)) &&
|
|
time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
|
|
cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
|
|
(3 * server->echo_interval) / HZ);
|
|
@@ -3149,17 +3150,29 @@ out:
|
|
int
|
|
cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
|
|
{
|
|
- int rc = 0;
|
|
+ int rc;
|
|
|
|
- smb3_parse_devname(devname, ctx);
|
|
+ if (devname) {
|
|
+ cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
|
|
+ rc = smb3_parse_devname(devname, ctx);
|
|
+ if (rc) {
|
|
+ cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
|
|
+ return rc;
|
|
+ }
|
|
+ }
|
|
|
|
if (mntopts) {
|
|
char *ip;
|
|
|
|
- cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
|
|
rc = smb3_parse_opt(mntopts, "ip", &ip);
|
|
- if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
|
|
- strlen(ip))) {
|
|
+ if (rc) {
|
|
+ cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
|
|
+ kfree(ip);
|
|
+ if (!rc) {
|
|
cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
@@ -3179,7 +3192,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c
|
|
return -EINVAL;
|
|
}
|
|
|
|
- return rc;
|
|
+ return 0;
|
|
}
|
|
|
|
static int
|
|
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
|
|
index 3a26ad47b220c..8cb24e6836a04 100644
|
|
--- a/fs/cifs/fs_context.c
|
|
+++ b/fs/cifs/fs_context.c
|
|
@@ -473,6 +473,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
|
|
|
|
/* move "pos" up to delimiter or NULL */
|
|
pos += len;
|
|
+ kfree(ctx->UNC);
|
|
ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
|
|
if (!ctx->UNC)
|
|
return -ENOMEM;
|
|
@@ -483,6 +484,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
|
|
if (*pos == '/' || *pos == '\\')
|
|
pos++;
|
|
|
|
+ kfree(ctx->prepath);
|
|
+ ctx->prepath = NULL;
|
|
+
|
|
/* If pos is NULL then no prepath */
|
|
if (!*pos)
|
|
return 0;
|
|
@@ -974,6 +978,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
|
|
goto cifs_parse_mount_err;
|
|
}
|
|
ctx->max_channels = result.uint_32;
|
|
+ /* If more than one channel requested ... they want multichan */
|
|
+ if (result.uint_32 > 1)
|
|
+ ctx->multichannel = true;
|
|
break;
|
|
case Opt_handletimeout:
|
|
ctx->handle_timeout = result.uint_32;
|
|
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
|
|
index dea4959989b50..b63f00894b0c3 100644
|
|
--- a/fs/cifs/sess.c
|
|
+++ b/fs/cifs/sess.c
|
|
@@ -97,6 +97,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
|
|
return 0;
|
|
}
|
|
|
|
+ if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
|
|
+ cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
|
|
+ ses->chan_max = 1;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
/*
|
|
* Make a copy of the iface list at the time and use that
|
|
* instead so as to not hold the iface spinlock for opening
|
|
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
|
|
index 7b614a7096cd2..beabdb64eeb00 100644
|
|
--- a/fs/cifs/smb2ops.c
|
|
+++ b/fs/cifs/smb2ops.c
|
|
@@ -1732,18 +1732,14 @@ smb2_ioctl_query_info(const unsigned int xid,
|
|
}
|
|
|
|
iqinf_exit:
|
|
- kfree(vars);
|
|
- kfree(buffer);
|
|
- SMB2_open_free(&rqst[0]);
|
|
- if (qi.flags & PASSTHRU_FSCTL)
|
|
- SMB2_ioctl_free(&rqst[1]);
|
|
- else
|
|
- SMB2_query_info_free(&rqst[1]);
|
|
-
|
|
- SMB2_close_free(&rqst[2]);
|
|
+ cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
|
|
+ cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
|
|
+ cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
|
|
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
|
|
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
|
|
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
|
|
+ kfree(vars);
|
|
+ kfree(buffer);
|
|
return rc;
|
|
|
|
e_fault:
|
|
@@ -2201,7 +2197,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
|
|
|
|
cifs_sb = CIFS_SB(inode->i_sb);
|
|
|
|
- utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
|
|
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
|
|
if (utf16_path == NULL) {
|
|
rc = -ENOMEM;
|
|
goto notify_exit;
|
|
@@ -4117,7 +4113,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
|
|
}
|
|
spin_unlock(&cifs_tcp_ses_lock);
|
|
|
|
- return 1;
|
|
+ return -EAGAIN;
|
|
}
|
|
/*
|
|
* Encrypt or decrypt @rqst message. @rqst[0] has the following format:
|
|
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
|
|
index 6a1af5545f674..ca62b858c71f2 100644
|
|
--- a/fs/cifs/smb2pdu.c
|
|
+++ b/fs/cifs/smb2pdu.c
|
|
@@ -840,6 +840,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
|
|
req->SecurityMode = 0;
|
|
|
|
req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
|
|
+ if (ses->chan_max > 1)
|
|
+ req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
|
|
|
|
/* ClientGUID must be zero for SMB2.02 dialect */
|
|
if (server->vals->protocol_id == SMB20_PROT_ID)
|
|
@@ -1025,6 +1027,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
|
|
|
|
pneg_inbuf->Capabilities =
|
|
cpu_to_le32(server->vals->req_capabilities);
|
|
+ if (tcon->ses->chan_max > 1)
|
|
+ pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
|
|
+
|
|
memcpy(pneg_inbuf->Guid, server->client_guid,
|
|
SMB2_CLIENT_GUID_SIZE);
|
|
|
|
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
|
|
index e63259fdef288..b2f6a1937d239 100644
|
|
--- a/fs/ecryptfs/main.c
|
|
+++ b/fs/ecryptfs/main.c
|
|
@@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
|
|
goto out;
|
|
}
|
|
|
|
+ if (!dev_name) {
|
|
+ rc = -EINVAL;
|
|
+ err = "Device name cannot be null";
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
|
|
if (rc) {
|
|
err = "Error parsing options";
|
|
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
|
|
index 9ad1615f44743..e8d04d808fa62 100644
|
|
--- a/fs/erofs/erofs_fs.h
|
|
+++ b/fs/erofs/erofs_fs.h
|
|
@@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
|
|
#define EROFS_I_VERSION_BIT 0
|
|
#define EROFS_I_DATALAYOUT_BIT 1
|
|
|
|
+#define EROFS_I_ALL \
|
|
+ ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
|
|
+
|
|
/* 32-byte reduced form of an ondisk inode */
|
|
struct erofs_inode_compact {
|
|
__le16 i_format; /* inode format hints */
|
|
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 3e21c0e8adae7..0a94a52a119fb 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode,
dic = page_address(page) + *ofs;
ifmt = le16_to_cpu(dic->i_format);

+ if (ifmt & ~EROFS_I_ALL) {
+ erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
+ ifmt, vi->nid);
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
vi->datalayout = erofs_inode_datalayout(ifmt);
if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3196474cbe24c..e42477fcbfa05 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -657,6 +657,12 @@ static void ep_done_scan(struct eventpoll *ep,
*/
list_splice(txlist, &ep->rdllist);
__pm_relax(ep->ws);
+
+ if (!list_empty(&ep->rdllist)) {
+ if (waitqueue_active(&ep->wq))
+ wake_up(&ep->wq);
+ }
+
write_unlock_irq(&ep->lock);
}

diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index a987919686c0d..579c10f57c2b0 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
kfree(sbi->vol_amap);
}

-/*
- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
- * the cluster heap.
- */
int exfat_set_bitmap(struct inode *inode, unsigned int clu)
{
int i, b;
@@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
return 0;
}

-/*
- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
- * the cluster heap.
- */
void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
{
int i, b;
@@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
int ret_discard;

ret_discard = sb_issue_discard(sb,
- exfat_cluster_to_sector(sbi, clu +
- EXFAT_RESERVED_CLUSTERS),
+ exfat_cluster_to_sector(sbi, clu),
(1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);

if (ret_discard == -EOPNOTSUPP) {
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 62e9e5535fa76..0d3e67e7b00d9 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -1093,8 +1093,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
head.fc_tid = cpu_to_le32(
sbi->s_journal->j_running_transaction->t_tid);
if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
- (u8 *)&head, &crc))
+ (u8 *)&head, &crc)) {
+ ret = -ENOSPC;
goto out;
+ }
}

spin_lock(&sbi->s_fc_lock);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 349b27f0dda0c..3b09ddbe89707 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -372,15 +372,32 @@ truncate:
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
int error, unsigned int flags)
{
- loff_t offset = iocb->ki_pos;
+ loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(iocb->ki_filp);

if (error)
return error;

- if (size && flags & IOMAP_DIO_UNWRITTEN)
- return ext4_convert_unwritten_extents(NULL, inode,
- offset, size);
+ if (size && flags & IOMAP_DIO_UNWRITTEN) {
+ error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+ if (error < 0)
+ return error;
+ }
+ /*
+ * If we are extending the file, we have to update i_size here before
+ * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
+ * buffered reads could zero out too much from page cache pages. Update
+ * of on-disk size will happen later in ext4_dio_write_iter() where
+ * we have enough information to also perform orphan list handling etc.
+ * Note that we perform all extending writes synchronously under
+ * i_rwsem held exclusively so i_size update is safe here in that case.
+ * If the write was not extending, we cannot see pos > i_size here
+ * because operations reducing i_size like truncate wait for all
+ * outstanding DIO before updating i_size.
+ */
+ pos += size;
+ if (pos > i_size_read(inode))
+ i_size_write(inode, pos);

return 0;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
|
|
index b215c564bc318..c92558ede623e 100644
|
|
--- a/fs/ext4/ialloc.c
|
|
+++ b/fs/ext4/ialloc.c
|
|
@@ -1291,7 +1291,8 @@ got:
|
|
|
|
ei->i_extra_isize = sbi->s_want_extra_isize;
|
|
ei->i_inline_off = 0;
|
|
- if (ext4_has_feature_inline_data(sb))
|
|
+ if (ext4_has_feature_inline_data(sb) &&
|
|
+ (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
|
|
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
|
|
ret = inode;
|
|
err = dquot_alloc_inode(inode);
|
|
@@ -1512,6 +1513,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
|
|
handle_t *handle;
|
|
ext4_fsblk_t blk;
|
|
int num, ret = 0, used_blks = 0;
|
|
+ unsigned long used_inos = 0;
|
|
|
|
/* This should not happen, but just to be sure check this */
|
|
if (sb_rdonly(sb)) {
|
|
@@ -1542,22 +1544,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
|
|
* used inodes so we need to skip blocks with used inodes in
|
|
* inode table.
|
|
*/
|
|
- if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
|
|
- used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
|
|
- ext4_itable_unused_count(sb, gdp)),
|
|
- sbi->s_inodes_per_block);
|
|
-
|
|
- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
|
|
- ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
|
|
- ext4_itable_unused_count(sb, gdp)) <
|
|
- EXT4_FIRST_INO(sb)))) {
|
|
- ext4_error(sb, "Something is wrong with group %u: "
|
|
- "used itable blocks: %d; "
|
|
- "itable unused count: %u",
|
|
- group, used_blks,
|
|
- ext4_itable_unused_count(sb, gdp));
|
|
- ret = 1;
|
|
- goto err_out;
|
|
+ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
|
|
+ used_inos = EXT4_INODES_PER_GROUP(sb) -
|
|
+ ext4_itable_unused_count(sb, gdp);
|
|
+ used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
|
|
+
|
|
+ /* Bogus inode unused count? */
|
|
+ if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
|
|
+ ext4_error(sb, "Something is wrong with group %u: "
|
|
+ "used itable blocks: %d; "
|
|
+ "itable unused count: %u",
|
|
+ group, used_blks,
|
|
+ ext4_itable_unused_count(sb, gdp));
|
|
+ ret = 1;
|
|
+ goto err_out;
|
|
+ }
|
|
+
|
|
+ used_inos += group * EXT4_INODES_PER_GROUP(sb);
|
|
+ /*
|
|
+ * Are there some uninitialized inodes in the inode table
|
|
+ * before the first normal inode?
|
|
+ */
|
|
+ if ((used_blks != sbi->s_itb_per_group) &&
|
|
+ (used_inos < EXT4_FIRST_INO(sb))) {
|
|
+ ext4_error(sb, "Something is wrong with group %u: "
|
|
+ "itable unused count: %u; "
|
|
+ "itables initialized count: %ld",
|
|
+ group, ext4_itable_unused_count(sb, gdp),
|
|
+ used_inos);
|
|
+ ret = 1;
|
|
+ goto err_out;
|
|
+ }
|
|
}
|
|
|
|
blk = ext4_inode_table(sb, gdp) + used_blks;
|
|
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
|
|
index d9665d2f82db8..d79871fcffbec 100644
|
|
--- a/fs/ext4/ioctl.c
|
|
+++ b/fs/ext4/ioctl.c
|
|
@@ -312,6 +312,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
|
|
static bool dax_compatible(struct inode *inode, unsigned int oldflags,
|
|
unsigned int flags)
|
|
{
|
|
+ /* Allow the DAX flag to be changed on inline directories */
|
|
+ if (S_ISDIR(inode->i_mode)) {
|
|
+ flags &= ~EXT4_INLINE_DATA_FL;
|
|
+ oldflags &= ~EXT4_INLINE_DATA_FL;
|
|
+ }
|
|
+
|
|
if (flags & EXT4_DAX_FL) {
|
|
if ((oldflags & EXT4_DAX_MUT_EXCL) ||
|
|
ext4_test_inode_state(inode,
|
|
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
|
|
index 795c3ff2907c2..68fbeedd627bc 100644
|
|
--- a/fs/ext4/mmp.c
|
|
+++ b/fs/ext4/mmp.c
|
|
@@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
|
|
wait_on_buffer(bh);
|
|
sb_end_write(sb);
|
|
if (unlikely(!buffer_uptodate(bh)))
|
|
- return 1;
|
|
+ return -EIO;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
|
index c8cc8175b376b..ce883bed1355a 100644
|
|
--- a/fs/ext4/super.c
|
|
+++ b/fs/ext4/super.c
|
|
@@ -667,9 +667,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
|
|
ext4_commit_super(sb);
|
|
}
|
|
|
|
- if (sb_rdonly(sb) || continue_fs)
|
|
- return;
|
|
-
|
|
/*
|
|
* We force ERRORS_RO behavior when system is rebooting. Otherwise we
|
|
* could panic during 'reboot -f' as the underlying device got already
|
|
@@ -679,6 +676,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
|
|
panic("EXT4-fs (device %s): panic forced after error\n",
|
|
sb->s_id);
|
|
}
|
|
+
|
|
+ if (sb_rdonly(sb) || continue_fs)
|
|
+ return;
|
|
+
|
|
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
|
|
/*
|
|
* Make sure updated value of ->s_mount_flags will be visible before
|
|
@@ -3023,9 +3024,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
|
|
sb->s_flags &= ~SB_RDONLY;
|
|
}
|
|
#ifdef CONFIG_QUOTA
|
|
- /* Needed for iput() to work correctly and not trash data */
|
|
- sb->s_flags |= SB_ACTIVE;
|
|
-
|
|
/*
|
|
* Turn on quotas which were not enabled for read-only mounts if
|
|
* filesystem has quota feature, so that they are updated correctly.
|
|
@@ -5561,8 +5559,10 @@ static int ext4_commit_super(struct super_block *sb)
|
|
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
|
|
int error = 0;
|
|
|
|
- if (!sbh || block_device_ejected(sb))
|
|
- return error;
|
|
+ if (!sbh)
|
|
+ return -EINVAL;
|
|
+ if (block_device_ejected(sb))
|
|
+ return -ENODEV;
|
|
|
|
ext4_update_super(sb);
|
|
|
|
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
|
|
index 3a24423ac65fd..071aa59856aac 100644
|
|
--- a/fs/f2fs/node.c
|
|
+++ b/fs/f2fs/node.c
|
|
@@ -2787,6 +2787,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
|
|
struct f2fs_nat_entry raw_ne;
|
|
nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
|
|
|
|
+ if (f2fs_check_nid_range(sbi, nid))
|
|
+ continue;
|
|
+
|
|
raw_ne = nat_in_journal(journal, i);
|
|
|
|
ne = __lookup_nat_cache(nm_i, nid);
|
|
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
|
|
index 054ec852b5ea4..15ba36926fad7 100644
|
|
--- a/fs/f2fs/verity.c
|
|
+++ b/fs/f2fs/verity.c
|
|
@@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
|
|
size_t desc_size, u64 merkle_tree_size)
|
|
{
|
|
struct inode *inode = file_inode(filp);
|
|
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
|
|
struct fsverity_descriptor_location dloc = {
|
|
.version = cpu_to_le32(F2FS_VERIFY_VER),
|
|
.size = cpu_to_le32(desc_size),
|
|
.pos = cpu_to_le64(desc_pos),
|
|
};
|
|
- int err = 0;
|
|
+ int err = 0, err2 = 0;
|
|
|
|
- if (desc != NULL) {
|
|
- /* Succeeded; write the verity descriptor. */
|
|
- err = pagecache_write(inode, desc, desc_size, desc_pos);
|
|
+ /*
|
|
+ * If an error already occurred (which fs/verity/ signals by passing
|
|
+ * desc == NULL), then only clean-up is needed.
|
|
+ */
|
|
+ if (desc == NULL)
|
|
+ goto cleanup;
|
|
|
|
- /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
|
|
- if (!err)
|
|
- err = filemap_write_and_wait(inode->i_mapping);
|
|
- }
|
|
+ /* Append the verity descriptor. */
|
|
+ err = pagecache_write(inode, desc, desc_size, desc_pos);
|
|
+ if (err)
|
|
+ goto cleanup;
|
|
+
|
|
+ /*
|
|
+ * Write all pages (both data and verity metadata). Note that this must
|
|
+ * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
|
|
+ * i_size won't be written properly. For crash consistency, this also
|
|
+ * must happen before the verity inode flag gets persisted.
|
|
+ */
|
|
+ err = filemap_write_and_wait(inode->i_mapping);
|
|
+ if (err)
|
|
+ goto cleanup;
|
|
+
|
|
+ /* Set the verity xattr. */
|
|
+ err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
|
|
+ F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
|
|
+ NULL, XATTR_CREATE);
|
|
+ if (err)
|
|
+ goto cleanup;
|
|
|
|
- /* If we failed, truncate anything we wrote past i_size. */
|
|
- if (desc == NULL || err)
|
|
- f2fs_truncate(inode);
|
|
+ /* Finally, set the verity inode flag. */
|
|
+ file_set_verity(inode);
|
|
+ f2fs_set_inode_flags(inode);
|
|
+ f2fs_mark_inode_dirty_sync(inode, true);
|
|
|
|
clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
|
|
+ return 0;
|
|
|
|
- if (desc != NULL && !err) {
|
|
- err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
|
|
- F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
|
|
- NULL, XATTR_CREATE);
|
|
- if (!err) {
|
|
- file_set_verity(inode);
|
|
- f2fs_set_inode_flags(inode);
|
|
- f2fs_mark_inode_dirty_sync(inode, true);
|
|
- }
|
|
+cleanup:
|
|
+ /*
|
|
+ * Verity failed to be enabled, so clean up by truncating any verity
|
|
+ * metadata that was written beyond i_size (both from cache and from
|
|
+ * disk) and clearing FI_VERITY_IN_PROGRESS.
|
|
+ *
|
|
+ * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
|
|
+ * from re-instantiating cached pages we are truncating (since unlike
|
|
+ * normal file accesses, garbage collection isn't limited by i_size).
|
|
+ */
|
|
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
|
|
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
|
|
+ err2 = f2fs_truncate(inode);
|
|
+ if (err2) {
|
|
+ f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
|
|
+ err2);
|
|
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
|
|
}
|
|
- return err;
|
|
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
|
|
+ clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
|
|
+ return err ?: err2;
|
|
}
|
|
|
|
static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
|
|
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
|
|
index 8cccecb55fb80..eff4abaa87da0 100644
|
|
--- a/fs/fuse/file.c
|
|
+++ b/fs/fuse/file.c
|
|
@@ -1099,6 +1099,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
|
|
struct fuse_file *ff = file->private_data;
|
|
struct fuse_mount *fm = ff->fm;
|
|
unsigned int offset, i;
|
|
+ bool short_write;
|
|
int err;
|
|
|
|
for (i = 0; i < ap->num_pages; i++)
|
|
@@ -1113,32 +1114,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
|
|
if (!err && ia->write.out.size > count)
|
|
err = -EIO;
|
|
|
|
+ short_write = ia->write.out.size < count;
|
|
offset = ap->descs[0].offset;
|
|
count = ia->write.out.size;
|
|
for (i = 0; i < ap->num_pages; i++) {
|
|
struct page *page = ap->pages[i];
|
|
|
|
- if (!err && !offset && count >= PAGE_SIZE)
|
|
- SetPageUptodate(page);
|
|
-
|
|
- if (count > PAGE_SIZE - offset)
|
|
- count -= PAGE_SIZE - offset;
|
|
- else
|
|
- count = 0;
|
|
- offset = 0;
|
|
-
|
|
- unlock_page(page);
|
|
+ if (err) {
|
|
+ ClearPageUptodate(page);
|
|
+ } else {
|
|
+ if (count >= PAGE_SIZE - offset)
|
|
+ count -= PAGE_SIZE - offset;
|
|
+ else {
|
|
+ if (short_write)
|
|
+ ClearPageUptodate(page);
|
|
+ count = 0;
|
|
+ }
|
|
+ offset = 0;
|
|
+ }
|
|
+ if (ia->write.page_locked && (i == ap->num_pages - 1))
|
|
+ unlock_page(page);
|
|
put_page(page);
|
|
}
|
|
|
|
return err;
|
|
}
|
|
|
|
-static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
|
|
+static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
|
|
struct address_space *mapping,
|
|
struct iov_iter *ii, loff_t pos,
|
|
unsigned int max_pages)
|
|
{
|
|
+ struct fuse_args_pages *ap = &ia->ap;
|
|
struct fuse_conn *fc = get_fuse_conn(mapping->host);
|
|
unsigned offset = pos & (PAGE_SIZE - 1);
|
|
size_t count = 0;
|
|
@@ -1191,6 +1198,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
|
|
if (offset == PAGE_SIZE)
|
|
offset = 0;
|
|
|
|
+ /* If we copied full page, mark it uptodate */
|
|
+ if (tmp == PAGE_SIZE)
|
|
+ SetPageUptodate(page);
|
|
+
|
|
+ if (PageUptodate(page)) {
|
|
+ unlock_page(page);
|
|
+ } else {
|
|
+ ia->write.page_locked = true;
|
|
+ break;
|
|
+ }
|
|
if (!fc->big_writes)
|
|
break;
|
|
} while (iov_iter_count(ii) && count < fc->max_write &&
|
|
@@ -1234,7 +1251,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
|
|
break;
|
|
}
|
|
|
|
- count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
|
|
+ count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
|
|
if (count <= 0) {
|
|
err = count;
|
|
} else {
|
|
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
|
|
index 103dfc2fa62ee..ff7295202d09f 100644
|
|
--- a/fs/fuse/fuse_i.h
|
|
+++ b/fs/fuse/fuse_i.h
|
|
@@ -912,6 +912,7 @@ struct fuse_io_args {
|
|
struct {
|
|
struct fuse_write_in in;
|
|
struct fuse_write_out out;
|
|
+ bool page_locked;
|
|
} write;
|
|
};
|
|
struct fuse_args_pages ap;
|
|
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
|
|
index 4ee6f734ba838..1e5affed158e9 100644
|
|
--- a/fs/fuse/virtio_fs.c
|
|
+++ b/fs/fuse/virtio_fs.c
|
|
@@ -896,6 +896,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
|
|
out_vqs:
|
|
vdev->config->reset(vdev);
|
|
virtio_fs_cleanup_vqs(vdev, fs);
|
|
+ kfree(fs->vqs);
|
|
|
|
out:
|
|
vdev->priv = NULL;
|
|
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
|
|
index 9396666b73145..e8fc45fd751fb 100644
|
|
--- a/fs/jbd2/transaction.c
|
|
+++ b/fs/jbd2/transaction.c
|
|
@@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
|
|
}
|
|
|
|
alloc_transaction:
|
|
- if (!journal->j_running_transaction) {
|
|
+ /*
|
|
+ * This check is racy but it is just an optimization of allocating new
|
|
+ * transaction early if there are high chances we'll need it. If we
|
|
+ * guess wrong, we'll retry or free unused transaction.
|
|
+ */
|
|
+ if (!data_race(journal->j_running_transaction)) {
|
|
/*
|
|
* If __GFP_FS is not present, then we may be being called from
|
|
* inside the fs writeback layer, so we MUST NOT fail.
|
|
@@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
|
|
* crucial to catch bugs so let's do a reliable check until the
|
|
* lockless handling is fully proven.
|
|
*/
|
|
- if (jh->b_transaction != transaction &&
|
|
- jh->b_next_transaction != transaction) {
|
|
+ if (data_race(jh->b_transaction != transaction &&
|
|
+ jh->b_next_transaction != transaction)) {
|
|
spin_lock(&jh->b_state_lock);
|
|
J_ASSERT_JH(jh, jh->b_transaction == transaction ||
|
|
jh->b_next_transaction == transaction);
|
|
@@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
|
|
}
|
|
if (jh->b_modified == 1) {
|
|
/* If it's in our transaction it must be in BJ_Metadata list. */
|
|
- if (jh->b_transaction == transaction &&
|
|
- jh->b_jlist != BJ_Metadata) {
|
|
+ if (data_race(jh->b_transaction == transaction &&
|
|
+ jh->b_jlist != BJ_Metadata)) {
|
|
spin_lock(&jh->b_state_lock);
|
|
if (jh->b_transaction == transaction &&
|
|
jh->b_jlist != BJ_Metadata)
|
|
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
|
|
index 406d9cc84ba8d..79e771ab624f4 100644
|
|
--- a/fs/jffs2/compr_rtime.c
|
|
+++ b/fs/jffs2/compr_rtime.c
|
|
@@ -37,6 +37,9 @@ static int jffs2_rtime_compress(unsigned char *data_in,
|
|
int outpos = 0;
|
|
int pos=0;
|
|
|
|
+ if (*dstlen <= 3)
|
|
+ return -1;
|
|
+
|
|
memset(positions,0,sizeof(positions));
|
|
|
|
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
|
|
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
|
|
index f8fb89b10227c..4fc8cd698d1a4 100644
|
|
--- a/fs/jffs2/file.c
|
|
+++ b/fs/jffs2/file.c
|
|
@@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations =
|
|
.mmap = generic_file_readonly_mmap,
|
|
.fsync = jffs2_fsync,
|
|
.splice_read = generic_file_splice_read,
|
|
+ .splice_write = iter_file_splice_write,
|
|
};
|
|
|
|
/* jffs2_file_inode_operations */
|
|
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
|
|
index db72a9d2d0afb..b676056826beb 100644
|
|
--- a/fs/jffs2/scan.c
|
|
+++ b/fs/jffs2/scan.c
|
|
@@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
|
|
memcpy(&fd->name, rd->name, checkedlen);
|
|
fd->name[checkedlen] = 0;
|
|
|
|
- crc = crc32(0, fd->name, rd->nsize);
|
|
+ crc = crc32(0, fd->name, checkedlen);
|
|
if (crc != je32_to_cpu(rd->name_crc)) {
|
|
pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
|
|
__func__, ofs, je32_to_cpu(rd->name_crc), crc);
|
|
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
|
|
index 06894bcdea2db..8f196e5233b32 100644
|
|
--- a/fs/nfs/fs_context.c
|
|
+++ b/fs/nfs/fs_context.c
|
|
@@ -940,6 +940,15 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
|
|
memset(mntfh->data + mntfh->size, 0,
|
|
sizeof(mntfh->data) - mntfh->size);
|
|
|
|
+ /*
|
|
+ * for proto == XPRT_TRANSPORT_UDP, which is what uses
|
|
+ * to_exponential, implying shift: limit the shift value
|
|
+ * to BITS_PER_LONG (majortimeo is unsigned long)
|
|
+ */
|
|
+ if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
|
|
+ if (data->retrans >= 64) /* shift value is too large */
|
|
+ goto out_invalid_data;
|
|
+
|
|
/*
|
|
* Translate to nfs_fs_context, which nfs_fill_super
|
|
* can deal with.
|
|
@@ -1040,6 +1049,9 @@ out_no_address:
|
|
|
|
out_invalid_fh:
|
|
return nfs_invalf(fc, "NFS: invalid root filehandle");
|
|
+
|
|
+out_invalid_data:
|
|
+ return nfs_invalf(fc, "NFS: invalid binary mount data");
|
|
}
|
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4)
|
|
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
|
|
index af64b4e6fd1ff..18de8b6981fcb 100644
|
|
--- a/fs/nfs/pnfs.c
|
|
+++ b/fs/nfs/pnfs.c
|
|
@@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
|
|
}
|
|
valid_layout = pnfs_layout_is_valid(lo);
|
|
pnfs_clear_layoutcommit(ino, &tmp_list);
|
|
- pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
|
|
+ pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
|
|
|
|
if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
|
|
struct pnfs_layout_range range = {
|
|
@@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
|
|
|
|
assert_spin_locked(&lo->plh_inode->i_lock);
|
|
|
|
+ if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
|
|
+ tmp_list = &lo->plh_return_segs;
|
|
+
|
|
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
|
|
if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
|
|
dprintk("%s: marking lseg %p iomode %d "
|
|
@@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
|
|
lseg, lseg->pls_range.iomode,
|
|
lseg->pls_range.offset,
|
|
lseg->pls_range.length);
|
|
+ if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
|
|
+ tmp_list = &lo->plh_return_segs;
|
|
if (mark_lseg_invalid(lseg, tmp_list))
|
|
continue;
|
|
remaining++;
|
|
diff --git a/fs/stat.c b/fs/stat.c
|
|
index dacecdda2e796..1196af4d1ea03 100644
|
|
--- a/fs/stat.c
|
|
+++ b/fs/stat.c
|
|
@@ -77,12 +77,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
|
|
/* SB_NOATIME means filesystem supplies dummy atime value */
|
|
if (inode->i_sb->s_flags & SB_NOATIME)
|
|
stat->result_mask &= ~STATX_ATIME;
|
|
+
|
|
+ /*
|
|
+ * Note: If you add another clause to set an attribute flag, please
|
|
+ * update attributes_mask below.
|
|
+ */
|
|
if (IS_AUTOMOUNT(inode))
|
|
stat->attributes |= STATX_ATTR_AUTOMOUNT;
|
|
|
|
if (IS_DAX(inode))
|
|
stat->attributes |= STATX_ATTR_DAX;
|
|
|
|
+ stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
|
|
+ STATX_ATTR_DAX);
|
|
+
|
|
if (inode->i_op->getattr)
|
|
return inode->i_op->getattr(path, stat, request_mask,
|
|
query_flags);
|
|
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
|
|
index 0f8a6a16421b4..1929ec63a0cb6 100644
|
|
--- a/fs/ubifs/replay.c
|
|
+++ b/fs/ubifs/replay.c
|
|
@@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
|
|
*/
|
|
list_for_each_entry_reverse(r, &c->replay_list, list) {
|
|
ubifs_assert(c, r->sqnum >= rino->sqnum);
|
|
- if (key_inum(c, &r->key) == key_inum(c, &rino->key))
|
|
+ if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
|
|
+ key_type(c, &r->key) == UBIFS_INO_KEY)
|
|
return r->deletion == 0;
|
|
|
|
}
|
|
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
|
|
index fcde59c65a81b..cb3d6b1c655de 100644
|
|
--- a/include/crypto/acompress.h
|
|
+++ b/include/crypto/acompress.h
|
|
@@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
|
|
* crypto_free_acomp() -- free ACOMPRESS tfm handle
|
|
*
|
|
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
|
|
{
|
|
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
|
|
index fcc12c593ef8b..e728469c4cccb 100644
|
|
--- a/include/crypto/aead.h
|
|
+++ b/include/crypto/aead.h
|
|
@@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
|
|
/**
|
|
* crypto_free_aead() - zeroize and free aead handle
|
|
* @tfm: cipher handle to be freed
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_aead(struct crypto_aead *tfm)
|
|
{
|
|
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
|
|
index 1d3aa252cabaf..5764b46bd1ec1 100644
|
|
--- a/include/crypto/akcipher.h
|
|
+++ b/include/crypto/akcipher.h
|
|
@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
|
|
* crypto_free_akcipher() - free AKCIPHER tfm handle
|
|
*
|
|
* @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
|
|
{
|
|
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
|
|
index 3a1c72fdb7cf5..dabaee6987186 100644
|
|
--- a/include/crypto/chacha.h
|
|
+++ b/include/crypto/chacha.h
|
|
@@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
|
|
hchacha_block_generic(state, out, nrounds);
|
|
}
|
|
|
|
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
|
|
-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
|
|
+static inline void chacha_init_consts(u32 *state)
|
|
{
|
|
state[0] = 0x61707865; /* "expa" */
|
|
state[1] = 0x3320646e; /* "nd 3" */
|
|
state[2] = 0x79622d32; /* "2-by" */
|
|
state[3] = 0x6b206574; /* "te k" */
|
|
+}
|
|
+
|
|
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
|
|
+static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
|
|
+{
|
|
+ chacha_init_consts(state);
|
|
state[4] = key[0];
|
|
state[5] = key[1];
|
|
state[6] = key[2];
|
|
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
|
|
index 13f8a6a54ca87..b2bc1e46e86a7 100644
|
|
--- a/include/crypto/hash.h
|
|
+++ b/include/crypto/hash.h
|
|
@@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
|
|
/**
|
|
* crypto_free_ahash() - zeroize and free the ahash handle
|
|
* @tfm: cipher handle to be freed
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
|
|
{
|
|
@@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
|
|
/**
|
|
* crypto_free_shash() - zeroize and free the message digest handle
|
|
* @tfm: cipher handle to be freed
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_shash(struct crypto_shash *tfm)
|
|
{
|
|
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
|
|
index 88b591215d5c8..cccceadc164b9 100644
|
|
--- a/include/crypto/kpp.h
|
|
+++ b/include/crypto/kpp.h
|
|
@@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
|
|
* crypto_free_kpp() - free KPP tfm handle
|
|
*
|
|
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_kpp(struct crypto_kpp *tfm)
|
|
{
|
|
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
|
|
index 8b4b844b4eef8..17bb3673d3c17 100644
|
|
--- a/include/crypto/rng.h
|
|
+++ b/include/crypto/rng.h
|
|
@@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
|
|
/**
|
|
* crypto_free_rng() - zeroize and free RNG handle
|
|
* @tfm: cipher handle to be freed
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_rng(struct crypto_rng *tfm)
|
|
{
|
|
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
|
|
index 6a733b171a5d0..ef0fc9ed4342e 100644
|
|
--- a/include/crypto/skcipher.h
|
|
+++ b/include/crypto/skcipher.h
|
|
@@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
|
|
/**
|
|
* crypto_free_skcipher() - zeroize and free cipher handle
|
|
* @tfm: cipher handle to be freed
|
|
+ *
|
|
+ * If @tfm is a NULL or error pointer, this function does nothing.
|
|
*/
|
|
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
|
|
{
|
|
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
|
|
index fe48b7840665a..b53cb1a5b8194 100644
|
|
--- a/include/linux/ioport.h
|
|
+++ b/include/linux/ioport.h
|
|
@@ -331,7 +331,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
|
|
{
|
|
res->start = irq;
|
|
res->end = irq;
|
|
- res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
|
|
+ res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
|
|
}
|
|
|
|
#ifdef CONFIG_IO_STRICT_DEVMEM
|
|
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
|
|
index 1dbabf1b3cb81..6e0f66a2e7279 100644
|
|
--- a/include/linux/mfd/da9063/registers.h
|
|
+++ b/include/linux/mfd/da9063/registers.h
|
|
@@ -1037,6 +1037,9 @@
|
|
#define DA9063_NONKEY_PIN_AUTODOWN 0x02
|
|
#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03
|
|
|
|
+/* DA9063_REG_CONFIG_J (addr=0x10F) */
|
|
+#define DA9063_TWOWIRE_TO 0x40
|
|
+
|
|
/* DA9063_REG_MON_REG_5 (addr=0x116) */
|
|
#define DA9063_MON_A8_IDX_MASK 0x07
|
|
#define DA9063_MON_A8_IDX_NONE 0x00
|
|
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
|
|
index c8ef2f1654a44..06da62c25234f 100644
|
|
--- a/include/linux/mfd/intel-m10-bmc.h
|
|
+++ b/include/linux/mfd/intel-m10-bmc.h
|
|
@@ -11,7 +11,7 @@
|
|
|
|
#define M10BMC_LEGACY_SYS_BASE 0x300400
|
|
#define M10BMC_SYS_BASE 0x300800
|
|
-#define M10BMC_MEM_END 0x200000fc
|
|
+#define M10BMC_MEM_END 0x1fffffff
|
|
|
|
/* Register offset of system registers */
|
|
#define NIOS2_FW_VERSION 0x0
|
|
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
|
|
index 01bba36545c54..7f8b03d5ac5a9 100644
|
|
--- a/include/linux/mmc/host.h
|
|
+++ b/include/linux/mmc/host.h
|
|
@@ -290,9 +290,6 @@ struct mmc_host {
|
|
u32 ocr_avail_sdio; /* SDIO-specific OCR */
|
|
u32 ocr_avail_sd; /* SD-specific OCR */
|
|
u32 ocr_avail_mmc; /* MMC-specific OCR */
|
|
-#ifdef CONFIG_PM_SLEEP
|
|
- struct notifier_block pm_notify;
|
|
-#endif
|
|
struct wakeup_source *ws; /* Enable consume of uevents */
|
|
u32 max_current_330;
|
|
u32 max_current_300;
|
|
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
|
|
index 419a4d77de000..7724c6842beab 100644
|
|
--- a/include/linux/perf_event.h
|
|
+++ b/include/linux/perf_event.h
|
|
@@ -607,6 +607,7 @@ struct swevent_hlist {
|
|
#define PERF_ATTACH_TASK_DATA 0x08
|
|
#define PERF_ATTACH_ITRACE 0x10
|
|
#define PERF_ATTACH_SCHED_CB 0x20
|
|
+#define PERF_ATTACH_CHILD 0x40
|
|
|
|
struct perf_cgroup;
|
|
struct perf_buffer;
|
|
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
|
|
index 111a40d0d3d50..8d5f4f40fb418 100644
|
|
--- a/include/linux/power/bq27xxx_battery.h
|
|
+++ b/include/linux/power/bq27xxx_battery.h
|
|
@@ -53,7 +53,6 @@ struct bq27xxx_reg_cache {
|
|
int capacity;
|
|
int energy;
|
|
int flags;
|
|
- int power_avg;
|
|
int health;
|
|
};
|
|
|
|
diff --git a/include/linux/reset.h b/include/linux/reset.h
|
|
index 439fec7112a95..18a9d6509052f 100644
|
|
--- a/include/linux/reset.h
|
|
+++ b/include/linux/reset.h
|
|
@@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
|
|
return 0;
|
|
}
|
|
|
|
+static inline int reset_control_rearm(struct reset_control *rstc)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static inline int reset_control_assert(struct reset_control *rstc)
|
|
{
|
|
return 0;
|
|
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
|
|
index 167ca8c8424f6..2fe4019b749f6 100644
|
|
--- a/include/media/v4l2-ctrls.h
|
|
+++ b/include/media/v4l2-ctrls.h
|
|
@@ -301,12 +301,14 @@ struct v4l2_ctrl {
|
|
* the control has been applied. This prevents applying controls
|
|
* from a cluster with multiple controls twice (when the first
|
|
* control of a cluster is applied, they all are).
|
|
- * @req: If set, this refers to another request that sets this control.
|
|
+ * @valid_p_req: If set, then p_req contains the control value for the request.
|
|
* @p_req: If the control handler containing this control reference
|
|
* is bound to a media request, then this points to the
|
|
- * value of the control that should be applied when the request
|
|
+ * value of the control that must be applied when the request
|
|
* is executed, or to the value of the control at the time
|
|
- * that the request was completed.
|
|
+ * that the request was completed. If @valid_p_req is false,
|
|
+ * then this control was never set for this request and the
|
|
+ * control will not be updated when this request is applied.
|
|
*
|
|
* Each control handler has a list of these refs. The list_head is used to
|
|
* keep a sorted-by-control-ID list of all controls, while the next pointer
|
|
@@ -319,7 +321,7 @@ struct v4l2_ctrl_ref {
|
|
struct v4l2_ctrl_helper *helper;
|
|
bool from_other_dev;
|
|
bool req_done;
|
|
- struct v4l2_ctrl_ref *req;
|
|
+ bool valid_p_req;
|
|
union v4l2_ctrl_ptr p_req;
|
|
};
|
|
|
|
@@ -346,7 +348,7 @@ struct v4l2_ctrl_ref {
|
|
* @error: The error code of the first failed control addition.
|
|
* @request_is_queued: True if the request was queued.
|
|
* @requests: List to keep track of open control handler request objects.
|
|
- * For the parent control handler (@req_obj.req == NULL) this
|
|
+ * For the parent control handler (@req_obj.ops == NULL) this
|
|
* is the list header. When the parent control handler is
|
|
* removed, it has to unbind and put all these requests since
|
|
* they refer to the parent.
|
|
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
|
|
index 2568cb0627ec0..fac8e89aed81d 100644
|
|
--- a/include/scsi/libfcoe.h
|
|
+++ b/include/scsi/libfcoe.h
|
|
@@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
|
|
struct fc_frame *);
|
|
|
|
/* libfcoe funcs */
|
|
-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
|
|
+u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
|
|
int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
|
|
const struct libfc_function_template *, int init_fcp);
|
|
u32 fcoe_fc_crc(struct fc_frame *fp);
|
|
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
|
|
index d854cb19c42c3..bfdae12cdacf8 100644
|
|
--- a/include/uapi/linux/usb/video.h
|
|
+++ b/include/uapi/linux/usb/video.h
|
|
@@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
|
|
__u8 bControlSize;
|
|
__u8 bmControls[2];
|
|
__u8 iProcessing;
|
|
+ __u8 bmVideoStandards;
|
|
} __attribute__((__packed__));
|
|
|
|
-#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
|
|
+#define UVC_DT_PROCESSING_UNIT_SIZE(n) (10+(n))
|
|
|
|
/* 3.7.2.6. Extension Unit Descriptor */
|
|
struct uvc_extension_unit_descriptor {
|
|
diff --git a/kernel/.gitignore b/kernel/.gitignore
|
|
index 78701ea37c973..5518835ac35c7 100644
|
|
--- a/kernel/.gitignore
|
|
+++ b/kernel/.gitignore
|
|
@@ -1,4 +1,5 @@
|
|
# SPDX-License-Identifier: GPL-2.0-only
|
|
+/config_data
|
|
kheaders.md5
|
|
timeconst.h
|
|
hz.bc
|
|
diff --git a/kernel/Makefile b/kernel/Makefile
|
|
index 320f1f3941b79..605ec3e70cb78 100644
|
|
--- a/kernel/Makefile
|
|
+++ b/kernel/Makefile
|
|
@@ -138,10 +138,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
|
|
|
|
$(obj)/configs.o: $(obj)/config_data.gz
|
|
|
|
-targets += config_data.gz
|
|
-$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
|
|
+targets += config_data config_data.gz
|
|
+$(obj)/config_data.gz: $(obj)/config_data FORCE
|
|
$(call if_changed,gzip)
|
|
|
|
+filechk_cat = cat $<
|
|
+
|
|
+$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
|
|
+ $(call filechk,cat)
|
|
+
|
|
$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
|
|
|
|
quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz
|
|
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index cd88af5554712..41bec6d7e06e3 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -2217,6 +2217,26 @@ out:
|
|
perf_event__header_size(leader);
|
|
}
|
|
|
|
+static void sync_child_event(struct perf_event *child_event);
|
|
+
|
|
+static void perf_child_detach(struct perf_event *event)
|
|
+{
|
|
+ struct perf_event *parent_event = event->parent;
|
|
+
|
|
+ if (!(event->attach_state & PERF_ATTACH_CHILD))
|
|
+ return;
|
|
+
|
|
+ event->attach_state &= ~PERF_ATTACH_CHILD;
|
|
+
|
|
+ if (WARN_ON_ONCE(!parent_event))
|
|
+ return;
|
|
+
|
|
+ lockdep_assert_held(&parent_event->child_mutex);
|
|
+
|
|
+ sync_child_event(event);
|
|
+ list_del_init(&event->child_list);
|
|
+}
|
|
+
|
|
static bool is_orphaned_event(struct perf_event *event)
|
|
{
|
|
return event->state == PERF_EVENT_STATE_DEAD;
|
|
@@ -2324,6 +2344,7 @@ group_sched_out(struct perf_event *group_event,
|
|
}
|
|
|
|
#define DETACH_GROUP 0x01UL
|
|
+#define DETACH_CHILD 0x02UL
|
|
|
|
/*
|
|
* Cross CPU call to remove a performance event
|
|
@@ -2347,6 +2368,8 @@ __perf_remove_from_context(struct perf_event *event,
|
|
event_sched_out(event, cpuctx, ctx);
|
|
if (flags & DETACH_GROUP)
|
|
perf_group_detach(event);
|
|
+ if (flags & DETACH_CHILD)
|
|
+ perf_child_detach(event);
|
|
list_del_event(event, ctx);
|
|
|
|
if (!ctx->nr_events && ctx->is_active) {
|
|
@@ -2375,25 +2398,21 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
|
|
|
|
lockdep_assert_held(&ctx->mutex);
|
|
|
|
- event_function_call(event, __perf_remove_from_context, (void *)flags);
|
|
-
|
|
/*
|
|
- * The above event_function_call() can NO-OP when it hits
|
|
- * TASK_TOMBSTONE. In that case we must already have been detached
|
|
- * from the context (by perf_event_exit_event()) but the grouping
|
|
- * might still be in-tact.
|
|
+ * Because of perf_event_exit_task(), perf_remove_from_context() ought
|
|
+ * to work in the face of TASK_TOMBSTONE, unlike every other
|
|
+ * event_function_call() user.
|
|
*/
|
|
- WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
|
|
- if ((flags & DETACH_GROUP) &&
|
|
- (event->attach_state & PERF_ATTACH_GROUP)) {
|
|
- /*
|
|
- * Since in that case we cannot possibly be scheduled, simply
|
|
- * detach now.
|
|
- */
|
|
- raw_spin_lock_irq(&ctx->lock);
|
|
- perf_group_detach(event);
|
|
+ raw_spin_lock_irq(&ctx->lock);
|
|
+ if (!ctx->is_active) {
|
|
+ __perf_remove_from_context(event, __get_cpu_context(ctx),
|
|
+ ctx, (void *)flags);
|
|
raw_spin_unlock_irq(&ctx->lock);
|
|
+ return;
|
|
}
|
|
+ raw_spin_unlock_irq(&ctx->lock);
|
|
+
|
|
+ event_function_call(event, __perf_remove_from_context, (void *)flags);
|
|
}
|
|
|
|
/*
|
|
@@ -12361,14 +12380,17 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
|
|
}
|
|
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
|
|
|
|
-static void sync_child_event(struct perf_event *child_event,
|
|
- struct task_struct *child)
|
|
+static void sync_child_event(struct perf_event *child_event)
|
|
{
|
|
struct perf_event *parent_event = child_event->parent;
|
|
u64 child_val;
|
|
|
|
- if (child_event->attr.inherit_stat)
|
|
- perf_event_read_event(child_event, child);
|
|
+ if (child_event->attr.inherit_stat) {
|
|
+ struct task_struct *task = child_event->ctx->task;
|
|
+
|
|
+ if (task && task != TASK_TOMBSTONE)
|
|
+ perf_event_read_event(child_event, task);
|
|
+ }
|
|
|
|
child_val = perf_event_count(child_event);
|
|
|
|
@@ -12383,60 +12405,53 @@ static void sync_child_event(struct perf_event *child_event,
|
|
}
|
|
|
|
static void
|
|
-perf_event_exit_event(struct perf_event *child_event,
|
|
- struct perf_event_context *child_ctx,
|
|
- struct task_struct *child)
|
|
+perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
|
|
{
|
|
- struct perf_event *parent_event = child_event->parent;
|
|
+ struct perf_event *parent_event = event->parent;
|
|
+ unsigned long detach_flags = 0;
|
|
|
|
- /*
|
|
- * Do not destroy the 'original' grouping; because of the context
|
|
- * switch optimization the original events could've ended up in a
|
|
- * random child task.
|
|
- *
|
|
- * If we were to destroy the original group, all group related
|
|
- * operations would cease to function properly after this random
|
|
- * child dies.
|
|
- *
|
|
- * Do destroy all inherited groups, we don't care about those
|
|
- * and being thorough is better.
|
|
- */
|
|
- raw_spin_lock_irq(&child_ctx->lock);
|
|
- WARN_ON_ONCE(child_ctx->is_active);
|
|
+ if (parent_event) {
|
|
+ /*
|
|
+ * Do not destroy the 'original' grouping; because of the
|
|
+ * context switch optimization the original events could've
|
|
+ * ended up in a random child task.
|
|
+ *
|
|
+ * If we were to destroy the original group, all group related
|
|
+ * operations would cease to function properly after this
|
|
+ * random child dies.
|
|
+ *
|
|
+ * Do destroy all inherited groups, we don't care about those
|
|
+ * and being thorough is better.
|
|
+ */
|
|
+ detach_flags = DETACH_GROUP | DETACH_CHILD;
|
|
+ mutex_lock(&parent_event->child_mutex);
|
|
+ }
|
|
|
|
- if (parent_event)
|
|
- perf_group_detach(child_event);
|
|
- list_del_event(child_event, child_ctx);
|
|
- perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
|
|
- raw_spin_unlock_irq(&child_ctx->lock);
|
|
+ perf_remove_from_context(event, detach_flags);
|
|
+
|
|
+ raw_spin_lock_irq(&ctx->lock);
|
|
+ if (event->state > PERF_EVENT_STATE_EXIT)
|
|
+ perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
|
|
+ raw_spin_unlock_irq(&ctx->lock);
|
|
|
|
/*
|
|
- * Parent events are governed by their filedesc, retain them.
|
|
+ * Child events can be freed.
|
|
*/
|
|
- if (!parent_event) {
|
|
- perf_event_wakeup(child_event);
|
|
+ if (parent_event) {
|
|
+ mutex_unlock(&parent_event->child_mutex);
|
|
+ /*
|
|
+ * Kick perf_poll() for is_event_hup();
|
|
+ */
|
|
+ perf_event_wakeup(parent_event);
|
|
+ free_event(event);
|
|
+ put_event(parent_event);
|
|
return;
|
|
}
|
|
- /*
|
|
- * Child events can be cleaned up.
|
|
- */
|
|
-
|
|
- sync_child_event(child_event, child);
|
|
|
|
/*
|
|
- * Remove this event from the parent's list
|
|
- */
|
|
- WARN_ON_ONCE(parent_event->ctx->parent_ctx);
|
|
- mutex_lock(&parent_event->child_mutex);
|
|
- list_del_init(&child_event->child_list);
|
|
- mutex_unlock(&parent_event->child_mutex);
|
|
-
|
|
- /*
|
|
- * Kick perf_poll() for is_event_hup().
|
|
+ * Parent events are governed by their filedesc, retain them.
|
|
*/
|
|
- perf_event_wakeup(parent_event);
|
|
- free_event(child_event);
|
|
- put_event(parent_event);
|
|
+ perf_event_wakeup(event);
|
|
}
|
|
|
|
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
|
|
@@ -12493,7 +12508,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
|
|
perf_event_task(child, child_ctx, 0);
|
|
|
|
list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
|
|
- perf_event_exit_event(child_event, child_ctx, child);
|
|
+ perf_event_exit_event(child_event, child_ctx);
|
|
|
|
mutex_unlock(&child_ctx->mutex);
|
|
|
|
@@ -12753,6 +12768,7 @@ inherit_event(struct perf_event *parent_event,
|
|
*/
|
|
raw_spin_lock_irqsave(&child_ctx->lock, flags);
|
|
add_event_to_ctx(child_event, child_ctx);
|
|
+ child_event->attach_state |= PERF_ATTACH_CHILD;
|
|
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
|
|
|
|
/*
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index ab3df9e86a1fc..57662f970c06e 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -3712,8 +3712,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
|
|
|
|
if (op & FUTEX_CLOCK_REALTIME) {
|
|
flags |= FLAGS_CLOCKRT;
|
|
- if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
|
|
- cmd != FUTEX_WAIT_REQUEUE_PI)
|
|
+ if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
|
|
return -ENOSYS;
|
|
}
|
|
|
|
@@ -3783,7 +3782,7 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
|
|
t = timespec64_to_ktime(ts);
|
|
if (cmd == FUTEX_WAIT)
|
|
t = ktime_add_safe(ktime_get(), t);
|
|
- else if (!(op & FUTEX_CLOCK_REALTIME))
|
|
+ else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
|
|
t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
|
|
tp = &t;
|
|
}
|
|
@@ -3977,7 +3976,7 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
|
|
t = timespec64_to_ktime(ts);
|
|
if (cmd == FUTEX_WAIT)
|
|
t = ktime_add_safe(ktime_get(), t);
|
|
- else if (!(op & FUTEX_CLOCK_REALTIME))
|
|
+ else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
|
|
t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
|
|
tp = &t;
|
|
}
|
|
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
|
|
index 651a4ad6d711f..8e586858bcf41 100644
|
|
--- a/kernel/irq/matrix.c
|
|
+++ b/kernel/irq/matrix.c
|
|
@@ -423,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
|
|
if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
|
|
return;
|
|
|
|
- clear_bit(bit, cm->alloc_map);
|
|
+ if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
|
|
+ return;
|
|
+
|
|
cm->allocated--;
|
|
if(managed)
|
|
cm->managed_allocated--;
|
|
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
|
|
index 3bf98db9c702d..23e7acb5c6679 100644
|
|
--- a/kernel/kcsan/core.c
|
|
+++ b/kernel/kcsan/core.c
|
|
@@ -639,8 +639,6 @@ void __init kcsan_init(void)
|
|
|
|
BUG_ON(!in_task());
|
|
|
|
- kcsan_debugfs_init();
|
|
-
|
|
for_each_possible_cpu(cpu)
|
|
per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
|
|
|
|
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
|
|
index 3c8093a371b1c..209ad8dcfcecf 100644
|
|
--- a/kernel/kcsan/debugfs.c
|
|
+++ b/kernel/kcsan/debugfs.c
|
|
@@ -261,7 +261,9 @@ static const struct file_operations debugfs_ops =
|
|
.release = single_release
|
|
};
|
|
|
|
-void __init kcsan_debugfs_init(void)
|
|
+static void __init kcsan_debugfs_init(void)
|
|
{
|
|
debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
|
|
}
|
|
+
|
|
+late_initcall(kcsan_debugfs_init);
|
|
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
|
|
index 8d4bf3431b3cc..87ccdb3b051fd 100644
|
|
--- a/kernel/kcsan/kcsan.h
|
|
+++ b/kernel/kcsan/kcsan.h
|
|
@@ -30,11 +30,6 @@ extern bool kcsan_enabled;
|
|
void kcsan_save_irqtrace(struct task_struct *task);
|
|
void kcsan_restore_irqtrace(struct task_struct *task);
|
|
|
|
-/*
|
|
- * Initialize debugfs file.
|
|
- */
|
|
-void kcsan_debugfs_init(void);
|
|
-
|
|
/*
|
|
* Statistics counters displayed via debugfs; should only be modified in
|
|
* slow-paths.
|
|
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
|
|
index ce17b8477442f..84a3fe09630b3 100644
|
|
--- a/kernel/rcu/tree.c
|
|
+++ b/kernel/rcu/tree.c
|
|
@@ -3439,7 +3439,7 @@ static void fill_page_cache_func(struct work_struct *work)
|
|
|
|
for (i = 0; i < rcu_min_cached_objs; i++) {
|
|
bnode = (struct kvfree_rcu_bulk_data *)
|
|
- __get_free_page(GFP_KERNEL | __GFP_NOWARN);
|
|
+ __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
|
|
|
|
if (bnode) {
|
|
raw_spin_lock_irqsave(&krcp->lock, flags);
|
|
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
|
|
index cdc1b7651c039..939c30ff8e98d 100644
|
|
--- a/kernel/rcu/tree_plugin.h
|
|
+++ b/kernel/rcu/tree_plugin.h
|
|
@@ -1645,7 +1645,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
return false;
|
|
}
|
|
- del_timer(&rdp->nocb_timer);
|
|
+
|
|
+ if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
|
|
+ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
|
|
+ del_timer(&rdp->nocb_timer);
|
|
+ }
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
|
|
if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
|
|
@@ -2166,7 +2170,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
|
|
return false;
|
|
}
|
|
ndw = READ_ONCE(rdp->nocb_defer_wakeup);
|
|
- WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
|
|
ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
|
|
|
|
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
|
index bbc78794224ac..828978320e447 100644
|
|
--- a/kernel/sched/fair.c
|
|
+++ b/kernel/sched/fair.c
|
|
@@ -700,7 +700,13 @@ static u64 __sched_period(unsigned long nr_running)
|
|
*/
|
|
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
|
{
|
|
- u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
|
|
+ unsigned int nr_running = cfs_rq->nr_running;
|
|
+ u64 slice;
|
|
+
|
|
+ if (sched_feat(ALT_PERIOD))
|
|
+ nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
|
|
+
|
|
+ slice = __sched_period(nr_running + !se->on_rq);
|
|
|
|
for_each_sched_entity(se) {
|
|
struct load_weight *load;
|
|
@@ -717,6 +723,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
|
}
|
|
slice = __calc_delta(slice, se->load.weight, load);
|
|
}
|
|
+
|
|
+ if (sched_feat(BASE_SLICE))
|
|
+ slice = max(slice, (u64)sysctl_sched_min_granularity);
|
|
+
|
|
return slice;
|
|
}
|
|
|
|
@@ -3959,6 +3969,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
|
|
trace_sched_util_est_cfs_tp(cfs_rq);
|
|
}
|
|
|
|
+#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
|
|
+
|
|
/*
|
|
* Check if a (signed) value is within a specified (unsigned) margin,
|
|
* based on the observation that:
|
|
@@ -3976,7 +3988,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
|
|
struct task_struct *p,
|
|
bool task_sleep)
|
|
{
|
|
- long last_ewma_diff;
|
|
+ long last_ewma_diff, last_enqueued_diff;
|
|
struct util_est ue;
|
|
|
|
if (!sched_feat(UTIL_EST))
|
|
@@ -3997,6 +4009,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
|
|
if (ue.enqueued & UTIL_AVG_UNCHANGED)
|
|
return;
|
|
|
|
+ last_enqueued_diff = ue.enqueued;
|
|
+
|
|
/*
|
|
* Reset EWMA on utilization increases, the moving average is used only
|
|
* to smooth utilization decreases.
|
|
@@ -4010,12 +4024,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
|
|
}
|
|
|
|
/*
|
|
- * Skip update of task's estimated utilization when its EWMA is
|
|
+ * Skip update of task's estimated utilization when its members are
|
|
* already ~1% close to its last activation value.
|
|
*/
|
|
last_ewma_diff = ue.enqueued - ue.ewma;
|
|
- if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
|
|
+ last_enqueued_diff -= ue.enqueued;
|
|
+ if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
|
|
+ if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
|
|
+ goto done;
|
|
+
|
|
return;
|
|
+ }
|
|
|
|
/*
|
|
* To avoid overestimation of actual task utilization, skip updates if
|
|
@@ -7568,6 +7587,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
|
|
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
|
|
return 0;
|
|
|
|
+ /* Disregard pcpu kthreads; they are where they need to be. */
|
|
+ if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
|
|
+ return 0;
|
|
+
|
|
if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
|
|
int cpu;
|
|
|
|
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
|
|
index 68d369cba9e45..f1bf5e12d889e 100644
|
|
--- a/kernel/sched/features.h
|
|
+++ b/kernel/sched/features.h
|
|
@@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
|
|
*/
|
|
SCHED_FEAT(UTIL_EST, true)
|
|
SCHED_FEAT(UTIL_EST_FASTUP, true)
|
|
+
|
|
+SCHED_FEAT(ALT_PERIOD, true)
|
|
+SCHED_FEAT(BASE_SLICE, true)
|
|
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
|
|
index 967732c0766c5..651218ded9817 100644
|
|
--- a/kernel/sched/psi.c
|
|
+++ b/kernel/sched/psi.c
|
|
@@ -711,14 +711,15 @@ static void psi_group_change(struct psi_group *group, int cpu,
|
|
for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
|
|
if (!(m & (1 << t)))
|
|
continue;
|
|
- if (groupc->tasks[t] == 0 && !psi_bug) {
|
|
+ if (groupc->tasks[t]) {
|
|
+ groupc->tasks[t]--;
|
|
+ } else if (!psi_bug) {
|
|
printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
|
|
cpu, t, groupc->tasks[0],
|
|
groupc->tasks[1], groupc->tasks[2],
|
|
groupc->tasks[3], clear, set);
|
|
psi_bug = 1;
|
|
}
|
|
- groupc->tasks[t]--;
|
|
}
|
|
|
|
for (t = 0; set; set &= ~(1 << t), t++)
|
|
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
|
|
index 5d3675c7a76be..ab5ebf17f30a6 100644
|
|
--- a/kernel/sched/topology.c
|
|
+++ b/kernel/sched/topology.c
|
|
@@ -723,35 +723,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
|
|
for (tmp = sd; tmp; tmp = tmp->parent)
|
|
numa_distance += !!(tmp->flags & SD_NUMA);
|
|
|
|
- /*
|
|
- * FIXME: Diameter >=3 is misrepresented.
|
|
- *
|
|
- * Smallest diameter=3 topology is:
|
|
- *
|
|
- * node 0 1 2 3
|
|
- * 0: 10 20 30 40
|
|
- * 1: 20 10 20 30
|
|
- * 2: 30 20 10 20
|
|
- * 3: 40 30 20 10
|
|
- *
|
|
- * 0 --- 1 --- 2 --- 3
|
|
- *
|
|
- * NUMA-3 0-3 N/A N/A 0-3
|
|
- * groups: {0-2},{1-3} {1-3},{0-2}
|
|
- *
|
|
- * NUMA-2 0-2 0-3 0-3 1-3
|
|
- * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2}
|
|
- *
|
|
- * NUMA-1 0-1 0-2 1-3 2-3
|
|
- * groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2}
|
|
- *
|
|
- * NUMA-0 0 1 2 3
|
|
- *
|
|
- * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
|
|
- * group span isn't a subset of the domain span.
|
|
- */
|
|
- WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
|
|
-
|
|
sched_domain_debug(sd, cpu);
|
|
|
|
rq_attach_root(rq, rd);
|
|
@@ -982,6 +953,31 @@ static void init_overlap_sched_group(struct sched_domain *sd,
|
|
sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
|
|
}
|
|
|
|
+static struct sched_domain *
|
|
+find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
|
|
+{
|
|
+ /*
|
|
+ * The proper descendant would be the one whose child won't span out
|
|
+ * of sd
|
|
+ */
|
|
+ while (sibling->child &&
|
|
+ !cpumask_subset(sched_domain_span(sibling->child),
|
|
+ sched_domain_span(sd)))
|
|
+ sibling = sibling->child;
|
|
+
|
|
+ /*
|
|
+ * As we are referencing sgc across different topology level, we need
|
|
+ * to go down to skip those sched_domains which don't contribute to
|
|
+ * scheduling because they will be degenerated in cpu_attach_domain
|
|
+ */
|
|
+ while (sibling->child &&
|
|
+ cpumask_equal(sched_domain_span(sibling->child),
|
|
+ sched_domain_span(sibling)))
|
|
+ sibling = sibling->child;
|
|
+
|
|
+ return sibling;
|
|
+}
|
|
+
|
|
static int
|
|
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
|
|
{
|
|
@@ -1015,6 +1011,41 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
continue;

+ /*
+ * Usually we build sched_group by sibling's child sched_domain
+ * But for machines whose NUMA diameter are 3 or above, we move
+ * to build sched_group by sibling's proper descendant's child
+ * domain because sibling's child sched_domain will span out of
+ * the sched_domain being built as below.
+ *
+ * Smallest diameter=3 topology is:
+ *
+ * node 0 1 2 3
+ * 0: 10 20 30 40
+ * 1: 20 10 20 30
+ * 2: 30 20 10 20
+ * 3: 40 30 20 10
+ *
+ * 0 --- 1 --- 2 --- 3
+ *
+ * NUMA-3 0-3 N/A N/A 0-3
+ * groups: {0-2},{1-3} {1-3},{0-2}
+ *
+ * NUMA-2 0-2 0-3 0-3 1-3
+ * groups: {0-1},{1-3} {0-2},{2-3} {1-3},{0-1} {2-3},{0-2}
+ *
+ * NUMA-1 0-1 0-2 1-3 2-3
+ * groups: {0},{1} {1},{2},{0} {2},{3},{1} {3},{2}
+ *
+ * NUMA-0 0 1 2 3
+ *
+ * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
+ * group span isn't a subset of the domain span.
+ */
+ if (sibling->child &&
+ !cpumask_subset(sched_domain_span(sibling->child), span))
+ sibling = find_descended_sibling(sd, sibling);
+
sg = build_group_from_child_sched_domain(sibling, cpu);
if (!sg)
goto fail;
@@ -1022,7 +1053,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
sg_span = sched_group_span(sg);
cpumask_or(covered, covered, sg_span);

- init_overlap_sched_group(sd, sg);
+ init_overlap_sched_group(sibling, sg);

if (!first)
first = sg;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index bf540f5a4115a..dd5697d7347b1 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,

err = do_clock_adjtime(which_clock, &ktx);

- if (err >= 0)
- err = put_old_timex32(utp, &ktx);
+ if (err >= 0 && put_old_timex32(utp, &ktx))
+ return -EFAULT;

return err;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3ba52d4e13142..826b88b727a62 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5631,7 +5631,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)

parser = &iter->parser;
if (trace_parser_loaded(parser)) {
- ftrace_match_records(iter->hash, parser->buffer, parser->idx);
+ int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
+
+ ftrace_process_regex(iter, parser->buffer,
+ parser->idx, enable);
}

trace_parser_put(parser);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c27b05aeb7d2d..f0f50f59c3ae8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2387,14 +2387,13 @@ static void tracing_stop_tr(struct trace_array *tr)

static int trace_save_cmdline(struct task_struct *tsk)
{
- unsigned pid, idx;
+ unsigned tpid, idx;

/* treat recording of idle task as a success */
if (!tsk->pid)
return 1;

- if (unlikely(tsk->pid > PID_MAX_DEFAULT))
- return 0;
+ tpid = tsk->pid & (PID_MAX_DEFAULT - 1);

/*
* It's not the end of the world if we don't get
@@ -2405,26 +2404,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
if (!arch_spin_trylock(&trace_cmdline_lock))
return 0;

- idx = savedcmd->map_pid_to_cmdline[tsk->pid];
+ idx = savedcmd->map_pid_to_cmdline[tpid];
if (idx == NO_CMDLINE_MAP) {
idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

- /*
- * Check whether the cmdline buffer at idx has a pid
- * mapped. We are going to overwrite that entry so we
- * need to clear the map_pid_to_cmdline. Otherwise we
- * would read the new comm for the old pid.
- */
- pid = savedcmd->map_cmdline_to_pid[idx];
- if (pid != NO_CMDLINE_MAP)
- savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
-
- savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
- savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
-
+ savedcmd->map_pid_to_cmdline[tpid] = idx;
savedcmd->cmdline_idx = idx;
}

+ savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
set_cmdline(idx, tsk->comm);

arch_spin_unlock(&trace_cmdline_lock);
@@ -2435,6 +2423,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
static void __trace_find_cmdline(int pid, char comm[])
{
unsigned map;
+ int tpid;

if (!pid) {
strcpy(comm, "<idle>");
@@ -2446,16 +2435,16 @@ static void __trace_find_cmdline(int pid, char comm[])
return;
}

- if (pid > PID_MAX_DEFAULT) {
- strcpy(comm, "<...>");
- return;
+ tpid = pid & (PID_MAX_DEFAULT - 1);
+ map = savedcmd->map_pid_to_cmdline[tpid];
+ if (map != NO_CMDLINE_MAP) {
+ tpid = savedcmd->map_cmdline_to_pid[map];
+ if (tpid == pid) {
+ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+ return;
+ }
}
-
- map = savedcmd->map_pid_to_cmdline[pid];
- if (map != NO_CMDLINE_MAP)
- strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
- else
- strcpy(comm, "<...>");
+ strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index aaf6793ededaa..c1637f90c8a38 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
{
unsigned long flags;
int this_cpu;
- u64 now;
+ u64 now, prev_time;

raw_local_irq_save(flags);

this_cpu = raw_smp_processor_id();
- now = sched_clock_cpu(this_cpu);
+
/*
- * If in an NMI context then dont risk lockups and return the
- * cpu_clock() time:
+ * The global clock "guarantees" that the events are ordered
+ * between CPUs. But if two events on two different CPUS call
+ * trace_clock_global at roughly the same time, it really does
+ * not matter which one gets the earlier time. Just make sure
+ * that the same CPU will always show a monotonic clock.
+ *
+ * Use a read memory barrier to get the latest written
+ * time that was recorded.
*/
- if (unlikely(in_nmi()))
- goto out;
+ smp_rmb();
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ now = sched_clock_cpu(this_cpu);

- arch_spin_lock(&trace_clock_struct.lock);
+ /* Make sure that now is always greater than prev_time */
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time + 1;

/*
- * TODO: if this happens often then maybe we should reset
- * my_scd->clock to prev_time+1, to make sure
- * we start ticking with the local clock from now on?
+ * If in an NMI context then dont risk lockups and simply return
+ * the current time.
*/
- if ((s64)(now - trace_clock_struct.prev_time) < 0)
- now = trace_clock_struct.prev_time + 1;
+ if (unlikely(in_nmi()))
+ goto out;

- trace_clock_struct.prev_time = now;
+ /* Tracing can cause strange recursion, always use a try lock */
+ if (arch_spin_trylock(&trace_clock_struct.lock)) {
+ /* Reread prev_time in case it was already updated */
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time + 1;

- arch_spin_unlock(&trace_clock_struct.lock);
+ trace_clock_struct.prev_time = now;

+ /* The unlock acts as the wmb for the above rmb */
+ arch_spin_unlock(&trace_clock_struct.lock);
+ }
out:
raw_local_irq_restore(flags);

diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c70d6347afa2b..921d0a654243c 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
/* tail :$info is function or line-range */
fline = strchr(query->filename, ':');
if (!fline)
- break;
+ continue;
*fline++ = '\0';
if (isalpha(*fline) || *fline == '*' || *fline == '?') {
/* take as function name */
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3b53c73580c57..455f8271fd493 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -3103,8 +3103,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
switch (*fmt) {
case 'S':
case 's':
- case 'F':
- case 'f':
case 'x':
case 'K':
case 'e':
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a723e81a5da2f..d65d4481c40c5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -764,32 +764,36 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
*/
void init_mem_debugging_and_hardening(void)
{
+ bool page_poisoning_requested = false;
+
+#ifdef CONFIG_PAGE_POISONING
+ /*
+ * Page poisoning is debug page alloc for some arches. If
+ * either of those options are enabled, enable poisoning.
+ */
+ if (page_poisoning_enabled() ||
+ (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+ debug_pagealloc_enabled())) {
+ static_branch_enable(&_page_poisoning_enabled);
+ page_poisoning_requested = true;
+ }
+#endif
+
if (_init_on_alloc_enabled_early) {
- if (page_poisoning_enabled())
+ if (page_poisoning_requested)
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_alloc\n");
else
static_branch_enable(&init_on_alloc);
}
if (_init_on_free_enabled_early) {
- if (page_poisoning_enabled())
+ if (page_poisoning_requested)
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_free\n");
else
static_branch_enable(&init_on_free);
}

-#ifdef CONFIG_PAGE_POISONING
- /*
- * Page poisoning is debug page alloc for some arches. If
- * either of those options are enabled, enable poisoning.
- */
- if (page_poisoning_enabled() ||
- (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
- debug_pagealloc_enabled()))
- static_branch_enable(&_page_poisoning_enabled);
-#endif
-
#ifdef CONFIG_DEBUG_PAGEALLOC
if (!debug_pagealloc_enabled())
return;
diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
index a6f8d03d4aaf6..830723971cf83 100644
--- a/net/bluetooth/ecdh_helper.h
+++ b/net/bluetooth/ecdh_helper.h
@@ -25,6 +25,6 @@

int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
u8 secret[32]);
-int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
+int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index ca44c327baced..79641c4afee93 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
if (ret < 0)
return ret;

- auth->struct_v = 2; /* nautilus+ */
+ auth->struct_v = 3; /* nautilus+ */
auth->key = 0;
for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
auth->key ^= *(__le64 *)u;
diff --git a/net/ceph/decode.c b/net/ceph/decode.c
index b44f7651be04b..bc109a1a4616f 100644
--- a/net/ceph/decode.c
+++ b/net/ceph/decode.c
@@ -4,6 +4,7 @@
#include <linux/inet.h>

#include <linux/ceph/decode.h>
+#include <linux/ceph/messenger.h> /* for ceph_pr_addr() */

static int
ceph_decode_entity_addr_versioned(void **p, void *end,
@@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
}

ceph_decode_32_safe(p, end, addr_cnt, e_inval);
+ dout("%s addr_cnt %d\n", __func__, addr_cnt);

found = false;
for (i = 0; i < addr_cnt; i++) {
@@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
if (ret)
return ret;

+ dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
if (tmp_addr.type == my_type) {
if (found) {
pr_err("another match of type %d in addrvec\n",
@@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
found = true;
}
}
- if (!found && addr_cnt != 0) {
- pr_err("no match of type %d in addrvec\n",
- le32_to_cpu(my_type));
- return -ENOENT;
- }

- return 0;
+ if (found)
+ return 0;
+
+ if (!addr_cnt)
+ return 0; /* normal -- e.g. unused OSD id/slot */
+
+ if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
+ return 0; /* weird but effectively the same as !addr_cnt */
+
+ pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
+ return -ENOENT;

e_inval:
return -EINVAL;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e8902a7e60f24..fc487f9812fc5 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
}

if (key->eth.type == htons(ETH_P_IP)) {
- struct dst_entry ovs_dst;
+ struct rtable ovs_rt = { 0 };
unsigned long orig_dst;

prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));
- dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
+ dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
DST_OBSOLETE_NONE, DST_NOCOUNT);
- ovs_dst.dev = vport->dev;
+ ovs_rt.dst.dev = vport->dev;

orig_dst = skb->_skb_refdst;
- skb_dst_set_noref(skb, &ovs_dst);
+ skb_dst_set_noref(skb, &ovs_rt.dst);
IPCB(skb)->frag_max_size = mru;

ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
index e1e77d3fb6c02..8c06381391d6f 100644
--- a/net/sched/sch_frag.c
+++ b/net/sched/sch_frag.c
@@ -90,16 +90,16 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
}

if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
- struct dst_entry sch_frag_dst;
+ struct rtable sch_frag_rt = { 0 };
unsigned long orig_dst;

sch_frag_prepare_frag(skb, xmit);
- dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
+ dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
DST_OBSOLETE_NONE, DST_NOCOUNT);
- sch_frag_dst.dev = skb->dev;
+ sch_frag_rt.dst.dev = skb->dev;

orig_dst = skb->_skb_refdst;
- skb_dst_set_noref(skb, &sch_frag_dst);
+ skb_dst_set_noref(skb, &sch_frag_rt.dst);
IPCB(skb)->frag_max_size = mru;

ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
diff --git a/security/commoncap.c b/security/commoncap.c
index 26c1cb725dcbe..2bdeacd32e3fc 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -391,7 +391,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
&tmpbuf, size, GFP_NOFS);
dput(dentry);

- if (ret < 0)
+ if (ret < 0 || !tmpbuf)
return ret;

fs_ns = inode->i_sb->s_user_ns;
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index 0aa545ac6e60c..1c90421a88dcd 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)

memset(emu->controls, 0, sizeof(emu->controls));
for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
- if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
+ if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
+ emu->controls[i] = NULL;
goto __error;
+ }
}
return 0;

diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 270af863e198b..1528e04a4d28e 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)

spin_lock_init(&p->q_lock);

- if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
+ if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
+ p->qsound_switch = NULL;
goto __error;
- if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
+ }
+ if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
+ p->qsound_space = NULL;
goto __error;
+ }

return 0;

diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 7aa9062f4f838..8098088b00568 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -930,18 +930,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
- SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
- SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a7544b77d3f7c..d05d16ddbdf2c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2552,8 +2552,10 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
@@ -4438,6 +4440,25 @@ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
alc236_fixup_hp_coef_micmute_led(codec, fix, action);
}

+static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->cap_mute_led_nid = 0x1a;
+ snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
+ codec->power_filter = led_power_filter;
+ }
+}
+
+static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
+ alc236_fixup_hp_micmute_led_vref(codec, fix, action);
+}
+
#if IS_REACHABLE(CONFIG_INPUT)
static void gpio2_mic_hotkey_event(struct hda_codec *codec,
struct hda_jack_callback *event)
@@ -6400,6 +6421,7 @@ enum {
ALC285_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_GPIO_LED,
ALC236_FIXUP_HP_MUTE_LED,
+ ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
@@ -6415,6 +6437,8 @@ enum {
ALC269_FIXUP_LEMOTE_A1802,
ALC269_FIXUP_LEMOTE_A190X,
ALC256_FIXUP_INTEL_NUC8_RUGGED,
+ ALC233_FIXUP_INTEL_NUC8_DMIC,
+ ALC233_FIXUP_INTEL_NUC8_BOOST,
ALC256_FIXUP_INTEL_NUC10,
ALC255_FIXUP_XIAOMI_HEADSET_MIC,
ALC274_FIXUP_HP_MIC,
@@ -7136,6 +7160,16 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
},
+ [ALC233_FIXUP_INTEL_NUC8_DMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic,
+ .chained = true,
+ .chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST,
+ },
+ [ALC233_FIXUP_INTEL_NUC8_BOOST] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost
+ },
[ALC255_FIXUP_DELL_SPK_NOISE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_disable_aamix,
@@ -7646,6 +7680,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc236_fixup_hp_mute_led,
},
+ [ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc236_fixup_hp_mute_led_micmute_vref,
+ },
[ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -8051,6 +8089,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
@@ -8063,6 +8103,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
@@ -8113,6 +8154,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
@@ -8279,6 +8321,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),

@@ -8733,12 +8776,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x19, 0x03a11020},
{0x21, 0x0321101f}),
- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
- {0x14, 0x90170110},
- {0x19, 0x04a11040},
- {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
- {0x12, 0x90a60130},
{0x14, 0x90170110},
{0x19, 0x04a11040},
{0x21, 0x04211020}),
@@ -8909,6 +8947,10 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
{0x19, 0x40000000},
{0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ {0x14, 0x90170110},
+ {0x19, 0x04a11040},
+ {0x21, 0x04211020}),
{}
};

diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 771b652329571..a2901b6ee1baa 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -296,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,

selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
if (selector) {
- int ret, i, cur;
+ int ret, i, cur, err;

/* the entity ID we are looking for is a selector.
* find out what it currently selects */
@@ -318,13 +318,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
ret = __uac_clock_find_source(chip, fmt,
selector->baCSourceID[ret - 1],
visited, validate);
+ if (ret > 0) {
+ err = uac_clock_selector_set_val(chip, entity_id, cur);
+ if (err < 0)
+ return err;
+ }
+
if (!validate || ret > 0 || !chip->autoclock)
return ret;

/* The current clock source is invalid, try others. */
for (i = 1; i <= selector->bNrInPins; i++) {
- int err;
-
if (i == cur)
continue;

@@ -390,7 +394,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,

selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
if (selector) {
- int ret, i, cur;
+ int ret, i, cur, err;

/* the entity ID we are looking for is a selector.
* find out what it currently selects */
@@ -412,6 +416,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
ret = __uac3_clock_find_source(chip, fmt,
selector->baCSourceID[ret - 1],
visited, validate);
+ if (ret > 0) {
+ err = uac_clock_selector_set_val(chip, entity_id, cur);
+ if (err < 0)
+ return err;
+ }
+
if (!validate || ret > 0 || !chip->autoclock)
return ret;

diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 646deb6244b15..c5794e83fd800 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -337,6 +337,13 @@ static const struct usbmix_name_map bose_companion5_map[] = {
{ 0 } /* terminator */
};

+/* Sennheiser Communications Headset [PC 8], the dB value is reported as -6 negative maximum */
+static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
+static const struct usbmix_name_map sennheiser_pc8_map[] = {
+ { 9, NULL, .dB = &sennheiser_pc8_dB },
+ { 0 } /* terminator */
+};
+
/*
* Dell usb dock with ALC4020 codec had a firmware problem where it got
* screwed up when zero volume is passed; just skip it as a workaround
@@ -593,6 +600,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
.id = USB_ID(0x17aa, 0x1046),
.map = lenovo_p620_rear_map,
},
+ {
+ /* Sennheiser Communications Headset [PC 8] */
+ .id = USB_ID(0x1395, 0x0025),
+ .map = sennheiser_pc8_map,
+ },
{ 0 } /* terminator */
};

diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
index e105fece47b61..f32ce0362eb7b 100644
--- a/tools/power/x86/intel-speed-select/isst-display.c
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
index = snprintf(&str[curr_index],
str_len - curr_index, ",");
curr_index += index;
+ if (curr_index >= str_len)
+ break;
}
index = snprintf(&str[curr_index], str_len - curr_index, "%d",
i);
curr_index += index;
+ if (curr_index >= str_len)
+ break;
first = 0;
}
}
@@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
mask[i]);
curr_index += index;
+ if (curr_index >= str_len)
+ break;
if (i) {
strncat(&str[curr_index], ",", str_len - curr_index);
curr_index++;
}
+ if (curr_index >= str_len)
+ break;
}

free(mask);
@@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
int disp_level)
{
char header[256];
- char value[256];
+ char value[512];

snprintf(header, sizeof(header), "speed-select-base-freq-properties");
format_and_print(outf, disp_level, header, NULL);
@@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
struct isst_pkg_ctdp *pkg_dev)
{
char header[256];
- char value[256];
+ char value[512];
static int level;
int i;

diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index a7c4f0772e534..490c9a496fe28 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -291,13 +291,16 @@ struct msr_sum_array {
/* The percpu MSR sum array.*/
struct msr_sum_array *per_cpu_msr_sum;

-int idx_to_offset(int idx)
+off_t idx_to_offset(int idx)
{
- int offset;
+ off_t offset;

switch (idx) {
case IDX_PKG_ENERGY:
- offset = MSR_PKG_ENERGY_STATUS;
+ if (do_rapl & RAPL_AMD_F17H)
+ offset = MSR_PKG_ENERGY_STAT;
+ else
+ offset = MSR_PKG_ENERGY_STATUS;
break;
case IDX_DRAM_ENERGY:
offset = MSR_DRAM_ENERGY_STATUS;
@@ -320,12 +323,13 @@ int idx_to_offset(int idx)
return offset;
}

-int offset_to_idx(int offset)
+int offset_to_idx(off_t offset)
{
int idx;

switch (offset) {
case MSR_PKG_ENERGY_STATUS:
+ case MSR_PKG_ENERGY_STAT:
idx = IDX_PKG_ENERGY;
break;
case MSR_DRAM_ENERGY_STATUS:
@@ -353,7 +357,7 @@ int idx_valid(int idx)
{
switch (idx) {
case IDX_PKG_ENERGY:
- return do_rapl & RAPL_PKG;
+ return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
case IDX_DRAM_ENERGY:
return do_rapl & RAPL_DRAM;
case IDX_PP0_ENERGY:
@@ -3272,7 +3276,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg

for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
unsigned long long msr_cur, msr_last;
- int offset;
+ off_t offset;

if (!idx_valid(i))
continue;
@@ -3281,7 +3285,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
continue;
ret = get_msr(cpu, offset, &msr_cur);
if (ret) {
- fprintf(outf, "Can not update msr(0x%x)\n", offset);
+ fprintf(outf, "Can not update msr(0x%llx)\n",
+ (unsigned long long)offset);
continue;
}

diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
index 0b3af552632a6..df15d44aeb8d4 100644
--- a/tools/testing/selftests/arm64/mte/Makefile
+++ b/tools/testing/selftests/arm64/mte/Makefile
@@ -6,9 +6,7 @@ SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
PROGS := $(patsubst %.c,%,$(SRCS))

#Add mte compiler option
-ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
CFLAGS += -march=armv8.5-a+memtag
-endif

#check if the compiler works well
mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
index 39f8908988eab..70665ba88cbb1 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
@@ -278,22 +278,13 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
return 0;
}

-#define ID_AA64PFR1_MTE_SHIFT 8
-#define ID_AA64PFR1_MTE 2
-
int mte_default_setup(void)
{
- unsigned long hwcaps = getauxval(AT_HWCAP);
+ unsigned long hwcaps2 = getauxval(AT_HWCAP2);
unsigned long en = 0;
int ret;

- if (!(hwcaps & HWCAP_CPUID)) {
- ksft_print_msg("FAIL: CPUID registers unavailable\n");
- return KSFT_FAIL;
- }
- /* Read ID_AA64PFR1_EL1 register */
- asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
- if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
+ if (!(hwcaps2 & HWCAP2_MTE)) {
ksft_print_msg("FAIL: MTE features unavailable\n");
return KSFT_SKIP;
}
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
index d585cc1948cc7..6bcee2ec91a9c 100644
--- a/tools/testing/selftests/resctrl/Makefile
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -1,5 +1,5 @@
CC = $(CROSS_COMPILE)gcc
-CFLAGS = -g -Wall
+CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
SRCS=$(wildcard *.c)
OBJS=$(SRCS:.c=.o)

diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
index 38dbf4962e333..5922cc1b03867 100644
--- a/tools/testing/selftests/resctrl/cache.c
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
/*
* Measure cache miss from perf.
*/
- if (!strcmp(param->resctrl_val, "cat")) {
+ if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = get_llc_perf(&llc_perf_miss);
if (ret < 0)
return ret;
@@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
/*
* Measure llc occupancy from resctrl.
*/
- if (!strcmp(param->resctrl_val, "cqm")) {
+ if (!strncmp(param->resctrl_val, CQM_STR, sizeof(CQM_STR))) {
ret = get_llc_occu_resctrl(&llc_occu_resc);
if (ret < 0)
return ret;
@@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
if (ret)
return ret;

- if ((strcmp(resctrl_val, "cat") == 0)) {
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = initialize_llc_perf();
if (ret)
return ret;
@@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)

/* Test runs until the callback setup() tells the test to stop. */
while (1) {
- if (strcmp(resctrl_val, "cat") == 0) {
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = param->setup(1, param);
if (ret) {
ret = 0;
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index 5da43767b9731..20823725daca5 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -17,10 +17,10 @@
#define MAX_DIFF_PERCENT 4
#define MAX_DIFF 1000000

-int count_of_bits;
-char cbm_mask[256];
-unsigned long long_mask;
-unsigned long cache_size;
+static int count_of_bits;
+static char cbm_mask[256];
+static unsigned long long_mask;
+static unsigned long cache_size;

/*
* Change schemata. Write schemata to specified
@@ -136,7 +136,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
return -1;

/* Get default cbm mask for L3/L2 cache */
- ret = get_cbm_mask(cache_type);
+ ret = get_cbm_mask(cache_type, cbm_mask);
if (ret)
return ret;

@@ -164,7 +164,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
return -1;

struct resctrl_val_param param = {
- .resctrl_val = "cat",
+ .resctrl_val = CAT_STR,
.cpu_no = cpu_no,
.mum_resctrlfs = 0,
.setup = cat_setup,
diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
index c8756152bd615..271752e9ef5be 100644
--- a/tools/testing/selftests/resctrl/cqm_test.c
+++ b/tools/testing/selftests/resctrl/cqm_test.c
@@ -16,10 +16,10 @@
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15

-int count_of_bits;
-char cbm_mask[256];
-unsigned long long_mask;
-unsigned long cache_size;
+static int count_of_bits;
+static char cbm_mask[256];
+static unsigned long long_mask;
+static unsigned long cache_size;

static int cqm_setup(int num, ...)
{
@@ -86,7 +86,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
return errno;
}

- while (fgets(temp, 1024, fp)) {
+ while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;

@@ -125,7 +125,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
if (!validate_resctrl_feature_request("cqm"))
return -1;

- ret = get_cbm_mask("L3");
+ ret = get_cbm_mask("L3", cbm_mask);
if (ret)
return ret;

@@ -145,7 +145,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
}

struct resctrl_val_param param = {
- .resctrl_val = "cqm",
+ .resctrl_val = CQM_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
index 79c611c99a3dd..51e5cf22632f7 100644
--- a/tools/testing/selftests/resctrl/fill_buf.c
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,

while (1) {
ret = fill_one_span_read(start_ptr, end_ptr);
- if (!strcmp(resctrl_val, "cat"))
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
break;
}

@@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
{
while (1) {
fill_one_span_write(start_ptr, end_ptr);
- if (!strcmp(resctrl_val, "cat"))
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
break;
}

diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
index 7bf8eaa6204bf..6449fbd96096a 100644
--- a/tools/testing/selftests/resctrl/mba_test.c
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -141,7 +141,7 @@ void mba_test_cleanup(void)
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
- .resctrl_val = "mba",
+ .resctrl_val = MBA_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
index 4700f7453f811..ec6cfe01c9c26 100644
--- a/tools/testing/selftests/resctrl/mbm_test.c
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -114,7 +114,7 @@ void mbm_test_cleanup(void)
int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
- .resctrl_val = "mbm",
+ .resctrl_val = MBM_STR,
.ctrlgrp = "c1",
.mongrp = "m1",
.span = span,
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 39bf59c6b9c56..9dcc96e1ad3d7 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -28,6 +28,10 @@
#define RESCTRL_PATH "/sys/fs/resctrl"
#define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
#define CBM_MASK_PATH "/sys/fs/resctrl/info"
+#define L3_PATH "/sys/fs/resctrl/info/L3"
+#define MB_PATH "/sys/fs/resctrl/info/MB"
+#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
+#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"

#define PARENT_EXIT(err_msg) \
do { \
@@ -62,11 +66,16 @@ struct resctrl_val_param {
int (*setup)(int num, ...);
};

-pid_t bm_pid, ppid;
-int tests_run;
+#define MBM_STR "mbm"
+#define MBA_STR "mba"
+#define CQM_STR "cqm"
+#define CAT_STR "cat"

-char llc_occup_path[1024];
-bool is_amd;
+extern pid_t bm_pid, ppid;
+extern int tests_run;
+
+extern char llc_occup_path[1024];
+extern bool is_amd;

bool check_resctrlfs_support(void);
int filter_dmesg(void);
@@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
int get_resource_id(int cpu_no, int *resource_id);
int umount_resctrlfs(void);
int validate_bw_report_request(char *bw_report);
-bool validate_resctrl_feature_request(char *resctrl_val);
+bool validate_resctrl_feature_request(const char *resctrl_val);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no);
void run_benchmark(int signum, siginfo_t *info, void *ucontext);
@@ -92,7 +101,7 @@ void tests_cleanup(void);
void mbm_test_cleanup(void);
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
void mba_test_cleanup(void);
-int get_cbm_mask(char *cache_type);
+int get_cbm_mask(char *cache_type, char *cbm_mask);
int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
int cat_val(struct resctrl_val_param *param);
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
index 425cc85ac8836..ac2269610aa9d 100644
--- a/tools/testing/selftests/resctrl/resctrl_tests.c
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -73,7 +73,7 @@ int main(int argc, char **argv)
}
}

- while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
+ while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
char *token;

switch (c) {
@@ -85,13 +85,13 @@ int main(int argc, char **argv)
cqm_test = false;
cat_test = false;
while (token) {
- if (!strcmp(token, "mbm")) {
+ if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
mbm_test = true;
- } else if (!strcmp(token, "mba")) {
+ } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
mba_test = true;
- } else if (!strcmp(token, "cqm")) {
+ } else if (!strncmp(token, CQM_STR, sizeof(CQM_STR))) {
cqm_test = true;
- } else if (!strcmp(token, "cat")) {
+ } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
cat_test = true;
} else {
printf("invalid argument\n");
@@ -161,7 +161,7 @@ int main(int argc, char **argv)
if (!is_amd && mbm_test) {
printf("# Starting MBM BW change ...\n");
if (!has_ben)
- sprintf(benchmark_cmd[5], "%s", "mba");
+ sprintf(benchmark_cmd[5], "%s", MBA_STR);
res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
printf("%sok MBM: bw change\n", res ? "not " : "");
mbm_test_cleanup();
@@ -181,7 +181,7 @@ int main(int argc, char **argv)
if (cqm_test) {
printf("# Starting CQM test ...\n");
if (!has_ben)
- sprintf(benchmark_cmd[5], "%s", "cqm");
+ sprintf(benchmark_cmd[5], "%s", CQM_STR);
res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
printf("%sok CQM: test\n", res ? "not " : "");
cqm_test_cleanup();
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index 520fea3606d17..8df557894059a 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
*/
static int num_of_imcs(void)
{
+ char imc_dir[512], *temp;
unsigned int count = 0;
- char imc_dir[512];
struct dirent *ep;
int ret;
DIR *dp;
@@ -230,7 +230,25 @@ static int num_of_imcs(void)
dp = opendir(DYN_PMU_PATH);
if (dp) {
while ((ep = readdir(dp))) {
- if (strstr(ep->d_name, UNCORE_IMC)) {
+ temp = strstr(ep->d_name, UNCORE_IMC);
+ if (!temp)
+ continue;
+
+ /*
+ * imc counters are named as "uncore_imc_<n>", hence
+ * increment the pointer to point to <n>. Note that
+ * sizeof(UNCORE_IMC) would count for null character as
+ * well and hence the last underscore character in
+ * uncore_imc'_' need not be counted.
+ */
+ temp = temp + sizeof(UNCORE_IMC);
+
+ /*
+ * Some directories under "DYN_PMU_PATH" could have
+ * names like "uncore_imc_free_running", hence, check if
+ * first character is a numerical digit or not.
+ */
+ if (temp[0] >= '0' && temp[0] <= '9') {
sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
ep->d_name);
ret = read_from_imc_dir(imc_dir, count);
@@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
* Memory B/W utilized by a process on a socket can be calculated using
* iMC counters. Perf events are used to read these counters.
*
- * Return: >= 0 on success. < 0 on failure.
+ * Return: = 0 on success. < 0 on failure.
*/
-static float get_mem_bw_imc(int cpu_no, char *bw_report)
+static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
{
float reads, writes, of_mul_read, of_mul_write;
int imc, j, ret;
@@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
close(imc_counters_config[imc][WRITE].fd);
}

- if (strcmp(bw_report, "reads") == 0)
- return reads;
+ if (strcmp(bw_report, "reads") == 0) {
+ *bw_imc = reads;
+ return 0;
+ }

- if (strcmp(bw_report, "writes") == 0)
- return writes;
+ if (strcmp(bw_report, "writes") == 0) {
+ *bw_imc = writes;
+ return 0;
+ }

- return (reads + writes);
+ *bw_imc = reads + writes;
+ return 0;
}

void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
|
void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
|
|
@@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
|
|
return;
|
|
}
|
|
|
|
- if (strcmp(resctrl_val, "mbm") == 0)
|
|
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
|
|
set_mbm_path(ctrlgrp, mongrp, resource_id);
|
|
|
|
- if ((strcmp(resctrl_val, "mba") == 0)) {
|
|
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
|
|
if (ctrlgrp)
|
|
sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
|
|
RESCTRL_PATH, ctrlgrp, resource_id);
|
|
@@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
|
|
* 1. If con_mon grp is given, then read from it
|
|
* 2. If con_mon grp is not given, then read from root con_mon grp
|
|
*/
|
|
-static unsigned long get_mem_bw_resctrl(void)
|
|
+static int get_mem_bw_resctrl(unsigned long *mbm_total)
|
|
{
|
|
- unsigned long mbm_total = 0;
|
|
FILE *fp;
|
|
|
|
fp = fopen(mbm_total_path, "r");
|
|
@@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
|
|
|
|
return -1;
|
|
}
|
|
- if (fscanf(fp, "%lu", &mbm_total) <= 0) {
|
|
+ if (fscanf(fp, "%lu", mbm_total) <= 0) {
|
|
perror("Could not get mbm local bytes");
|
|
fclose(fp);
|
|
|
|
@@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
|
|
}
|
|
fclose(fp);
|
|
|
|
- return mbm_total;
|
|
+ return 0;
|
|
}
|
|
|
|
pid_t bm_pid, ppid;
|
|
@@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
|
|
return;
|
|
}
|
|
|
|
- if (strcmp(resctrl_val, "cqm") == 0)
|
|
+ if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
|
|
set_cqm_path(ctrlgrp, mongrp, resource_id);
|
|
}
|
|
|
|
static int
|
|
measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
|
|
{
|
|
- unsigned long bw_imc, bw_resc, bw_resc_end;
|
|
+ unsigned long bw_resc, bw_resc_end;
|
|
+ float bw_imc;
|
|
int ret;
|
|
|
|
/*
|
|
@@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
|
|
* Compare the two values to validate resctrl value.
|
|
* It takes 1sec to measure the data.
|
|
*/
|
|
- bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
|
|
- if (bw_imc <= 0)
|
|
- return bw_imc;
|
|
+ ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
|
|
- bw_resc_end = get_mem_bw_resctrl();
|
|
- if (bw_resc_end <= 0)
|
|
- return bw_resc_end;
|
|
+ ret = get_mem_bw_resctrl(&bw_resc_end);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
|
|
bw_resc = (bw_resc_end - *bw_resc_start) / MB;
|
|
ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
|
|
@@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
|
if (strcmp(param->filename, "") == 0)
|
|
sprintf(param->filename, "stdio");
|
|
|
|
- if ((strcmp(resctrl_val, "mba")) == 0 ||
|
|
- (strcmp(resctrl_val, "mbm")) == 0) {
|
|
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
|
|
+ !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
|
|
ret = validate_bw_report_request(param->bw_report);
|
|
if (ret)
|
|
return ret;
|
|
@@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
|
if (ret)
|
|
goto out;
|
|
|
|
- if ((strcmp(resctrl_val, "mbm") == 0) ||
|
|
- (strcmp(resctrl_val, "mba") == 0)) {
|
|
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
|
|
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
|
|
ret = initialize_mem_bw_imc();
|
|
if (ret)
|
|
goto out;
|
|
|
|
initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
|
|
param->cpu_no, resctrl_val);
|
|
- } else if (strcmp(resctrl_val, "cqm") == 0)
|
|
+ } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
|
|
initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
|
|
param->cpu_no, resctrl_val);
|
|
|
|
@@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
|
|
|
/* Test runs until the callback setup() tells the test to stop. */
|
|
while (1) {
|
|
- if ((strcmp(resctrl_val, "mbm") == 0) ||
|
|
- (strcmp(resctrl_val, "mba") == 0)) {
|
|
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
|
|
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
|
|
ret = param->setup(1, param);
|
|
if (ret) {
|
|
ret = 0;
|
|
@@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
|
|
ret = measure_vals(param, &bw_resc_start);
|
|
if (ret)
|
|
break;
|
|
- } else if (strcmp(resctrl_val, "cqm") == 0) {
|
|
+ } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR))) {
|
|
ret = param->setup(1, param);
|
|
if (ret) {
|
|
ret = 0;
|
|
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 19c0ec4045a40..b57170f53861d 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -49,8 +49,6 @@ static int find_resctrl_mount(char *buffer)
return -ENOENT;
}

-char cbm_mask[256];
-
/*
* remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
* @mum_resctrlfs: Should the resctrl FS be remounted?
@@ -205,16 +203,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
/*
* get_cbm_mask - Get cbm mask for given cache
* @cache_type: Cache level L2/L3
- *
- * Mask is stored in cbm_mask which is global variable.
+ * @cbm_mask: cbm_mask returned as a string
*
* Return: = 0 on success, < 0 on failure.
*/
-int get_cbm_mask(char *cache_type)
+int get_cbm_mask(char *cache_type, char *cbm_mask)
{
char cbm_mask_path[1024];
FILE *fp;

+ if (!cbm_mask)
+ return -1;
+
sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);

fp = fopen(cbm_mask_path, "r");
@@ -334,7 +334,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
operation = atoi(benchmark_cmd[4]);
sprintf(resctrl_val, "%s", benchmark_cmd[5]);

- if (strcmp(resctrl_val, "cqm") != 0)
+ if (strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
buffer_span = span * MB;
else
buffer_span = span;
@@ -459,8 +459,8 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
goto out;

/* Create mon grp and write pid into it for "mbm" and "cqm" test */
- if ((strcmp(resctrl_val, "cqm") == 0) ||
- (strcmp(resctrl_val, "mbm") == 0)) {
+ if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)) ||
+ !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
if (strlen(mongrp)) {
sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
@@ -505,9 +505,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
int resource_id, ret = 0;
FILE *fp;

- if ((strcmp(resctrl_val, "mba") != 0) &&
- (strcmp(resctrl_val, "cat") != 0) &&
- (strcmp(resctrl_val, "cqm") != 0))
+ if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
+ strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
+ strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
return -ENOENT;

if (!schemata) {
@@ -528,9 +528,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
else
sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);

- if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
+ !strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
- if (strcmp(resctrl_val, "mba") == 0)
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);

fp = fopen(controlgroup, "w");
@@ -615,26 +616,56 @@ char *fgrep(FILE *inf, const char *str)
* validate_resctrl_feature_request - Check if requested feature is valid.
* @resctrl_val: Requested feature
*
- * Return: 0 on success, non-zero on failure
+ * Return: True if the feature is supported, else false
*/
-bool validate_resctrl_feature_request(char *resctrl_val)
+bool validate_resctrl_feature_request(const char *resctrl_val)
{
- FILE *inf = fopen("/proc/cpuinfo", "r");
+ struct stat statbuf;
bool found = false;
char *res;
+ FILE *inf;

- if (!inf)
+ if (!resctrl_val)
return false;

- res = fgrep(inf, "flags");
-
- if (res) {
- char *s = strchr(res, ':');
+ if (remount_resctrlfs(false))
+ return false;

- found = s && !strstr(s, resctrl_val);
- free(res);
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ if (!stat(L3_PATH, &statbuf))
+ return true;
+ } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ if (!stat(MB_PATH, &statbuf))
+ return true;
+ } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ if (!stat(L3_MON_PATH, &statbuf)) {
+ inf = fopen(L3_MON_FEATURES_PATH, "r");
+ if (!inf)
+ return false;
+
+ if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ res = fgrep(inf, "llc_occupancy");
+ if (res) {
+ found = true;
+ free(res);
+ }
+ }
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ res = fgrep(inf, "mbm_total_bytes");
+ if (res) {
+ free(res);
+ res = fgrep(inf, "mbm_local_bytes");
+ if (res) {
+ found = true;
+ free(res);
+ }
+ }
+ }
+ fclose(inf);
+ }
}
- fclose(inf);

return found;
}