mirror of https://github.com/armbian/build.git
synced 2025-09-19 12:41:39 +02:00
19134 lines
629 KiB
diff --git a/Makefile b/Makefile
index c5345f3ebed0d..38657b3dda2cd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 82
+SUBLEVEL = 83
EXTRAVERSION =
NAME = Curry Ramen

diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index efed325af88d2..d99bac02232b3 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -451,7 +451,7 @@ pb1176_serial3: serial@1010f000 {

/* Direct-mapped development chip ROM */
pb1176_rom@10200000 {
- compatible = "direct-mapped";
+ compatible = "mtd-rom";
reg = <0x10200000 0x4000>;
bank-width = <1>;
};
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index aacbf317feea6..4b7aee8958923 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -106,8 +106,6 @@ &fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii-id";
- phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
- phy-reset-duration = <20>;
phy-supply = <&sw2_reg>;
status = "okay";

@@ -120,17 +118,10 @@ mdio {
#address-cells = <1>;
#size-cells = <0>;

- phy_port2: phy@1 {
- reg = <1>;
- };
-
- phy_port3: phy@2 {
- reg = <2>;
- };
-
switch@10 {
compatible = "qca,qca8334";
- reg = <10>;
+ reg = <0x10>;
+ reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;

switch_ports: ports {
#address-cells = <1>;
@@ -151,15 +142,30 @@ fixed-link {
eth2: port@2 {
reg = <2>;
label = "eth2";
+ phy-mode = "internal";
phy-handle = <&phy_port2>;
};

eth1: port@3 {
reg = <3>;
label = "eth1";
+ phy-mode = "internal";
phy-handle = <&phy_port3>;
};
};
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy_port2: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ phy_port3: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index c4b2e9ac24940..5ea45e486ed54 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1134,7 +1134,7 @@ restart@fc4ab000 {

qfprom: qfprom@fc4bc000 {
compatible = "qcom,msm8974-qfprom", "qcom,qfprom";
- reg = <0xfc4bc000 0x1000>;
+ reg = <0xfc4bc000 0x2100>;
#address-cells = <1>;
#size-cells = <1>;
tsens_calib: calib@d0 {
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index e81a7213d3047..4282bafbb5043 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -209,6 +209,18 @@ &cmt1 {
status = "okay";
};

+&extal1_clk {
+ clock-frequency = <26000000>;
+};
+
+&extal2_clk {
+ clock-frequency = <48000000>;
+};
+
+&extalr_clk {
+ clock-frequency = <32768>;
+};
+
&pfc {
scifa0_pins: scifa0 {
groups = "scifa0_data";
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index c39066967053f..d1f4cbd099efb 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -450,17 +450,20 @@ clocks {
extalr_clk: extalr {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32768>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
};
extal1_clk: extal1 {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <25000000>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
};
extal2_clk: extal2 {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <48000000>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
};
fsiack_clk: fsiack {
compatible = "fixed-clock";
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index 433ee4ddce6c8..f85933fdec75f 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -24,8 +24,8 @@

#include "sha256_glue.h"

-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha256_block_data_order(struct sha256_state *state,
+ const u8 *data, int num_blks);

int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -33,23 +33,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
/* make sure casting to sha256_block_fn() is safe */
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);

- return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_do_update(desc, data, len, sha256_block_data_order);
}
EXPORT_SYMBOL(crypto_sha256_arm_update);

static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
{
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_block_data_order);
return sha256_base_finish(desc, out);
}

int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_update(desc, data, len, sha256_block_data_order);
return crypto_sha256_arm_final(desc, out);
}
EXPORT_SYMBOL(crypto_sha256_arm_finup);
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 0635a65aa488b..1be5bd498af36 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -25,27 +25,25 @@ MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha384-arm");
MODULE_ALIAS_CRYPTO("sha512-arm");

-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
+asmlinkage void sha512_block_data_order(struct sha512_state *state,
+ u8 const *src, int blocks);

int sha512_arm_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_do_update(desc, data, len, sha512_block_data_order);
}

static int sha512_arm_final(struct shash_desc *desc, u8 *out)
{
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, sha512_block_data_order);
return sha512_base_finish(desc, out);
}

int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_update(desc, data, len, sha512_block_data_order);
return sha512_arm_final(desc, out);
}

diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 9ec49ac2f6fd5..381d58cea092d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -291,6 +291,8 @@ sw {
};

&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_tx_pin>;
status = "okay";
};

diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
index 4903d6358112d..855b7d43bc503 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
@@ -166,6 +166,8 @@ &r_ir {
};

&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_tx_pin>;
status = "okay";
};

diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index ca1d287a0a01d..d11e5041bae9a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -406,6 +406,7 @@ spi1_cs_pin: spi1-cs-pin {
function = "spi1";
};

+ /omit-if-no-ref/
spdif_tx_pin: spdif-tx-pin {
pins = "PH7";
function = "spdif";
@@ -655,10 +656,8 @@ spdif: spdif@5093000 {
clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>;
clock-names = "apb", "spdif";
resets = <&ccu RST_BUS_SPDIF>;
- dmas = <&dma 2>;
- dma-names = "tx";
- pinctrl-names = "default";
- pinctrl-0 = <&spdif_tx_pin>;
+ dmas = <&dma 2>, <&dma 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};

diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
index 4eb2cd14e00b0..9b6da84deae7a 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
@@ -145,7 +145,6 @@ pci@fbc00000 {
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <160>;
al,msi-num-spis = <160>;
diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
index 73a352ea8fd5c..b30014d4dc29c 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
@@ -351,7 +351,6 @@ pcie@fbd00000 {
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <336>;
al,msi-num-spis = <959>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
index df71348542064..a4c5a38905b03 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
@@ -180,9 +180,6 @@ ethernet-switch@0 {
brcm,num-gphy = <5>;
brcm,num-rgmii-ports = <2>;

- #address-cells = <1>;
- #size-cells = <0>;
-
ports: ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index fda97c47f4e97..d5778417455c0 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -584,6 +584,7 @@ gpio_g: gpio@660a0000 {
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
};

diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index 8f8c25e51194d..473d7d0ddf369 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -442,6 +442,7 @@ gpio_hsls: gpio@d0000 {
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinmux 0 0 16>,
<&pinmux 16 71 2>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
|
|
index 8b16bd68576c0..d9fa0deea7002 100644
|
|
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
|
|
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
|
|
@@ -294,8 +294,8 @@ MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x19
|
|
|
|
pinctrl_i2c4: i2c4grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
|
|
- MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
|
|
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083
|
|
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083
|
|
>;
|
|
};
|
|
|
|
@@ -313,19 +313,19 @@ MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x19
|
|
|
|
pinctrl_uart1: uart1grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140
|
|
- MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140
|
|
- MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140
|
|
- MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140
|
|
+ MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0
|
|
+ MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0
|
|
+ MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0
|
|
+ MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0
|
|
>;
|
|
};
|
|
|
|
pinctrl_uart2: uart2grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140
|
|
- MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140
|
|
- MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140
|
|
- MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140
|
|
+ MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0
|
|
+ MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0
|
|
+ MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0
|
|
+ MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0
|
|
>;
|
|
};
|
|
|
|
@@ -337,40 +337,40 @@ MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x19
|
|
|
|
pinctrl_usdhc2: usdhc2grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
|
|
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90
|
|
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
|
|
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
|
|
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
|
|
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
|
|
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
|
|
>;
|
|
};
|
|
|
|
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
|
|
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94
|
|
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
|
|
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
|
|
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
|
|
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
|
|
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
|
|
>;
|
|
};
|
|
|
|
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
|
|
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96
|
|
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
|
|
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
|
|
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
|
|
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
|
|
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
|
|
>;
|
|
};
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
|
|
index a079322a37931..d54cddd65b526 100644
|
|
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
|
|
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
|
|
@@ -277,8 +277,8 @@ MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x19
|
|
|
|
pinctrl_i2c4: i2c4grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
|
|
- MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
|
|
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083
|
|
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083
|
|
>;
|
|
};
|
|
|
|
@@ -290,19 +290,19 @@ MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT 0x19
|
|
|
|
pinctrl_uart1: uart1grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140
|
|
- MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140
|
|
- MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140
|
|
- MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140
|
|
+ MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0
|
|
+ MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0
|
|
+ MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0
|
|
+ MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0
|
|
>;
|
|
};
|
|
|
|
pinctrl_uart2: uart2grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140
|
|
- MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140
|
|
- MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140
|
|
- MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140
|
|
+ MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0
|
|
+ MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0
|
|
+ MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0
|
|
+ MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0
|
|
>;
|
|
};
|
|
|
|
@@ -314,40 +314,40 @@ MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x19
|
|
|
|
pinctrl_usdhc2: usdhc2grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
|
|
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90
|
|
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
|
|
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
|
|
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
|
|
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
|
|
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
|
|
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
|
|
>;
|
|
};
|
|
|
|
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
|
|
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94
|
|
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
|
|
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
|
|
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
|
|
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
|
|
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
|
|
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
|
|
>;
|
|
};
|
|
|
|
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
|
|
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96
|
|
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
|
|
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
|
|
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
|
|
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
|
|
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
|
|
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
|
|
>;
|
|
};
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
|
|
index 8d10f5b412978..d5199ecb3f6c1 100644
|
|
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
|
|
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
|
|
@@ -205,7 +205,7 @@ rtc@52 {
|
|
reg = <0x52>;
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&pinctrl_rtc>;
|
|
- interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_LOW>;
|
|
trickle-diode-disable;
|
|
};
|
|
};
|
|
@@ -247,8 +247,8 @@ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x19
|
|
|
|
pinctrl_i2c1: i2c1grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
|
|
- MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
|
|
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083
|
|
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083
|
|
>;
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
|
|
index 0679728d24899..884ae2ad35114 100644
|
|
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
|
|
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
|
|
@@ -237,8 +237,8 @@ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x19
|
|
|
|
pinctrl_i2c1: i2c1grp {
|
|
fsl,pins = <
|
|
- MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
|
|
- MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
|
|
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083
|
|
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083
|
|
>;
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
|
|
index c557dbf4dcd60..2e90466db89a0 100644
|
|
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
|
|
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
|
|
@@ -47,17 +47,6 @@ pps {
|
|
gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
|
|
status = "okay";
|
|
};
|
|
-
|
|
- reg_usb_otg1_vbus: regulator-usb-otg1 {
|
|
- pinctrl-names = "default";
|
|
- pinctrl-0 = <&pinctrl_reg_usb1_en>;
|
|
- compatible = "regulator-fixed";
|
|
- regulator-name = "usb_otg1_vbus";
|
|
- gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
|
|
- enable-active-high;
|
|
- regulator-min-microvolt = <5000000>;
|
|
- regulator-max-microvolt = <5000000>;
|
|
- };
|
|
};
|
|
|
|
/* off-board header */
|
|
@@ -146,9 +135,10 @@ &uart3 {
|
|
};
|
|
|
|
&usbotg1 {
|
|
+ pinctrl-names = "default";
|
|
+ pinctrl-0 = <&pinctrl_usbotg1>;
|
|
dr_mode = "otg";
|
|
over-current-active-low;
|
|
- vbus-supply = <&reg_usb_otg1_vbus>;
|
|
status = "okay";
|
|
};
|
|
|
|
@@ -206,14 +196,6 @@ MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x41
|
|
>;
|
|
};
|
|
|
|
- pinctrl_reg_usb1_en: regusb1grp {
|
|
- fsl,pins = <
|
|
- MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x41
|
|
- MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
|
|
- MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
|
|
- >;
|
|
- };
|
|
-
|
|
pinctrl_spi2: spi2grp {
|
|
fsl,pins = <
|
|
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
|
|
@@ -236,4 +218,11 @@ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140
|
|
MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
|
|
>;
|
|
};
|
|
+
|
|
+ pinctrl_usbotg1: usbotg1grp {
|
|
+ fsl,pins = <
|
|
+ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
|
|
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
|
|
+ >;
|
|
+ };
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
|
|
index 78ae73d0cf365..98ff17b14b2a5 100644
|
|
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
|
|
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
|
|
@@ -124,7 +124,6 @@ eth0: ethernet@c1b00000 {
|
|
amba {
|
|
#address-cells = <2>;
|
|
#size-cells = <1>;
|
|
- #interrupt-cells = <3>;
|
|
|
|
compatible = "simple-bus";
|
|
interrupt-parent = <&gic>;
|
|
diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
|
|
index 2173316573bee..8e9410d8f46c0 100644
|
|
--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
|
|
+++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
|
|
@@ -124,7 +124,6 @@ eth0: ethernet@c3700000 {
|
|
amba {
|
|
#address-cells = <2>;
|
|
#size-cells = <1>;
|
|
- #interrupt-cells = <3>;
|
|
|
|
compatible = "simple-bus";
|
|
interrupt-parent = <&gic>;
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
index df152c72276b8..cd28e1c45b70a 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
|
|
@@ -426,14 +426,14 @@ xor11 {
|
|
crypto: crypto@90000 {
|
|
compatible = "inside-secure,safexcel-eip97ies";
|
|
reg = <0x90000 0x20000>;
|
|
- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
|
|
- <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
|
|
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
|
|
- <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
|
|
- interrupt-names = "mem", "ring0", "ring1",
|
|
- "ring2", "ring3", "eip";
|
|
+ <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupt-names = "ring0", "ring1", "ring2",
|
|
+ "ring3", "eip", "mem";
|
|
clocks = <&nb_periph_clk 15>;
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
|
|
index a06a0a889c43f..73d8803b54d8b 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
|
|
@@ -133,7 +133,6 @@ pmu {
|
|
|
|
odmi: odmi@300000 {
|
|
compatible = "marvell,odmi-controller";
|
|
- interrupt-controller;
|
|
msi-controller;
|
|
marvell,odmi-frames = <4>;
|
|
reg = <0x300000 0x4000>,
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
|
|
index d6c0990a267d9..218c059b16d9c 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
|
|
@@ -506,14 +506,14 @@ CP11X_LABEL(sdhci0): mmc@780000 {
|
|
CP11X_LABEL(crypto): crypto@800000 {
|
|
compatible = "inside-secure,safexcel-eip197b";
|
|
reg = <0x800000 0x200000>;
|
|
- interrupts = <87 IRQ_TYPE_LEVEL_HIGH>,
|
|
- <88 IRQ_TYPE_LEVEL_HIGH>,
|
|
+ interrupts = <88 IRQ_TYPE_LEVEL_HIGH>,
|
|
<89 IRQ_TYPE_LEVEL_HIGH>,
|
|
<90 IRQ_TYPE_LEVEL_HIGH>,
|
|
<91 IRQ_TYPE_LEVEL_HIGH>,
|
|
- <92 IRQ_TYPE_LEVEL_HIGH>;
|
|
- interrupt-names = "mem", "ring0", "ring1",
|
|
- "ring2", "ring3", "eip";
|
|
+ <92 IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <87 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupt-names = "ring0", "ring1", "ring2", "ring3",
|
|
+ "eip", "mem";
|
|
clock-names = "core", "reg";
|
|
clocks = <&CP11X_LABEL(clk) 1 26>,
|
|
<&CP11X_LABEL(clk) 1 17>;
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
|
|
index 2c35ed0734a47..b1ddc491d2936 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
|
|
@@ -74,6 +74,7 @@ led-1 {
|
|
|
|
memory@40000000 {
|
|
reg = <0 0x40000000 0 0x40000000>;
|
|
+ device_type = "memory";
|
|
};
|
|
|
|
reg_1p8v: regulator-1p8v {
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
|
|
index f9313b697ac12..527dcb279ba52 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
|
|
@@ -56,6 +56,7 @@ key-wps {
|
|
|
|
memory@40000000 {
|
|
reg = <0 0x40000000 0 0x20000000>;
|
|
+ device_type = "memory";
|
|
};
|
|
|
|
reg_1p8v: regulator-1p8v {
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
|
|
index fc338bd497f51..108931e796465 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
|
|
@@ -110,6 +110,7 @@ infracfg: infracfg@10001000 {
|
|
compatible = "mediatek,mt7986-infracfg", "syscon";
|
|
reg = <0 0x10001000 0 0x1000>;
|
|
#clock-cells = <1>;
|
|
+ #reset-cells = <1>;
|
|
};
|
|
|
|
wed_pcie: wed-pcie@10003000 {
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
|
|
index dccf367c7ec6c..3d95625f1b0b4 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
|
|
@@ -4,6 +4,8 @@
|
|
*/
|
|
|
|
#include "mt8183-kukui.dtsi"
|
|
+/* Must come after mt8183-kukui.dtsi to modify cros_ec */
|
|
+#include <arm/cros-ec-keyboard.dtsi>
|
|
|
|
/ {
|
|
panel: panel {
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
|
|
index 50a0dd36b5fb3..0d3c7b8162ff0 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
|
|
@@ -372,6 +372,16 @@ pen_eject {
|
|
};
|
|
};
|
|
|
|
+&cros_ec {
|
|
+ cbas {
|
|
+ compatible = "google,cros-cbas";
|
|
+ };
|
|
+
|
|
+ keyboard-controller {
|
|
+ compatible = "google,cros-ec-keyb-switches";
|
|
+ };
|
|
+};
|
|
+
|
|
&qca_wifi {
|
|
qcom,ath10k-calibration-variant = "GO_KAKADU";
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
|
|
index 06f8c80bf5536..e73113cb51f53 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
|
|
@@ -339,6 +339,16 @@ touch_pin_reset: pin_reset {
|
|
};
|
|
};
|
|
|
|
+&cros_ec {
|
|
+ cbas {
|
|
+ compatible = "google,cros-cbas";
|
|
+ };
|
|
+
|
|
+ keyboard-controller {
|
|
+ compatible = "google,cros-ec-keyb-switches";
|
|
+ };
|
|
+};
|
|
+
|
|
&qca_wifi {
|
|
qcom,ath10k-calibration-variant = "GO_KODAMA";
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
|
|
index a7b0cb3ff7b0a..181da69d18f46 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
|
|
@@ -343,6 +343,16 @@ rst_pin {
|
|
};
|
|
};
|
|
|
|
+&cros_ec {
|
|
+ cbas {
|
|
+ compatible = "google,cros-cbas";
|
|
+ };
|
|
+
|
|
+ keyboard-controller {
|
|
+ compatible = "google,cros-ec-keyb-switches";
|
|
+ };
|
|
+};
|
|
+
|
|
&qca_wifi {
|
|
qcom,ath10k-calibration-variant = "LE_Krane";
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
|
|
index a428a581c93a8..1db97d94658b9 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
|
|
@@ -896,10 +896,6 @@ usbc_extcon: extcon0 {
|
|
google,usb-port-id = <0>;
|
|
};
|
|
|
|
- cbas {
|
|
- compatible = "google,cros-cbas";
|
|
- };
|
|
-
|
|
typec {
|
|
compatible = "google,cros-ec-typec";
|
|
#address-cells = <1>;
|
|
@@ -999,5 +995,4 @@ hub@1 {
|
|
};
|
|
};
|
|
|
|
-#include <arm/cros-ec-keyboard.dtsi>
|
|
#include <arm/cros-ec-sbs.dtsi>
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
|
|
index 50367da93cd79..c6080af1e4a30 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
|
|
@@ -819,10 +819,6 @@ cros_ec: ec@0 {
|
|
#address-cells = <1>;
|
|
#size-cells = <0>;
|
|
|
|
- base_detection: cbas {
|
|
- compatible = "google,cros-cbas";
|
|
- };
|
|
-
|
|
cros_ec_pwm: pwm {
|
|
compatible = "google,cros-ec-pwm";
|
|
#pwm-cells = <1>;
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
|
|
index 2f40c6cc407c1..4ed8a0f187583 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
|
|
@@ -1539,7 +1539,7 @@ vcodec_enc: vcodec@17020000 {
|
|
mediatek,scp = <&scp>;
|
|
power-domains = <&spm MT8192_POWER_DOMAIN_VENC>;
|
|
clocks = <&vencsys CLK_VENC_SET1_VENC>;
|
|
- clock-names = "venc-set1";
|
|
+ clock-names = "venc_sel";
|
|
assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>;
|
|
assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D4>;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
|
|
index 3348ba69ff6cf..d86d193e5a75e 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
|
|
@@ -13,3 +13,7 @@ / {
|
|
&ts_10 {
|
|
status = "okay";
|
|
};
|
|
+
|
|
+&watchdog {
|
|
+ /delete-property/ mediatek,disable-extrst;
|
|
+};
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
|
|
index 4669e9d917f8c..5356f53308e24 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
|
|
@@ -33,3 +33,7 @@ pins-low-power-pcie0-disable {
|
|
&ts_10 {
|
|
status = "okay";
|
|
};
|
|
+
|
|
+&watchdog {
|
|
+ /delete-property/ mediatek,disable-extrst;
|
|
+};
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
|
|
index 5021edd02f7c1..fca3606cb951e 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
|
|
@@ -34,3 +34,7 @@ pins-low-power-pcie0-disable {
|
|
&ts_10 {
|
|
status = "okay";
|
|
};
|
|
+
|
|
+&watchdog {
|
|
+ /delete-property/ mediatek,disable-extrst;
|
|
+};
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
|
|
index 5117b2e7985af..998c2e78168a6 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
|
|
@@ -111,6 +111,7 @@ mt6360: pmic@34 {
|
|
compatible = "mediatek,mt6360";
|
|
reg = <0x34>;
|
|
interrupt-controller;
|
|
+ #interrupt-cells = <1>;
|
|
interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>;
|
|
interrupt-names = "IRQB";
|
|
|
|
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
|
|
index f094011be9ed9..8099dc04ed2e1 100644
|
|
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
|
|
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
|
|
@@ -2024,7 +2024,7 @@ ethernet@6800000 {
|
|
status = "okay";
|
|
|
|
phy-handle = <&mgbe0_phy>;
|
|
- phy-mode = "usxgmii";
|
|
+ phy-mode = "10gbase-r";
|
|
|
|
mdio {
|
|
#address-cells = <1>;
|
|
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
|
|
index 135ff4368c4a6..5c04c91b0ee2b 100644
|
|
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
|
|
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
|
|
@@ -532,7 +532,7 @@ &mss_pil {
|
|
&pcie0 {
|
|
status = "okay";
|
|
perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
|
|
- enable-gpio = <&tlmm 134 GPIO_ACTIVE_HIGH>;
|
|
+ wake-gpios = <&tlmm 134 GPIO_ACTIVE_HIGH>;
|
|
|
|
vddpe-3v3-supply = <&pcie0_3p3v_dual>;
|
|
|
|
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
|
|
index eb1a9369926d2..9dccecd9fcaef 100644
|
|
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
|
|
@@ -1822,8 +1822,8 @@ pcie0: pci@1c00000 {
|
|
phys = <&pcie0_lane>;
|
|
phy-names = "pciephy";
|
|
|
|
- perst-gpio = <&tlmm 35 GPIO_ACTIVE_HIGH>;
|
|
- enable-gpio = <&tlmm 37 GPIO_ACTIVE_HIGH>;
|
|
+ perst-gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>;
|
|
+ wake-gpios = <&tlmm 37 GPIO_ACTIVE_HIGH>;
|
|
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&pcie0_default_state>;
|
|
@@ -1925,7 +1925,7 @@ pcie1: pci@1c08000 {
|
|
phys = <&pcie1_lane>;
|
|
phy-names = "pciephy";
|
|
|
|
- perst-gpio = <&tlmm 102 GPIO_ACTIVE_HIGH>;
|
|
+ perst-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;
|
|
enable-gpio = <&tlmm 104 GPIO_ACTIVE_HIGH>;
|
|
|
|
pinctrl-names = "default";
|
|
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
|
|
index ed9400f903c9e..b677ef6705d94 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
|
|
@@ -656,8 +656,8 @@ channel7 {
|
|
|
|
avb0: ethernet@e6800000 {
|
|
compatible = "renesas,etheravb-r8a779a0",
|
|
- "renesas,etheravb-rcar-gen3";
|
|
- reg = <0 0xe6800000 0 0x800>;
|
|
+ "renesas,etheravb-rcar-gen4";
|
|
+ reg = <0 0xe6800000 0 0x1000>;
|
|
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
|
|
@@ -704,8 +704,8 @@ avb0: ethernet@e6800000 {
|
|
|
|
avb1: ethernet@e6810000 {
|
|
compatible = "renesas,etheravb-r8a779a0",
|
|
- "renesas,etheravb-rcar-gen3";
|
|
- reg = <0 0xe6810000 0 0x800>;
|
|
+ "renesas,etheravb-rcar-gen4";
|
|
+ reg = <0 0xe6810000 0 0x1000>;
|
|
interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
|
|
@@ -752,7 +752,7 @@ avb1: ethernet@e6810000 {
|
|
|
|
avb2: ethernet@e6820000 {
|
|
compatible = "renesas,etheravb-r8a779a0",
|
|
- "renesas,etheravb-rcar-gen3";
|
|
+ "renesas,etheravb-rcar-gen4";
|
|
reg = <0 0xe6820000 0 0x1000>;
|
|
interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
|
|
@@ -800,7 +800,7 @@ avb2: ethernet@e6820000 {
|
|
|
|
avb3: ethernet@e6830000 {
|
|
compatible = "renesas,etheravb-r8a779a0",
|
|
- "renesas,etheravb-rcar-gen3";
|
|
+ "renesas,etheravb-rcar-gen4";
|
|
reg = <0 0xe6830000 0 0x1000>;
|
|
interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
|
|
@@ -848,7 +848,7 @@ avb3: ethernet@e6830000 {
|
|
|
|
avb4: ethernet@e6840000 {
|
|
compatible = "renesas,etheravb-r8a779a0",
|
|
- "renesas,etheravb-rcar-gen3";
|
|
+ "renesas,etheravb-rcar-gen4";
|
|
reg = <0 0xe6840000 0 0x1000>;
|
|
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
|
|
@@ -896,7 +896,7 @@ avb4: ethernet@e6840000 {
|
|
|
|
avb5: ethernet@e6850000 {
|
|
compatible = "renesas,etheravb-r8a779a0",
|
|
- "renesas,etheravb-rcar-gen3";
|
|
+ "renesas,etheravb-rcar-gen4";
|
|
reg = <0 0xe6850000 0 0x1000>;
|
|
interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
|
|
@@ -1019,7 +1019,7 @@ tpu: pwm@e6e80000 {
|
|
|
|
msiof0: spi@e6e90000 {
|
|
compatible = "renesas,msiof-r8a779a0",
|
|
- "renesas,rcar-gen3-msiof";
|
|
+ "renesas,rcar-gen4-msiof";
|
|
reg = <0 0xe6e90000 0 0x0064>;
|
|
interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 618>;
|
|
@@ -1034,7 +1034,7 @@ msiof0: spi@e6e90000 {
|
|
|
|
msiof1: spi@e6ea0000 {
|
|
compatible = "renesas,msiof-r8a779a0",
|
|
- "renesas,rcar-gen3-msiof";
|
|
+ "renesas,rcar-gen4-msiof";
|
|
reg = <0 0xe6ea0000 0 0x0064>;
|
|
interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 619>;
|
|
@@ -1049,7 +1049,7 @@ msiof1: spi@e6ea0000 {
|
|
|
|
msiof2: spi@e6c00000 {
|
|
compatible = "renesas,msiof-r8a779a0",
|
|
- "renesas,rcar-gen3-msiof";
|
|
+ "renesas,rcar-gen4-msiof";
|
|
reg = <0 0xe6c00000 0 0x0064>;
|
|
interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 620>;
|
|
@@ -1064,7 +1064,7 @@ msiof2: spi@e6c00000 {
|
|
|
|
msiof3: spi@e6c10000 {
|
|
compatible = "renesas,msiof-r8a779a0",
|
|
- "renesas,rcar-gen3-msiof";
|
|
+ "renesas,rcar-gen4-msiof";
|
|
reg = <0 0xe6c10000 0 0x0064>;
|
|
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 621>;
|
|
@@ -1079,7 +1079,7 @@ msiof3: spi@e6c10000 {
|
|
|
|
msiof4: spi@e6c20000 {
|
|
compatible = "renesas,msiof-r8a779a0",
|
|
- "renesas,rcar-gen3-msiof";
|
|
+ "renesas,rcar-gen4-msiof";
|
|
reg = <0 0xe6c20000 0 0x0064>;
|
|
interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 622>;
|
|
@@ -1094,7 +1094,7 @@ msiof4: spi@e6c20000 {
|
|
|
|
msiof5: spi@e6c28000 {
|
|
compatible = "renesas,msiof-r8a779a0",
|
|
- "renesas,rcar-gen3-msiof";
|
|
+ "renesas,rcar-gen4-msiof";
|
|
reg = <0 0xe6c28000 0 0x0064>;
|
|
interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 623>;
|
|
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
|
|
index d58b18802cb01..868d1a3cbdf61 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
|
|
@@ -337,7 +337,7 @@ hscif0: serial@e6540000 {
|
|
avb0: ethernet@e6800000 {
|
|
compatible = "renesas,etheravb-r8a779g0",
|
|
"renesas,etheravb-rcar-gen4";
|
|
- reg = <0 0xe6800000 0 0x800>;
|
|
+ reg = <0 0xe6800000 0 0x1000>;
|
|
interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
|
|
@@ -384,7 +384,7 @@ avb0: ethernet@e6800000 {
|
|
avb1: ethernet@e6810000 {
|
|
compatible = "renesas,etheravb-r8a779g0",
|
|
"renesas,etheravb-rcar-gen4";
|
|
- reg = <0 0xe6810000 0 0x800>;
|
|
+ reg = <0 0xe6810000 0 0x1000>;
|
|
interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
|
|
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
|
|
index a4738842f0646..7f88395ff7997 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
|
|
@@ -1,6 +1,6 @@
|
|
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
|
/*
|
|
- * Device Tree Source for the RZ/G2UL SoC
|
|
+ * Device Tree Source for the RZ/Five and RZ/G2UL SoCs
|
|
*
|
|
* Copyright (C) 2022 Renesas Electronics Corp.
|
|
*/
|
|
@@ -68,36 +68,8 @@ opp-1000000000 {
|
|
};
|
|
};
|
|
|
|
- cpus {
|
|
- #address-cells = <1>;
|
|
- #size-cells = <0>;
|
|
-
|
|
- cpu0: cpu@0 {
|
|
- compatible = "arm,cortex-a55";
|
|
- reg = <0>;
|
|
- device_type = "cpu";
|
|
- #cooling-cells = <2>;
|
|
- next-level-cache = <&L3_CA55>;
|
|
- enable-method = "psci";
|
|
- clocks = <&cpg CPG_CORE R9A07G043_CLK_I>;
|
|
- operating-points-v2 = <&cluster0_opp>;
|
|
- };
|
|
-
|
|
- L3_CA55: cache-controller-0 {
|
|
- compatible = "cache";
|
|
- cache-unified;
|
|
- cache-size = <0x40000>;
|
|
- };
|
|
- };
|
|
-
|
|
- psci {
|
|
- compatible = "arm,psci-1.0", "arm,psci-0.2";
|
|
- method = "smc";
|
|
- };
|
|
-
|
|
soc: soc {
|
|
compatible = "simple-bus";
|
|
- interrupt-parent = <&gic>;
|
|
#address-cells = <2>;
|
|
#size-cells = <2>;
|
|
ranges;
|
|
@@ -545,12 +517,6 @@ cpg: clock-controller@11010000 {
|
|
sysc: system-controller@11020000 {
|
|
compatible = "renesas,r9a07g043-sysc";
|
|
reg = <0 0x11020000 0 0x10000>;
|
|
- interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>,
|
|
- <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>,
|
|
- <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>,
|
|
- <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>;
|
|
- interrupt-names = "lpm_int", "ca55stbydone_int",
|
|
- "cm33stbyr_int", "ca55_deny";
|
|
status = "disabled";
|
|
};
|
|
|
|
@@ -603,16 +569,6 @@ dmac: dma-controller@11820000 {
|
|
dma-channels = <16>;
|
|
};
|
|
|
|
- gic: interrupt-controller@11900000 {
|
|
- compatible = "arm,gic-v3";
|
|
- #interrupt-cells = <3>;
|
|
- #address-cells = <0>;
|
|
- interrupt-controller;
|
|
- reg = <0x0 0x11900000 0 0x40000>,
|
|
- <0x0 0x11940000 0 0x60000>;
|
|
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
|
|
- };
|
|
-
|
|
sdhi0: mmc@11c00000 {
|
|
compatible = "renesas,sdhi-r9a07g043",
|
|
"renesas,rcar-gen3-sdhi";
|
|
@@ -893,12 +849,4 @@ target: trip-point {
|
|
};
|
|
};
|
|
};
|
|
-
|
|
- timer {
|
|
- compatible = "arm,armv8-timer";
|
|
- interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
|
|
- <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
|
|
- <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
|
|
- <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
|
|
- };
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
|
|
index 96f935bc2d4d1..011d4c88f4ed9 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
|
|
@@ -10,3 +10,139 @@
|
|
#define SOC_PERIPHERAL_IRQ(nr) GIC_SPI nr
|
|
|
|
#include "r9a07g043.dtsi"
|
|
+
|
|
+/ {
|
|
+ cpus {
|
|
+ #address-cells = <1>;
|
|
+ #size-cells = <0>;
|
|
+
|
|
+ cpu0: cpu@0 {
|
|
+ compatible = "arm,cortex-a55";
|
|
+ reg = <0>;
|
|
+ device_type = "cpu";
|
|
+ #cooling-cells = <2>;
|
|
+ next-level-cache = <&L3_CA55>;
|
|
+ enable-method = "psci";
|
|
+ clocks = <&cpg CPG_CORE R9A07G043_CLK_I>;
|
|
+ operating-points-v2 = <&cluster0_opp>;
|
|
+ };
|
|
+
|
|
+ L3_CA55: cache-controller-0 {
|
|
+ compatible = "cache";
|
|
+ cache-unified;
|
|
+ cache-size = <0x40000>;
|
|
+ };
|
|
+ };
|
|
+
|
|
+ psci {
|
|
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
|
|
+ method = "smc";
|
|
+ };
|
|
+
|
|
+ timer {
|
|
+ compatible = "arm,armv8-timer";
|
|
+ interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
|
|
+ <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
|
|
+ <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
|
|
+ <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
|
|
+ };
|
|
+};
|
|
+
|
|
+&soc {
|
|
+ interrupt-parent = <&gic>;
|
|
+
|
|
+ irqc: interrupt-controller@110a0000 {
|
|
+ compatible = "renesas,r9a07g043u-irqc",
|
|
+ "renesas,rzg2l-irqc";
|
|
+ reg = <0 0x110a0000 0 0x10000>;
|
|
+ #interrupt-cells = <2>;
|
|
+ #address-cells = <0>;
|
|
+ interrupt-controller;
|
|
+ interrupts = <SOC_PERIPHERAL_IRQ(0) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(1) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(2) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(3) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(4) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(5) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(6) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(7) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(8) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(444) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(445) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(446) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(447) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(448) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(449) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(450) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(451) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(452) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(453) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(454) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(455) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(456) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(457) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(458) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(459) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(460) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(461) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(462) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(463) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(464) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(465) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(466) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(467) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(468) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(469) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(470) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(471) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(472) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(473) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(474) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(475) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(25) IRQ_TYPE_EDGE_RISING>,
|
|
+ <SOC_PERIPHERAL_IRQ(34) IRQ_TYPE_EDGE_RISING>,
|
|
+ <SOC_PERIPHERAL_IRQ(35) IRQ_TYPE_EDGE_RISING>,
|
|
+ <SOC_PERIPHERAL_IRQ(36) IRQ_TYPE_EDGE_RISING>,
|
|
+ <SOC_PERIPHERAL_IRQ(37) IRQ_TYPE_EDGE_RISING>,
|
|
+ <SOC_PERIPHERAL_IRQ(38) IRQ_TYPE_EDGE_RISING>,
|
|
+ <SOC_PERIPHERAL_IRQ(39) IRQ_TYPE_EDGE_RISING>;
|
|
+ interrupt-names = "nmi",
|
|
+ "irq0", "irq1", "irq2", "irq3",
|
|
+ "irq4", "irq5", "irq6", "irq7",
|
|
+ "tint0", "tint1", "tint2", "tint3",
|
|
+ "tint4", "tint5", "tint6", "tint7",
|
|
+ "tint8", "tint9", "tint10", "tint11",
|
|
+ "tint12", "tint13", "tint14", "tint15",
|
|
+ "tint16", "tint17", "tint18", "tint19",
|
|
+ "tint20", "tint21", "tint22", "tint23",
|
|
+ "tint24", "tint25", "tint26", "tint27",
|
|
+ "tint28", "tint29", "tint30", "tint31",
|
|
+ "bus-err", "ec7tie1-0", "ec7tie2-0",
|
|
+ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
|
|
+ "ec7tiovf-1";
|
|
+ clocks = <&cpg CPG_MOD R9A07G043_IA55_CLK>,
|
|
+ <&cpg CPG_MOD R9A07G043_IA55_PCLK>;
|
|
+ clock-names = "clk", "pclk";
|
|
+ power-domains = <&cpg>;
|
|
+ resets = <&cpg R9A07G043_IA55_RESETN>;
|
|
+ };
|
|
+
|
|
+ gic: interrupt-controller@11900000 {
|
|
+ compatible = "arm,gic-v3";
|
|
+ #interrupt-cells = <3>;
|
|
+ #address-cells = <0>;
|
|
+ interrupt-controller;
|
|
+ reg = <0x0 0x11900000 0 0x40000>,
|
|
+ <0x0 0x11940000 0 0x60000>;
|
|
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
|
|
+ };
|
|
+};
|
|
+
|
|
+&sysc {
|
|
+ interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupt-names = "lpm_int", "ca55stbydone_int",
|
|
+ "cm33stbyr_int", "ca55_deny";
|
|
+};
|
|
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
|
|
index 7dbf6a6292f49..d26488b5a82df 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
|
|
@@ -698,7 +698,27 @@ irqc: interrupt-controller@110a0000 {
|
|
<GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
|
|
- <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
|
|
+ interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
|
|
+ "irq4", "irq5", "irq6", "irq7",
|
|
+ "tint0", "tint1", "tint2", "tint3",
|
|
+ "tint4", "tint5", "tint6", "tint7",
|
|
+ "tint8", "tint9", "tint10", "tint11",
|
|
+ "tint12", "tint13", "tint14", "tint15",
|
|
+ "tint16", "tint17", "tint18", "tint19",
|
|
+ "tint20", "tint21", "tint22", "tint23",
|
|
+ "tint24", "tint25", "tint26", "tint27",
|
|
+ "tint28", "tint29", "tint30", "tint31",
|
|
+ "bus-err", "ec7tie1-0", "ec7tie2-0",
|
|
+ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
|
|
+ "ec7tiovf-1";
|
|
clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
|
|
<&cpg CPG_MOD R9A07G044_IA55_PCLK>;
|
|
clock-names = "clk", "pclk";
|
|
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
|
|
index e000510b90a42..b3d37ca942ee3 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
|
|
@@ -704,7 +704,27 @@ irqc: interrupt-controller@110a0000 {
|
|
<GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
|
|
<GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
|
|
- <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
|
|
+ <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
|
|
+ <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
|
|
+ interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
|
|
+ "irq4", "irq5", "irq6", "irq7",
|
|
+ "tint0", "tint1", "tint2", "tint3",
|
|
+ "tint4", "tint5", "tint6", "tint7",
|
|
+ "tint8", "tint9", "tint10", "tint11",
|
|
+ "tint12", "tint13", "tint14", "tint15",
|
|
+ "tint16", "tint17", "tint18", "tint19",
|
|
+ "tint20", "tint21", "tint22", "tint23",
|
|
+ "tint24", "tint25", "tint26", "tint27",
|
|
+ "tint28", "tint29", "tint30", "tint31",
|
|
+ "bus-err", "ec7tie1-0", "ec7tie2-0",
|
|
+ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
|
|
+ "ec7tiovf-1";
|
|
clocks = <&cpg CPG_MOD R9A07G054_IA55_CLK>,
|
|
<&cpg CPG_MOD R9A07G054_IA55_PCLK>;
|
|
clock-names = "clk", "pclk";
|
|
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
|
|
index 588b14b66b6fb..f37abfc13fe59 100644
|
|
--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
|
|
@@ -251,6 +251,7 @@ gpio_exp_74: gpio@74 {
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
+ #interrupt-cells = <2>;
|
|
interrupt-parent = <&gpio6>;
|
|
interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
|
|
|
|
@@ -311,6 +312,7 @@ gpio_exp_75: gpio@75 {
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
+ #interrupt-cells = <2>;
|
|
interrupt-parent = <&gpio6>;
|
|
interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
|
|
};
|
|
@@ -331,6 +333,7 @@ gpio_exp_76: gpio@76 {
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
+ #interrupt-cells = <2>;
|
|
interrupt-parent = <&gpio7>;
|
|
interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
|
|
};
|
|
@@ -341,6 +344,7 @@ gpio_exp_77: gpio@77 {
|
|
gpio-controller;
|
|
#gpio-cells = <2>;
|
|
interrupt-controller;
|
|
+ #interrupt-cells = <2>;
|
|
interrupt-parent = <&gpio5>;
|
|
interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
|
|
index f4d6dbbbddcd4..99ad6fc51b584 100644
|
|
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
|
|
@@ -596,6 +596,7 @@ vpu: video-codec@fdea0400 {
|
|
compatible = "rockchip,rk3568-vpu";
|
|
reg = <0x0 0xfdea0000 0x0 0x800>;
|
|
interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupt-names = "vdpu";
|
|
clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
|
|
clock-names = "aclk", "hclk";
|
|
iommus = <&vdpu_mmu>;
|
|
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
|
|
index da18413712c04..930b0e6c94622 100644
|
|
--- a/arch/arm64/include/asm/fpsimd.h
|
|
+++ b/arch/arm64/include/asm/fpsimd.h
|
|
@@ -36,13 +36,13 @@
|
|
* When we defined the maximum SVE vector length we defined the ABI so
|
|
* that the maximum vector length included all the reserved for future
|
|
* expansion bits in ZCR rather than those just currently defined by
|
|
- * the architecture. While SME follows a similar pattern the fact that
|
|
- * it includes a square matrix means that any allocations that attempt
|
|
- * to cover the maximum potential vector length (such as happen with
|
|
- * the regset used for ptrace) end up being extremely large. Define
|
|
- * the much lower actual limit for use in such situations.
|
|
+ * the architecture. Using this length to allocate worst size buffers
|
|
+ * results in excessively large allocations, and this effect is even
|
|
+ * more pronounced for SME due to ZA. Define more suitable VLs for
|
|
+ * these situations.
|
|
*/
|
|
-#define SME_VQ_MAX 16
|
|
+#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
|
|
+#define SME_VQ_MAX ((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
|
|
|
|
struct task_struct;
|
|
|
|
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
|
|
index e1f6366b7ccdf..d02dd2be17b3b 100644
|
|
--- a/arch/arm64/kernel/ptrace.c
|
|
+++ b/arch/arm64/kernel/ptrace.c
|
|
@@ -1450,7 +1450,8 @@ static const struct user_regset aarch64_regsets[] = {
|
|
#ifdef CONFIG_ARM64_SVE
|
|
[REGSET_SVE] = { /* Scalable Vector Extension */
|
|
.core_note_type = NT_ARM_SVE,
|
|
- .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
|
|
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
|
|
+ SVE_PT_REGS_SVE),
|
|
SVE_VQ_BYTES),
|
|
.size = SVE_VQ_BYTES,
|
|
.align = SVE_VQ_BYTES,
|
|
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
|
|
index daf3cf244ea97..b3e4dd6be7e20 100644
|
|
--- a/arch/mips/include/asm/ptrace.h
|
|
+++ b/arch/mips/include/asm/ptrace.h
|
|
@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
|
|
unsigned long val)
|
|
{
|
|
regs->cp0_epc = val;
|
|
+ regs->cp0_cause &= ~CAUSEF_BD;
|
|
}
|
|
|
|
/* Query offset/name of register from its name/offset */
|
|
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
|
|
index 4d392e4ed3584..ac7253891d5ed 100644
|
|
--- a/arch/parisc/kernel/ftrace.c
|
|
+++ b/arch/parisc/kernel/ftrace.c
|
|
@@ -78,7 +78,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
|
|
#endif
|
|
}
|
|
|
|
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
|
|
int ftrace_enable_ftrace_graph_caller(void)
|
|
{
|
|
static_key_enable(&ftrace_graph_enable.key);
|
|
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
|
|
index 4c69ece52a31e..59ed89890c902 100644
|
|
--- a/arch/powerpc/include/asm/vmalloc.h
|
|
+++ b/arch/powerpc/include/asm/vmalloc.h
|
|
@@ -7,14 +7,14 @@
|
|
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
|
|
|
|
#define arch_vmap_pud_supported arch_vmap_pud_supported
|
|
-static inline bool arch_vmap_pud_supported(pgprot_t prot)
|
|
+static __always_inline bool arch_vmap_pud_supported(pgprot_t prot)
|
|
{
|
|
/* HPT does not cope with large pages in the vmalloc area */
|
|
return radix_enabled();
|
|
}
|
|
|
|
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
|
|
-static inline bool arch_vmap_pmd_supported(pgprot_t prot)
|
|
+static __always_inline bool arch_vmap_pmd_supported(pgprot_t prot)
|
|
{
|
|
return radix_enabled();
|
|
}
|
|
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
|
|
index 7ff8ff3509f5f..943248a0e9a9d 100644
|
|
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -164,6 +164,20 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
+
+ /*
+ * ret value as 'H_PARAMETER' with detail_rc as 'GEN_BUF_TOO_SMALL',
+ * specifies that the current buffer size cannot accommodate
+ * all the information and a partial buffer returned.
+ * Since in this function we are only accessing data for a given starting index,
+ * we don't need to accommodate whole data and can get required count by
+ * accessing first entry data.
+ * Hence hcall fails only incase the ret value is other than H_SUCCESS or
+ * H_PARAMETER with detail_rc value as GEN_BUF_TOO_SMALL(0x1B).
+ */
+ if (ret == H_PARAMETER && be32_to_cpu(arg->params.detail_rc) == 0x1B)
+ ret = 0;
+
if (ret) {
pr_devel("hcall failed: 0x%lx\n", ret);
goto out;
@@ -228,6 +242,7 @@ static int h_gpci_event_init(struct perf_event *event)
{
u64 count;
u8 length;
+ unsigned long ret;
/* Not our event */
if (event->attr.type != event->pmu->type)
@@ -258,13 +273,23 @@ static int h_gpci_event_init(struct perf_event *event)
}
/* check if the request works... */
- if (single_gpci_request(event_get_request(event),
+ ret = single_gpci_request(event_get_request(event),
event_get_starting_index(event),
event_get_secondary_index(event),
event_get_counter_info_version(event),
event_get_offset(event),
length,
- &count)) {
+ &count);
+
+ /*
+ * ret value as H_AUTHORITY implies that partition is not permitted to retrieve
+ * performance information, and required to set
+ * "Enable Performance Information Collection" option.
+ */
+ if (ret == H_AUTHORITY)
+ return -EPERM;
+
+ if (ret) {
pr_devel("gpci hcall failed\n");
return -EINVAL;
}
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 1830e1ac1f8f0..107a8b60ad0c9 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -99,9 +99,6 @@ static void __init linkstation_init_IRQ(void)
mpic_init(mpic);
}
-extern void avr_uart_configure(void);
-extern void avr_uart_send(const char);
-
static void __noreturn linkstation_restart(char *cmd)
{
local_irq_disable();
diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h
index 5ad12023e5628..ebc258fa4858d 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc10x.h
+++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h
@@ -156,4 +156,7 @@ int mpc10x_disable_store_gathering(struct pci_controller *hose);
/* For MPC107 boards that use the built-in openpic */
void mpc10x_set_openpic(void);
+void avr_uart_configure(void);
+void avr_uart_send(const char c);
+
#endif /* __PPC_KERNEL_MPC10X_H */
diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
|
|
index 526c621b098be..eea2041b270b5 100644
|
|
--- a/arch/powerpc/platforms/pseries/papr_platform_attributes.c
|
|
+++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
|
|
@@ -101,10 +101,12 @@ static int papr_get_attr(u64 id, struct energy_scale_attribute *esi)
|
|
esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs);
|
|
|
|
temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL);
|
|
- if (temp_buf)
|
|
+ if (temp_buf) {
|
|
buf = temp_buf;
|
|
- else
|
|
- return -ENOMEM;
|
|
+ } else {
|
|
+ ret = -ENOMEM;
|
|
+ goto out_buf;
|
|
+ }
|
|
|
|
goto retry;
|
|
}
|
|
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
|
|
index 07387f9c135ca..72b87b08ab444 100644
|
|
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
|
|
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
|
|
@@ -123,6 +123,7 @@ pmic@58 {
|
|
interrupt-parent = <&gpio>;
|
|
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
|
|
interrupt-controller;
|
|
+ #interrupt-cells = <2>;
|
|
|
|
onkey {
|
|
compatible = "dlg,da9063-onkey";
|
|
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
|
|
index 93d1ccd3304c7..9c49c3d67cd56 100644
|
|
--- a/arch/s390/include/uapi/asm/dasd.h
|
|
+++ b/arch/s390/include/uapi/asm/dasd.h
|
|
@@ -78,6 +78,7 @@ typedef struct dasd_information2_t {
|
|
* 0x040: give access to raw eckd data
|
|
* 0x080: enable discard support
|
|
* 0x100: enable autodisable for IFCC errors (default)
|
|
+ * 0x200: enable requeue of all requests on autoquiesce
|
|
*/
|
|
#define DASD_FEATURE_READONLY 0x001
|
|
#define DASD_FEATURE_USEDIAG 0x002
|
|
@@ -88,6 +89,7 @@ typedef struct dasd_information2_t {
|
|
#define DASD_FEATURE_USERAW 0x040
|
|
#define DASD_FEATURE_DISCARD 0x080
|
|
#define DASD_FEATURE_PATH_AUTODISABLE 0x100
|
|
+#define DASD_FEATURE_REQUEUEQUIESCE 0x200
|
|
#define DASD_FEATURE_DEFAULT DASD_FEATURE_PATH_AUTODISABLE
|
|
|
|
#define DASD_PARTN_BITS 2
|
|
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
|
|
index 7ee3651d00abe..732024ca005ad 100644
|
|
--- a/arch/s390/kernel/cache.c
|
|
+++ b/arch/s390/kernel/cache.c
|
|
@@ -166,5 +166,6 @@ int populate_cache_leaves(unsigned int cpu)
|
|
ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
|
|
}
|
|
}
|
|
+ this_cpu_ci->cpu_map_populated = true;
|
|
return 0;
|
|
}
|
|
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
|
|
index 6826e2a69a216..f61a652046cfb 100644
|
|
--- a/arch/s390/kernel/perf_pai_crypto.c
|
|
+++ b/arch/s390/kernel/perf_pai_crypto.c
|
|
@@ -647,7 +647,7 @@ static int __init attr_event_init(void)
|
|
for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
|
|
ret = attr_event_init_one(attrs, i);
|
|
if (ret) {
|
|
- attr_event_free(attrs, i - 1);
|
|
+ attr_event_free(attrs, i);
|
|
return ret;
|
|
}
|
|
}
|
|
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
|
|
index 74b53c531e0cd..b4d89654183a2 100644
|
|
--- a/arch/s390/kernel/perf_pai_ext.c
|
|
+++ b/arch/s390/kernel/perf_pai_ext.c
|
|
@@ -612,7 +612,7 @@ static int __init attr_event_init(void)
|
|
for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
|
|
ret = attr_event_init_one(attrs, i);
|
|
if (ret) {
|
|
- attr_event_free(attrs, i - 1);
|
|
+ attr_event_free(attrs, i);
|
|
return ret;
|
|
}
|
|
}
|
|
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
|
|
index 245bddfe9bc0e..cc513add48eb5 100644
|
|
--- a/arch/s390/kernel/vdso32/Makefile
|
|
+++ b/arch/s390/kernel/vdso32/Makefile
|
|
@@ -22,7 +22,7 @@ KBUILD_AFLAGS_32 += -m31 -s
|
|
KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
|
|
KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
|
|
|
|
-LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
|
|
+LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
|
|
--hash-style=both --build-id=sha1 -melf_s390 -T
|
|
|
|
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
|
|
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
|
|
index 1605ba45ac4c0..42d918d50a1ff 100644
|
|
--- a/arch/s390/kernel/vdso64/Makefile
|
|
+++ b/arch/s390/kernel/vdso64/Makefile
|
|
@@ -26,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64 -s
|
|
|
|
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
|
|
KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
|
|
-ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
|
|
+ldflags-y := -shared -soname=linux-vdso64.so.1 \
|
|
--hash-style=both --build-id=sha1 -T
|
|
|
|
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
|
|
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
|
|
index 9436f3053b88c..003c926a0f4de 100644
|
|
--- a/arch/s390/kernel/vtime.c
|
|
+++ b/arch/s390/kernel/vtime.c
|
|
@@ -210,13 +210,13 @@ void vtime_flush(struct task_struct *tsk)
|
|
virt_timer_expire();
|
|
|
|
steal = S390_lowcore.steal_timer;
|
|
- avg_steal = S390_lowcore.avg_steal_timer / 2;
|
|
+ avg_steal = S390_lowcore.avg_steal_timer;
|
|
if ((s64) steal > 0) {
|
|
S390_lowcore.steal_timer = 0;
|
|
account_steal_time(cputime_to_nsecs(steal));
|
|
avg_steal += steal;
|
|
}
|
|
- S390_lowcore.avg_steal_timer = avg_steal;
|
|
+ S390_lowcore.avg_steal_timer = avg_steal / 2;
|
|
}
|
|
|
|
static u64 vtime_delta(void)
|
|
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
|
|
index e6935d0ac1ec9..c32590bdd3120 100644
|
|
--- a/arch/sparc/kernel/leon_pci_grpci1.c
|
|
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
|
|
@@ -696,7 +696,7 @@ static int grpci1_of_probe(struct platform_device *ofdev)
|
|
return err;
|
|
}
|
|
|
|
-static const struct of_device_id grpci1_of_match[] __initconst = {
|
|
+static const struct of_device_id grpci1_of_match[] = {
|
|
{
|
|
.name = "GAISLER_PCIFBRG",
|
|
},
|
|
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
|
|
index ca22f93d90454..dd06abc61657f 100644
|
|
--- a/arch/sparc/kernel/leon_pci_grpci2.c
|
|
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
|
|
@@ -887,7 +887,7 @@ static int grpci2_of_probe(struct platform_device *ofdev)
|
|
return err;
|
|
}
|
|
|
|
-static const struct of_device_id grpci2_of_match[] __initconst = {
|
|
+static const struct of_device_id grpci2_of_match[] = {
|
|
{
|
|
.name = "GAISLER_GRPCI2",
|
|
},
|
|
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
|
|
index 04f4b96dec6df..fd091b9dd7067 100644
|
|
--- a/arch/x86/events/amd/core.c
|
|
+++ b/arch/x86/events/amd/core.c
|
|
@@ -604,7 +604,6 @@ static void amd_pmu_cpu_dead(int cpu)
|
|
|
|
kfree(cpuhw->lbr_sel);
|
|
cpuhw->lbr_sel = NULL;
|
|
- amd_pmu_cpu_reset(cpu);
|
|
|
|
if (!x86_pmu.amd_nb_constraints)
|
|
return;
|
|
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
|
|
index ab60a71a8dcb9..472f0263dbc61 100644
|
|
--- a/arch/x86/include/asm/vsyscall.h
|
|
+++ b/arch/x86/include/asm/vsyscall.h
|
|
@@ -4,6 +4,7 @@
|
|
|
|
#include <linux/seqlock.h>
|
|
#include <uapi/asm/vsyscall.h>
|
|
+#include <asm/page_types.h>
|
|
|
|
#ifdef CONFIG_X86_VSYSCALL_EMULATION
|
|
extern void map_vsyscall(void);
|
|
@@ -24,4 +25,13 @@ static inline bool emulate_vsyscall(unsigned long error_code,
|
|
}
|
|
#endif
|
|
|
|
+/*
|
|
+ * The (legacy) vsyscall page is the long page in the kernel portion
|
|
+ * of the address space that has user-accessible permissions.
|
|
+ */
|
|
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
|
|
+{
|
|
+ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
|
|
+}
|
|
+
|
|
#endif /* _ASM_X86_VSYSCALL_H */
|
|
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
|
|
index 8d8752b44f113..ff8f25faca3dd 100644
|
|
--- a/arch/x86/kernel/acpi/cppc.c
|
|
+++ b/arch/x86/kernel/acpi/cppc.c
|
|
@@ -20,7 +20,7 @@ bool cpc_supported_by_cpu(void)
|
|
(boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
|
|
return true;
|
|
else if (boot_cpu_data.x86 == 0x17 &&
|
|
- boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
|
|
+ boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
|
|
return true;
|
|
return boot_cpu_has(X86_FEATURE_CPPC);
|
|
}
|
|
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
|
|
index 0b5c6c76f6f7b..4761d489a117a 100644
|
|
--- a/arch/x86/kernel/cpu/resctrl/internal.h
|
|
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
|
|
@@ -281,14 +281,10 @@ struct rftype {
|
|
* struct mbm_state - status for each MBM counter in each domain
|
|
* @prev_bw_bytes: Previous bytes value read for bandwidth calculation
|
|
* @prev_bw: The most recent bandwidth in MBps
|
|
- * @delta_bw: Difference between the current and previous bandwidth
|
|
- * @delta_comp: Indicates whether to compute the delta_bw
|
|
*/
|
|
struct mbm_state {
|
|
u64 prev_bw_bytes;
|
|
u32 prev_bw;
|
|
- u32 delta_bw;
|
|
- bool delta_comp;
|
|
};
|
|
|
|
/**
|
|
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
|
|
index 77538abeb72af..b9adb707750c6 100644
|
|
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
|
|
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
|
|
@@ -428,9 +428,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
|
|
|
|
cur_bw = bytes / SZ_1M;
|
|
|
|
- if (m->delta_comp)
|
|
- m->delta_bw = abs(cur_bw - m->prev_bw);
|
|
- m->delta_comp = false;
|
|
m->prev_bw = cur_bw;
|
|
}
|
|
|
|
@@ -508,11 +505,11 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
|
|
{
|
|
u32 closid, rmid, cur_msr_val, new_msr_val;
|
|
struct mbm_state *pmbm_data, *cmbm_data;
|
|
- u32 cur_bw, delta_bw, user_bw;
|
|
struct rdt_resource *r_mba;
|
|
struct rdt_domain *dom_mba;
|
|
struct list_head *head;
|
|
struct rdtgroup *entry;
|
|
+ u32 cur_bw, user_bw;
|
|
|
|
if (!is_mbm_local_enabled())
|
|
return;
|
|
@@ -531,7 +528,6 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
|
|
|
|
cur_bw = pmbm_data->prev_bw;
|
|
user_bw = dom_mba->mbps_val[closid];
|
|
- delta_bw = pmbm_data->delta_bw;
|
|
|
|
/* MBA resource doesn't support CDP */
|
|
cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
|
|
@@ -543,49 +539,31 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
|
|
list_for_each_entry(entry, head, mon.crdtgrp_list) {
|
|
cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
|
|
cur_bw += cmbm_data->prev_bw;
|
|
- delta_bw += cmbm_data->delta_bw;
|
|
}
|
|
|
|
/*
|
|
* Scale up/down the bandwidth linearly for the ctrl group. The
|
|
* bandwidth step is the bandwidth granularity specified by the
|
|
* hardware.
|
|
- *
|
|
- * The delta_bw is used when increasing the bandwidth so that we
|
|
- * dont alternately increase and decrease the control values
|
|
- * continuously.
|
|
- *
|
|
- * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
|
|
- * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
|
|
- * switching between 90 and 110 continuously if we only check
|
|
- * cur_bw < user_bw.
|
|
+ * Always increase throttling if current bandwidth is above the
|
|
+ * target set by user.
|
|
+ * But avoid thrashing up and down on every poll by checking
|
|
+ * whether a decrease in throttling is likely to push the group
|
|
+ * back over target. E.g. if currently throttling to 30% of bandwidth
|
|
+ * on a system with 10% granularity steps, check whether moving to
|
|
+ * 40% would go past the limit by multiplying current bandwidth by
|
|
+ * "(30 + 10) / 30".
|
|
*/
|
|
if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
|
|
new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
|
|
} else if (cur_msr_val < MAX_MBA_BW &&
|
|
- (user_bw > (cur_bw + delta_bw))) {
|
|
+ (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
|
|
new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
|
|
} else {
|
|
return;
|
|
}
|
|
|
|
resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
|
|
-
|
|
- /*
|
|
- * Delta values are updated dynamically package wise for each
|
|
- * rdtgrp every time the throttle MSR changes value.
|
|
- *
|
|
- * This is because (1)the increase in bandwidth is not perfectly
|
|
- * linear and only "approximately" linear even when the hardware
|
|
- * says it is linear.(2)Also since MBA is a core specific
|
|
- * mechanism, the delta values vary based on number of cores used
|
|
- * by the rdtgrp.
|
|
- */
|
|
- pmbm_data->delta_comp = true;
|
|
- list_for_each_entry(entry, head, mon.crdtgrp_list) {
|
|
- cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
|
|
- cmbm_data->delta_comp = true;
|
|
- }
|
|
}
|
|
|
|
static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
|
|
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
|
|
index 1dbbad73192a1..f20636510eb1e 100644
|
|
--- a/arch/x86/mm/fault.c
|
|
+++ b/arch/x86/mm/fault.c
|
|
@@ -818,15 +818,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
|
|
show_opcodes(regs, loglvl);
|
|
}
|
|
|
|
-/*
|
|
- * The (legacy) vsyscall page is the long page in the kernel portion
|
|
- * of the address space that has user-accessible permissions.
|
|
- */
|
|
-static bool is_vsyscall_vaddr(unsigned long vaddr)
|
|
-{
|
|
- return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
|
|
-}
|
|
-
|
|
static void
|
|
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
|
|
unsigned long address, u32 pkey, int si_code)
|
|
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
|
|
index 6993f026adec9..42115ac079cfe 100644
|
|
--- a/arch/x86/mm/maccess.c
|
|
+++ b/arch/x86/mm/maccess.c
|
|
@@ -3,6 +3,8 @@
|
|
#include <linux/uaccess.h>
|
|
#include <linux/kernel.h>
|
|
|
|
+#include <asm/vsyscall.h>
|
|
+
|
|
#ifdef CONFIG_X86_64
|
|
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
|
|
{
|
|
@@ -15,6 +17,14 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
|
|
if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
|
|
return false;
|
|
|
|
+ /*
|
|
+ * Reading from the vsyscall page may cause an unhandled fault in
|
|
+ * certain cases. Though it is at an address above TASK_SIZE_MAX, it is
|
|
+ * usually considered as a user space address.
|
|
+ */
|
|
+ if (is_vsyscall_vaddr(vaddr))
|
|
+ return false;
|
|
+
|
|
/*
|
|
* Allow everything during early boot before 'x86_virt_bits'
|
|
* is initialized. Needed for instruction decoding in early
|
|
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
|
|
index d94ebd8acdfde..a11a6ebbf5ecf 100644
|
|
--- a/arch/x86/mm/mem_encrypt_identity.c
|
|
+++ b/arch/x86/mm/mem_encrypt_identity.c
|
|
@@ -507,7 +507,6 @@ void __init sme_enable(struct boot_params *bp)
|
|
const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
|
|
unsigned int eax, ebx, ecx, edx;
|
|
unsigned long feature_mask;
|
|
- bool active_by_default;
|
|
unsigned long me_mask;
|
|
char buffer[16];
|
|
bool snp;
|
|
@@ -593,22 +592,19 @@ void __init sme_enable(struct boot_params *bp)
|
|
: "p" (sme_cmdline_off));
|
|
|
|
if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
|
|
- active_by_default = true;
|
|
- else
|
|
- active_by_default = false;
|
|
+ sme_me_mask = me_mask;
|
|
|
|
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
|
|
((u64)bp->ext_cmd_line_ptr << 32));
|
|
|
|
if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
|
|
- return;
|
|
+ goto out;
|
|
|
|
if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
|
|
sme_me_mask = me_mask;
|
|
else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
|
|
sme_me_mask = 0;
|
|
- else
|
|
- sme_me_mask = active_by_default ? me_mask : 0;
|
|
+
|
|
out:
|
|
if (sme_me_mask) {
|
|
physical_mask &= ~sme_me_mask;
|
|
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
|
|
index 2925074b9a588..9a5b101c45023 100644
|
|
--- a/arch/x86/tools/relocs.c
|
|
+++ b/arch/x86/tools/relocs.c
|
|
@@ -653,6 +653,14 @@ static void print_absolute_relocs(void)
|
|
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
|
|
continue;
|
|
}
|
|
+ /*
|
|
+ * Do not perform relocations in .notes section; any
|
|
+ * values there are meant for pre-boot consumption (e.g.
|
|
+ * startup_xen).
|
|
+ */
|
|
+ if (sec_applies->shdr.sh_type == SHT_NOTE) {
|
|
+ continue;
|
|
+ }
|
|
sh_symtab = sec_symtab->symtab;
|
|
sym_strtab = sec_symtab->link->strtab;
|
|
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
|
|
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
|
|
index 4b0d6fff88de5..1fb9a1644d944 100644
|
|
--- a/arch/x86/xen/smp.c
|
|
+++ b/arch/x86/xen/smp.c
|
|
@@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu)
|
|
char *resched_name, *callfunc_name, *debug_name;
|
|
|
|
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
|
|
+ if (!resched_name)
|
|
+ goto fail_mem;
|
|
per_cpu(xen_resched_irq, cpu).name = resched_name;
|
|
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
|
|
cpu,
|
|
@@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu)
|
|
per_cpu(xen_resched_irq, cpu).irq = rc;
|
|
|
|
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
|
|
+ if (!callfunc_name)
|
|
+ goto fail_mem;
|
|
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
|
|
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
|
|
cpu,
|
|
@@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu)
|
|
|
|
if (!xen_fifo_events) {
|
|
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
|
|
+ if (!debug_name)
|
|
+ goto fail_mem;
|
|
+
|
|
per_cpu(xen_debug_irq, cpu).name = debug_name;
|
|
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
|
|
xen_debug_interrupt,
|
|
@@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu)
|
|
}
|
|
|
|
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
|
|
+ if (!callfunc_name)
|
|
+ goto fail_mem;
|
|
+
|
|
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
|
|
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
|
|
cpu,
|
|
@@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu)
|
|
|
|
return 0;
|
|
|
|
+ fail_mem:
|
|
+ rc = -ENOMEM;
|
|
fail:
|
|
xen_smp_intr_free(cpu);
|
|
return rc;
|
|
diff --git a/block/opal_proto.h b/block/opal_proto.h
|
|
index 7152aa1f1a49e..7f306b08a0fe7 100644
|
|
--- a/block/opal_proto.h
|
|
+++ b/block/opal_proto.h
|
|
@@ -71,6 +71,7 @@ enum opal_response_token {
|
|
#define SHORT_ATOM_BYTE 0xBF
|
|
#define MEDIUM_ATOM_BYTE 0xDF
|
|
#define LONG_ATOM_BYTE 0xE3
|
|
+#define EMPTY_ATOM_BYTE 0xFF
|
|
|
|
#define OPAL_INVAL_PARAM 12
|
|
#define OPAL_MANUFACTURED_INACTIVE 0x08
|
|
diff --git a/block/sed-opal.c b/block/sed-opal.c
|
|
index 9bdb833e5817d..25e4ce452c1d3 100644
|
|
--- a/block/sed-opal.c
|
|
+++ b/block/sed-opal.c
|
|
@@ -935,16 +935,20 @@ static int response_parse(const u8 *buf, size_t length,
|
|
token_length = response_parse_medium(iter, pos);
|
|
else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */
|
|
token_length = response_parse_long(iter, pos);
|
|
+ else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */
|
|
+ token_length = 1;
|
|
else /* TOKEN */
|
|
token_length = response_parse_token(iter, pos);
|
|
|
|
if (token_length < 0)
|
|
return token_length;
|
|
|
|
+ if (pos[0] != EMPTY_ATOM_BYTE)
|
|
+ num_entries++;
|
|
+
|
|
pos += token_length;
|
|
total -= token_length;
|
|
iter++;
|
|
- num_entries++;
|
|
}
|
|
|
|
resp->num = num_entries;
|
|
diff --git a/crypto/Kconfig b/crypto/Kconfig
|
|
index d779667671b23..edf193aff23e7 100644
|
|
--- a/crypto/Kconfig
|
|
+++ b/crypto/Kconfig
|
|
@@ -1285,10 +1285,11 @@ config CRYPTO_JITTERENTROPY
|
|
|
|
A non-physical non-deterministic ("true") RNG (e.g., an entropy source
|
|
compliant with NIST SP800-90B) intended to provide a seed to a
|
|
- deterministic RNG (e.g. per NIST SP800-90C).
|
|
+ deterministic RNG (e.g., per NIST SP800-90C).
|
|
This RNG does not perform any cryptographic whitening of the generated
|
|
+ random numbers.
|
|
|
|
- See https://www.chronox.de/jent.html
|
|
+ See https://www.chronox.de/jent/
|
|
|
|
config CRYPTO_KDF800108_CTR
|
|
tristate
|
|
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
|
|
index fc5b5b2c9e819..6f613eef28879 100644
|
|
--- a/drivers/acpi/processor_idle.c
|
|
+++ b/drivers/acpi/processor_idle.c
|
|
@@ -1431,6 +1431,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
|
|
acpi_processor_registered--;
|
|
if (acpi_processor_registered == 0)
|
|
cpuidle_unregister_driver(&acpi_idle_driver);
|
|
+
|
|
+ kfree(dev);
|
|
}
|
|
|
|
pr->flags.power_setup_done = 0;
|
|
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
|
|
index 5ebeb0d7b6be0..1c5c1a269fbee 100644
|
|
--- a/drivers/acpi/resource.c
|
|
+++ b/drivers/acpi/resource.c
|
|
@@ -543,6 +543,39 @@ static const struct dmi_system_id lg_laptop[] = {
|
|
DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
|
|
},
|
|
},
|
|
+ {
|
|
+ /* Infinity E15-5A165-BM */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* Infinity E15-5A305-1M */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* Lunnen Ground 15 / AMD Ryzen 5 5500U */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* Lunnen Ground 16 / AMD Ryzen 7 5800U */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "LL6FA"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
+ /* MAIBENBEN X577 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"),
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "X577"),
|
|
+ },
|
|
+ },
|
|
{ }
|
|
};
|
|
|
|
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
|
|
index 94154a849a3ea..293cdf486fd81 100644
|
|
--- a/drivers/acpi/scan.c
|
|
+++ b/drivers/acpi/scan.c
|
|
@@ -315,18 +315,14 @@ static int acpi_scan_device_check(struct acpi_device *adev)
|
|
* again).
|
|
*/
|
|
if (adev->handler) {
|
|
- dev_warn(&adev->dev, "Already enumerated\n");
|
|
- return -EALREADY;
|
|
+ dev_dbg(&adev->dev, "Already enumerated\n");
|
|
+ return 0;
|
|
}
|
|
error = acpi_bus_scan(adev->handle);
|
|
if (error) {
|
|
dev_warn(&adev->dev, "Namespace scan failure\n");
|
|
return error;
|
|
}
|
|
- if (!adev->handler) {
|
|
- dev_warn(&adev->dev, "Enumeration failure\n");
|
|
- error = -ENODEV;
|
|
- }
|
|
} else {
|
|
error = acpi_scan_device_not_present(adev);
|
|
}
|
|
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
|
|
index d7317425be510..cc9077b588d7e 100644
|
|
--- a/drivers/block/aoe/aoecmd.c
|
|
+++ b/drivers/block/aoe/aoecmd.c
|
|
@@ -419,13 +419,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
|
|
rcu_read_lock();
|
|
for_each_netdev_rcu(&init_net, ifp) {
|
|
dev_hold(ifp);
|
|
- if (!is_aoe_netif(ifp))
|
|
- goto cont;
|
|
+ if (!is_aoe_netif(ifp)) {
|
|
+ dev_put(ifp);
|
|
+ continue;
|
|
+ }
|
|
|
|
skb = new_skb(sizeof *h + sizeof *ch);
|
|
if (skb == NULL) {
|
|
printk(KERN_INFO "aoe: skb alloc failure\n");
|
|
- goto cont;
|
|
+ dev_put(ifp);
|
|
+ continue;
|
|
}
|
|
skb_put(skb, sizeof *h + sizeof *ch);
|
|
skb->dev = ifp;
|
|
@@ -440,9 +443,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
|
|
h->major = cpu_to_be16(aoemajor);
|
|
h->minor = aoeminor;
|
|
h->cmd = AOECMD_CFG;
|
|
-
|
|
-cont:
|
|
- dev_put(ifp);
|
|
}
|
|
rcu_read_unlock();
|
|
}
|
|
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
|
|
index 63773a90581dd..1e66c7a188a12 100644
|
|
--- a/drivers/block/aoe/aoenet.c
|
|
+++ b/drivers/block/aoe/aoenet.c
|
|
@@ -64,6 +64,7 @@ tx(int id) __must_hold(&txlock)
|
|
pr_warn("aoe: packet could not be sent on %s. %s\n",
|
|
ifp ? ifp->name : "netif",
|
|
"consider increasing tx_queue_len");
|
|
+ dev_put(ifp);
|
|
spin_lock_irq(&txlock);
|
|
}
|
|
return 0;
|
|
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
|
|
index 9a53165de4cef..5c4be8dda253c 100644
|
|
--- a/drivers/block/nbd.c
|
|
+++ b/drivers/block/nbd.c
|
|
@@ -2408,6 +2408,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
|
|
}
|
|
|
|
dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
|
|
+ if (!dev_list) {
|
|
+ nlmsg_free(reply);
|
|
+ ret = -EMSGSIZE;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (index == -1) {
|
|
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
|
|
if (ret) {
|
|
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
|
|
index 8bfef7f81b417..2acda547f4f3e 100644
|
|
--- a/drivers/bluetooth/hci_qca.c
|
|
+++ b/drivers/bluetooth/hci_qca.c
|
|
@@ -2254,7 +2254,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
|
|
|
|
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
|
|
GPIOD_OUT_LOW);
|
|
- if (IS_ERR_OR_NULL(qcadev->bt_en) &&
|
|
+ if (IS_ERR(qcadev->bt_en) &&
|
|
(data->soc_type == QCA_WCN6750 ||
|
|
data->soc_type == QCA_WCN6855)) {
|
|
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
|
|
@@ -2263,7 +2263,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
|
|
|
|
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
|
|
GPIOD_IN);
|
|
- if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
|
|
+ if (IS_ERR(qcadev->sw_ctrl) &&
|
|
(data->soc_type == QCA_WCN6750 ||
|
|
data->soc_type == QCA_WCN6855 ||
|
|
data->soc_type == QCA_WCN7850))
|
|
@@ -2285,7 +2285,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
|
|
default:
|
|
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
|
|
GPIOD_OUT_LOW);
|
|
- if (IS_ERR_OR_NULL(qcadev->bt_en)) {
|
|
+ if (IS_ERR(qcadev->bt_en)) {
|
|
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
|
|
power_ctrl_enabled = false;
|
|
}
|
|
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
|
|
index 7bfe998f3514a..bdc7633905504 100644
|
|
--- a/drivers/bus/Kconfig
|
|
+++ b/drivers/bus/Kconfig
|
|
@@ -186,11 +186,12 @@ config SUNXI_RSB
|
|
|
|
config TEGRA_ACONNECT
|
|
tristate "Tegra ACONNECT Bus Driver"
|
|
- depends on ARCH_TEGRA_210_SOC
|
|
+ depends on ARCH_TEGRA
|
|
depends on OF && PM
|
|
help
|
|
Driver for the Tegra ACONNECT bus which is used to interface with
|
|
- the devices inside the Audio Processing Engine (APE) for Tegra210.
|
|
+ the devices inside the Audio Processing Engine (APE) for
|
|
+ Tegra210 and later.
|
|
|
|
config TEGRA_GMI
|
|
tristate "Tegra Generic Memory Interface bus driver"
|
|
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
|
|
index 33fedbd096f33..9004e07182259 100644
|
|
--- a/drivers/clk/clk.c
|
|
+++ b/drivers/clk/clk.c
|
|
@@ -407,6 +407,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
|
|
if (IS_ERR(hw))
|
|
return ERR_CAST(hw);
|
|
|
|
+ if (!hw)
|
|
+ return NULL;
|
|
+
|
|
return hw->core;
|
|
}
|
|
|
|
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
|
|
index ad0c7f350cf03..60d8a27a90824 100644
|
|
--- a/drivers/clk/hisilicon/clk-hi3519.c
|
|
+++ b/drivers/clk/hisilicon/clk-hi3519.c
|
|
@@ -130,7 +130,7 @@ static void hi3519_clk_unregister(struct platform_device *pdev)
|
|
of_clk_del_provider(pdev->dev.of_node);
|
|
|
|
hisi_clk_unregister_gate(hi3519_gate_clks,
|
|
- ARRAY_SIZE(hi3519_mux_clks),
|
|
+ ARRAY_SIZE(hi3519_gate_clks),
|
|
crg->clk_data);
|
|
hisi_clk_unregister_mux(hi3519_mux_clks,
|
|
ARRAY_SIZE(hi3519_mux_clks),
|
|
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
|
|
index 9ea1a80acbe8b..0272276550ff1 100644
|
|
--- a/drivers/clk/hisilicon/clk-hi3559a.c
|
|
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
|
|
@@ -491,7 +491,6 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks,
|
|
|
|
clk = clk_register(NULL, &p_clk->hw);
|
|
if (IS_ERR(clk)) {
|
|
- devm_kfree(dev, p_clk);
|
|
dev_err(dev, "%s: failed to register clock %s\n",
|
|
__func__, clks[i].name);
|
|
continue;
|
|
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
|
|
index 2ad3801398dc1..7802dabb26f6d 100644
|
|
--- a/drivers/clk/meson/axg.c
|
|
+++ b/drivers/clk/meson/axg.c
|
|
@@ -2144,7 +2144,9 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
|
|
&axg_vclk_input,
|
|
&axg_vclk2_input,
|
|
&axg_vclk_div,
|
|
+ &axg_vclk_div1,
|
|
&axg_vclk2_div,
|
|
+ &axg_vclk2_div1,
|
|
&axg_vclk_div2_en,
|
|
&axg_vclk_div4_en,
|
|
&axg_vclk_div6_en,
|
|
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
|
|
index 735adfefc3798..e792e0b130d33 100644
|
|
--- a/drivers/clk/qcom/dispcc-sdm845.c
|
|
+++ b/drivers/clk/qcom/dispcc-sdm845.c
|
|
@@ -759,6 +759,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
|
|
|
|
static struct gdsc mdss_gdsc = {
|
|
.gdscr = 0x3000,
|
|
+ .en_few_wait_val = 0x6,
|
|
+ .en_rest_wait_val = 0x5,
|
|
.pd = {
|
|
.name = "mdss_gdsc",
|
|
},
|
|
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
|
|
index e45e32804d2c7..d96c96a9089f4 100644
|
|
--- a/drivers/clk/qcom/reset.c
|
|
+++ b/drivers/clk/qcom/reset.c
|
|
@@ -22,8 +22,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
|
|
return 0;
|
|
}
|
|
|
|
-static int
|
|
-qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
|
|
+static int qcom_reset_set_assert(struct reset_controller_dev *rcdev,
|
|
+ unsigned long id, bool assert)
|
|
{
|
|
struct qcom_reset_controller *rst;
|
|
const struct qcom_reset_map *map;
|
|
@@ -33,21 +33,22 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
|
|
map = &rst->reset_map[id];
|
|
mask = map->bitmask ? map->bitmask : BIT(map->bit);
|
|
|
|
- return regmap_update_bits(rst->regmap, map->reg, mask, mask);
|
|
+ regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0);
|
|
+
|
|
+ /* Read back the register to ensure write completion, ignore the value */
|
|
+ regmap_read(rst->regmap, map->reg, &mask);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
-static int
|
|
-qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
|
|
+static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
|
|
{
|
|
- struct qcom_reset_controller *rst;
|
|
- const struct qcom_reset_map *map;
|
|
- u32 mask;
|
|
-
|
|
- rst = to_qcom_reset_controller(rcdev);
|
|
- map = &rst->reset_map[id];
|
|
- mask = map->bitmask ? map->bitmask : BIT(map->bit);
|
|
+ return qcom_reset_set_assert(rcdev, id, true);
|
|
+}
|
|
|
|
- return regmap_update_bits(rst->regmap, map->reg, mask, 0);
|
|
+static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
|
|
+{
|
|
+ return qcom_reset_set_assert(rcdev, id, false);
|
|
}
|
|
|
|
const struct reset_control_ops qcom_reset_ops = {
|
|
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
|
|
index 27b668def357f..7a49b91c93710 100644
|
|
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
|
|
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
|
|
@@ -159,7 +159,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
|
|
DEF_MOD("cmt1", 911, R8A779F0_CLK_R),
|
|
DEF_MOD("cmt2", 912, R8A779F0_CLK_R),
|
|
DEF_MOD("cmt3", 913, R8A779F0_CLK_R),
|
|
- DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
|
|
+ DEF_MOD("pfc0", 915, R8A779F0_CLK_CPEX),
|
|
DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
|
|
DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC),
|
|
};
|
|
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
|
|
index d5b325e3c5398..e4c616921e5ea 100644
|
|
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
|
|
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
|
|
@@ -22,7 +22,7 @@
|
|
|
|
enum clk_ids {
|
|
/* Core Clock Outputs exported to DT */
|
|
- LAST_DT_CORE_CLK = R8A779G0_CLK_R,
|
|
+ LAST_DT_CORE_CLK = R8A779G0_CLK_CP,
|
|
|
|
/* External Input Clocks */
|
|
CLK_EXTAL,
|
|
@@ -139,6 +139,7 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
|
|
DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1),
|
|
DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
|
|
DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1),
|
|
+ DEF_FIXED("cp", R8A779G0_CLK_CP, CLK_EXTAL, 2, 1),
|
|
DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1),
|
|
DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
|
|
DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
|
|
@@ -169,10 +170,17 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
|
|
DEF_MOD("i2c4", 522, R8A779G0_CLK_S0D6_PER),
|
|
DEF_MOD("i2c5", 523, R8A779G0_CLK_S0D6_PER),
|
|
DEF_MOD("wdt1:wdt0", 907, R8A779G0_CLK_R),
|
|
- DEF_MOD("pfc0", 915, R8A779G0_CLK_CL16M),
|
|
- DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M),
|
|
- DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M),
|
|
- DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M),
|
|
+ DEF_MOD("cmt0", 910, R8A779G0_CLK_R),
|
|
+ DEF_MOD("cmt1", 911, R8A779G0_CLK_R),
|
|
+ DEF_MOD("cmt2", 912, R8A779G0_CLK_R),
|
|
+ DEF_MOD("cmt3", 913, R8A779G0_CLK_R),
|
|
+ DEF_MOD("pfc0", 915, R8A779G0_CLK_CP),
|
|
+ DEF_MOD("pfc1", 916, R8A779G0_CLK_CP),
|
|
+ DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
|
|
+ DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
|
|
+ DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
|
|
+ DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
|
|
+ DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
|
|
};
|
|
|
|
/*
|
|
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
|
|
index 541761e96aeb6..87e463ad42741 100644
|
|
--- a/drivers/clk/samsung/clk-exynos850.c
|
|
+++ b/drivers/clk/samsung/clk-exynos850.c
|
|
@@ -572,7 +572,7 @@ static const struct samsung_div_clock apm_div_clks[] __initconst = {
|
|
|
|
static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
|
|
GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
|
|
- CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0),
|
|
+ CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, CLK_SET_RATE_PARENT, 0),
|
|
GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
|
|
"mout_clkcmu_chub_bus",
|
|
CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
|
|
@@ -936,19 +936,19 @@ static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
|
|
static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
|
|
MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
|
|
CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
|
|
- MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
|
|
- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1),
|
|
- MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
|
|
- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1),
|
|
+ MUX_F(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
|
|
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1, CLK_SET_RATE_PARENT, 0),
|
|
+ MUX_F(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
|
|
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1, CLK_SET_RATE_PARENT, 0),
|
|
};
|
|
|
|
static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
|
|
DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
|
|
CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
|
|
- DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
|
|
- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5),
|
|
- DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
|
|
- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5),
|
|
+ DIV_F(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
|
|
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5, CLK_SET_RATE_PARENT, 0),
|
|
+ DIV_F(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
|
|
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5, CLK_SET_RATE_PARENT, 0),
|
|
};
|
|
|
|
static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
|
|
@@ -963,12 +963,12 @@ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
|
|
"gout_clkcmu_cmgp_bus",
|
|
CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
|
|
GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
|
|
- CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0),
|
|
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
|
|
GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
|
|
"gout_clkcmu_cmgp_bus",
|
|
CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
|
|
GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
|
|
- CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0),
|
|
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
|
|
GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
|
|
"gout_clkcmu_cmgp_bus",
|
|
CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
|
|
@@ -1409,8 +1409,9 @@ static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
|
|
mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
|
|
MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
|
|
mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
|
|
- MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p,
|
|
- PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1),
|
|
+ MUX_F(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user",
|
|
+ mout_peri_spi_user_p, PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1,
|
|
+ CLK_SET_RATE_PARENT, 0),
|
|
};
|
|
|
|
static const struct samsung_div_clock peri_div_clks[] __initconst = {
|
|
@@ -1420,8 +1421,8 @@ static const struct samsung_div_clock peri_div_clks[] __initconst = {
|
|
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
|
|
DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
|
|
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
|
|
- DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
|
|
- CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5),
|
|
+ DIV_F(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
|
|
+ CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5, CLK_SET_RATE_PARENT, 0),
|
|
};
|
|
|
|
static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
|
|
@@ -1463,7 +1464,7 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
|
|
"mout_peri_bus_user",
|
|
CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
|
|
GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
|
|
- CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0),
|
|
+ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
|
|
GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
|
|
CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
|
|
GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
|
|
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
|
|
index 7bdeaff2bfd68..c28d3dacf0fb2 100644
|
|
--- a/drivers/clk/zynq/clkc.c
|
|
+++ b/drivers/clk/zynq/clkc.c
|
|
@@ -42,6 +42,7 @@ static void __iomem *zynq_clkc_base;
|
|
#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
|
|
|
|
#define NUM_MIO_PINS 54
|
|
+#define CLK_NAME_LEN 16
|
|
|
|
#define DBG_CLK_CTRL_CLKACT_TRC BIT(0)
|
|
#define DBG_CLK_CTRL_CPU_1XCLKACT BIT(1)
|
|
@@ -215,7 +216,7 @@ static void __init zynq_clk_setup(struct device_node *np)
|
|
int i;
|
|
u32 tmp;
|
|
int ret;
|
|
- char *clk_name;
|
|
+ char clk_name[CLK_NAME_LEN];
|
|
unsigned int fclk_enable = 0;
|
|
const char *clk_output_name[clk_max];
|
|
const char *cpu_parents[4];
|
|
@@ -426,12 +427,10 @@ static void __init zynq_clk_setup(struct device_node *np)
|
|
"gem1_emio_mux", CLK_SET_RATE_PARENT,
|
|
SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
|
|
|
|
- tmp = strlen("mio_clk_00x");
|
|
- clk_name = kmalloc(tmp, GFP_KERNEL);
|
|
for (i = 0; i < NUM_MIO_PINS; i++) {
|
|
int idx;
|
|
|
|
- snprintf(clk_name, tmp, "mio_clk_%2.2d", i);
|
|
+ snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i);
|
|
idx = of_property_match_string(np, "clock-names", clk_name);
|
|
if (idx >= 0)
|
|
can_mio_mux_parents[i] = of_clk_get_parent_name(np,
|
|
@@ -439,7 +438,6 @@ static void __init zynq_clk_setup(struct device_node *np)
|
|
else
|
|
can_mio_mux_parents[i] = dummy_nm;
|
|
}
|
|
- kfree(clk_name);
|
|
clk_register_mux(NULL, "can_mux", periph_parents, 4,
|
|
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
|
|
&canclk_lock);
|
|
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
|
|
index 0b5c0af1cebf0..626d53bf9146a 100644
|
|
--- a/drivers/comedi/drivers/comedi_test.c
|
|
+++ b/drivers/comedi/drivers/comedi_test.c
|
|
@@ -85,6 +85,8 @@ struct waveform_private {
|
|
struct comedi_device *dev; /* parent comedi device */
|
|
u64 ao_last_scan_time; /* time of previous AO scan in usec */
|
|
unsigned int ao_scan_period; /* AO scan period in usec */
|
|
+ bool ai_timer_enable:1; /* should AI timer be running? */
|
|
+ bool ao_timer_enable:1; /* should AO timer be running? */
|
|
unsigned short ao_loopbacks[N_CHANS];
|
|
};
|
|
|
|
@@ -234,8 +236,12 @@ static void waveform_ai_timer(struct timer_list *t)
|
|
time_increment = devpriv->ai_convert_time - now;
|
|
else
|
|
time_increment = 1;
|
|
- mod_timer(&devpriv->ai_timer,
|
|
- jiffies + usecs_to_jiffies(time_increment));
|
|
+ spin_lock(&dev->spinlock);
|
|
+ if (devpriv->ai_timer_enable) {
|
|
+ mod_timer(&devpriv->ai_timer,
|
|
+ jiffies + usecs_to_jiffies(time_increment));
|
|
+ }
|
|
+ spin_unlock(&dev->spinlock);
|
|
}
|
|
|
|
overrun:
|
|
@@ -391,9 +397,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
|
|
* Seem to need an extra jiffy here, otherwise timer expires slightly
|
|
* early!
|
|
*/
|
|
+ spin_lock_bh(&dev->spinlock);
|
|
+ devpriv->ai_timer_enable = true;
|
|
devpriv->ai_timer.expires =
|
|
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
|
|
add_timer(&devpriv->ai_timer);
|
|
+ spin_unlock_bh(&dev->spinlock);
|
|
return 0;
|
|
}
|
|
|
|
@@ -402,6 +411,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
|
|
{
|
|
struct waveform_private *devpriv = dev->private;
|
|
|
|
+ spin_lock_bh(&dev->spinlock);
|
|
+ devpriv->ai_timer_enable = false;
|
|
+ spin_unlock_bh(&dev->spinlock);
|
|
if (in_softirq()) {
|
|
/* Assume we were called from the timer routine itself. */
|
|
del_timer(&devpriv->ai_timer);
|
|
@@ -493,8 +505,12 @@ static void waveform_ao_timer(struct timer_list *t)
|
|
unsigned int time_inc = devpriv->ao_last_scan_time +
|
|
devpriv->ao_scan_period - now;
|
|
|
|
- mod_timer(&devpriv->ao_timer,
|
|
- jiffies + usecs_to_jiffies(time_inc));
|
|
+ spin_lock(&dev->spinlock);
|
|
+ if (devpriv->ao_timer_enable) {
|
|
+ mod_timer(&devpriv->ao_timer,
|
|
+ jiffies + usecs_to_jiffies(time_inc));
|
|
+ }
|
|
+ spin_unlock(&dev->spinlock);
|
|
}
|
|
|
|
underrun:
|
|
@@ -515,9 +531,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
|
|
async->inttrig = NULL;
|
|
|
|
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
|
|
+ spin_lock_bh(&dev->spinlock);
|
|
+ devpriv->ao_timer_enable = true;
|
|
devpriv->ao_timer.expires =
|
|
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
|
|
add_timer(&devpriv->ao_timer);
|
|
+ spin_unlock_bh(&dev->spinlock);
|
|
|
|
return 1;
|
|
}
|
|
@@ -602,6 +621,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
|
|
struct waveform_private *devpriv = dev->private;
|
|
|
|
s->async->inttrig = NULL;
|
|
+ spin_lock_bh(&dev->spinlock);
|
|
+ devpriv->ao_timer_enable = false;
|
|
+ spin_unlock_bh(&dev->spinlock);
|
|
if (in_softirq()) {
|
|
/* Assume we were called from the timer routine itself. */
|
|
del_timer(&devpriv->ao_timer);
|
|
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
|
|
index b74289a95a171..bea41ccabf1f0 100644
|
|
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
|
|
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
|
|
@@ -14,10 +14,8 @@
|
|
#include <linux/interrupt.h>
|
|
#include <linux/io.h>
|
|
#include <linux/mfd/syscon.h>
|
|
+#include <linux/mod_devicetable.h>
|
|
#include <linux/module.h>
|
|
-#include <linux/of_address.h>
|
|
-#include <linux/of_device.h>
|
|
-#include <linux/of_irq.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/pm_opp.h>
|
|
#include <linux/regmap.h>
|
|
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
|
|
index f644c5e325fb2..38ec0fedb247f 100644
|
|
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
|
|
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
|
|
@@ -481,6 +481,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
|
|
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
|
|
{
|
|
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
|
|
+ if (!policy)
|
|
+ return 0;
|
|
struct private_data *priv = policy->driver_data;
|
|
|
|
cpufreq_cpu_put(policy);
|
|
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
|
|
index f0e0a35c7f217..7f326bb5fd8de 100644
|
|
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
|
|
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
|
|
@@ -10,8 +10,10 @@
|
|
#include <linux/iopoll.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
-#include <linux/of_address.h>
|
|
+#include <linux/of.h>
|
|
#include <linux/of_platform.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/regulator/consumer.h>
|
|
#include <linux/slab.h>
|
|
|
|
#define LUT_MAX_ENTRIES 32U
|
|
@@ -295,7 +297,23 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
|
|
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
|
|
{
|
|
const void *data;
|
|
- int ret;
|
|
+ int ret, cpu;
|
|
+ struct device *cpu_dev;
|
|
+ struct regulator *cpu_reg;
|
|
+
|
|
+ /* Make sure that all CPU supplies are available before proceeding. */
|
|
+ for_each_possible_cpu(cpu) {
|
|
+ cpu_dev = get_cpu_device(cpu);
|
|
+ if (!cpu_dev)
|
|
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
|
|
+ "Failed to get cpu%d device\n", cpu);
|
|
+
|
|
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu");
|
|
+ if (IS_ERR(cpu_reg))
|
|
+ return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg),
|
|
+ "CPU%d regulator get failed\n", cpu);
|
|
+ }
|
|
+
|
|
|
|
data = of_device_get_match_data(&pdev->dev);
|
|
if (!data)
|
|
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
|
|
index e3313ce63b388..88afc49941b71 100644
|
|
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
|
|
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
|
|
@@ -9,7 +9,7 @@
|
|
|
|
#include <linux/cpufreq.h>
|
|
#include <linux/module.h>
|
|
-#include <linux/of_platform.h>
|
|
+#include <linux/of.h>
|
|
|
|
#include <asm/machdep.h>
|
|
#include <asm/cell-regs.h>
|
|
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
|
|
index 4fba3637b115c..6f0c32592416d 100644
|
|
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
|
|
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
|
|
@@ -11,7 +11,6 @@
|
|
#include <linux/types.h>
|
|
#include <linux/timer.h>
|
|
#include <linux/init.h>
|
|
-#include <linux/of_platform.h>
|
|
#include <linux/pm_qos.h>
|
|
#include <linux/slab.h>
|
|
|
|
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
|
|
index a577586b23be2..cb03bfb0435ea 100644
|
|
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
|
|
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
|
|
@@ -22,7 +22,6 @@
|
|
#include <linux/module.h>
|
|
#include <linux/nvmem-consumer.h>
|
|
#include <linux/of.h>
|
|
-#include <linux/of_device.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/pm_domain.h>
|
|
#include <linux/pm_opp.h>
|
|
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
|
|
index fd2c16821d54c..ac719aca49b75 100644
|
|
--- a/drivers/cpufreq/scpi-cpufreq.c
|
|
+++ b/drivers/cpufreq/scpi-cpufreq.c
|
|
@@ -14,7 +14,7 @@
|
|
#include <linux/cpumask.h>
|
|
#include <linux/export.h>
|
|
#include <linux/module.h>
|
|
-#include <linux/of_platform.h>
|
|
+#include <linux/platform_device.h>
|
|
#include <linux/pm_opp.h>
|
|
#include <linux/scpi_protocol.h>
|
|
#include <linux/slab.h>
|
|
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
|
|
index 1a63aeea87112..9c542e723a157 100644
|
|
--- a/drivers/cpufreq/sti-cpufreq.c
|
|
+++ b/drivers/cpufreq/sti-cpufreq.c
|
|
@@ -13,7 +13,7 @@
|
|
#include <linux/mfd/syscon.h>
|
|
#include <linux/module.h>
|
|
#include <linux/of.h>
|
|
-#include <linux/of_platform.h>
|
|
+#include <linux/platform_device.h>
|
|
#include <linux/pm_opp.h>
|
|
#include <linux/regmap.h>
|
|
|
|
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
|
|
index f64180dd2005b..61ef653bcf56f 100644
|
|
--- a/drivers/cpufreq/ti-cpufreq.c
|
|
+++ b/drivers/cpufreq/ti-cpufreq.c
|
|
@@ -12,7 +12,7 @@
|
|
#include <linux/module.h>
|
|
#include <linux/init.h>
|
|
#include <linux/of.h>
|
|
-#include <linux/of_platform.h>
|
|
+#include <linux/platform_device.h>
|
|
#include <linux/pm_opp.h>
|
|
#include <linux/regmap.h>
|
|
#include <linux/slab.h>
|
|
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
|
|
index d295f405c4bb0..865e501648034 100644
|
|
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
|
|
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
|
|
@@ -18,7 +18,6 @@
|
|
#include <linux/device.h>
|
|
#include <linux/module.h>
|
|
#include <linux/mutex.h>
|
|
-#include <linux/of_platform.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/pm_opp.h>
|
|
#include <linux/slab.h>
|
|
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
|
|
index bf1f421e05f25..74bd3eb63734d 100644
|
|
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
|
|
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
|
|
@@ -231,7 +231,10 @@ static int zynqmp_handle_aes_req(struct crypto_engine *engine,
|
|
err = zynqmp_aes_aead_cipher(areq);
|
|
}
|
|
|
|
+ local_bh_disable();
|
|
crypto_finalize_aead_request(engine, areq, err);
|
|
+ local_bh_enable();
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
|
|
index 81de833ccd041..66ef0a1114845 100644
|
|
--- a/drivers/dma/Kconfig
|
|
+++ b/drivers/dma/Kconfig
|
|
@@ -665,16 +665,16 @@ config TEGRA20_APB_DMA
|
|
|
|
config TEGRA210_ADMA
|
|
tristate "NVIDIA Tegra210 ADMA support"
|
|
- depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
|
|
+ depends on (ARCH_TEGRA || COMPILE_TEST)
|
|
select DMA_ENGINE
|
|
select DMA_VIRTUAL_CHANNELS
|
|
help
|
|
- Support for the NVIDIA Tegra210 ADMA controller driver. The
|
|
- DMA controller has multiple DMA channels and is used to service
|
|
- various audio clients in the Tegra210 audio processing engine
|
|
- (APE). This DMA controller transfers data from memory to
|
|
- peripheral and vice versa. It does not support memory to
|
|
- memory data transfer.
|
|
+ Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
|
|
+ controller driver. The DMA controller has multiple DMA channels
|
|
+ and is used to service various audio clients in the Tegra210
|
|
+ audio processing engine (APE). This DMA controller transfers
|
|
+ data from memory to peripheral and vice versa. It does not
|
|
+ support memory to memory data transfer.
|
|
|
|
config TIMB_DMA
|
|
tristate "Timberdale FPGA DMA support"
|
|
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
|
|
index 8aaa7fcb2630d..401a77e3b5fa8 100644
|
|
--- a/drivers/firewire/core-card.c
|
|
+++ b/drivers/firewire/core-card.c
|
|
@@ -500,7 +500,19 @@ static void bm_work(struct work_struct *work)
|
|
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
|
|
new_root_id, gap_count);
|
|
fw_send_phy_config(card, new_root_id, generation, gap_count);
|
|
- reset_bus(card, true);
|
|
+ /*
|
|
+ * Where possible, use a short bus reset to minimize
|
|
+ * disruption to isochronous transfers. But in the event
|
|
+ * of a gap count inconsistency, use a long bus reset.
|
|
+ *
|
|
+ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
|
|
+ * may set different gap counts after a bus reset. On a mixed
|
|
+ * 1394/1394a bus, a short bus reset can get doubled. Some
|
|
+ * nodes may treat the double reset as one bus reset and others
|
|
+ * may treat it as two, causing a gap count inconsistency
|
|
+ * again. Using a long bus reset prevents this.
|
|
+ */
|
|
+ reset_bus(card, card->gap_count != 0);
|
|
/* Will allocate broadcast channel after the reset. */
|
|
goto out;
|
|
}
|
|
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
|
|
index ac0bd51ef16a2..42ea308a2c1d5 100644
|
|
--- a/drivers/firmware/arm_scmi/smc.c
|
|
+++ b/drivers/firmware/arm_scmi/smc.c
|
|
@@ -171,6 +171,13 @@ static int smc_chan_free(int id, void *p, void *data)
|
|
struct scmi_chan_info *cinfo = p;
|
|
struct scmi_smc *scmi_info = cinfo->transport_info;
|
|
|
|
+ /*
|
|
+ * Different protocols might share the same chan info, so a previous
|
|
+ * smc_chan_free call might have already freed the structure.
|
|
+ */
|
|
+ if (!scmi_info)
|
|
+ return 0;
|
|
+
|
|
/* Ignore any possible further reception on the IRQ path */
|
|
if (scmi_info->irq > 0)
|
|
free_irq(scmi_info->irq, scmi_info);
|
|
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
|
|
index 784e1b2ae5ccd..dc50dda40239e 100644
|
|
--- a/drivers/firmware/efi/libstub/x86-stub.c
|
|
+++ b/drivers/firmware/efi/libstub/x86-stub.c
|
|
@@ -21,6 +21,8 @@
|
|
#include "efistub.h"
|
|
#include "x86-stub.h"
|
|
|
|
+extern char _bss[], _ebss[];
|
|
+
|
|
const efi_system_table_t *efi_system_table;
|
|
const efi_dxe_services_table_t *efi_dxe_table;
|
|
static efi_loaded_image_t *image = NULL;
|
|
@@ -432,6 +434,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
|
|
efi_status_t status;
|
|
char *cmdline_ptr;
|
|
|
|
+ if (efi_is_native())
|
|
+ memset(_bss, 0, _ebss - _bss);
|
|
+
|
|
efi_system_table = sys_table_arg;
|
|
|
|
/* Check if we were booted by the EFI firmware */
|
|
@@ -950,8 +955,6 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
|
|
void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
|
|
struct boot_params *boot_params)
|
|
{
|
|
- extern char _bss[], _ebss[];
|
|
-
|
|
memset(_bss, 0, _ebss - _bss);
|
|
efi_stub_entry(handle, sys_table_arg, boot_params);
|
|
}
|
|
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
|
|
index 3e8e5f4ffa59f..700f71c954956 100644
|
|
--- a/drivers/gpio/Kconfig
|
|
+++ b/drivers/gpio/Kconfig
|
|
@@ -679,7 +679,8 @@ config GPIO_UNIPHIER
|
|
Say yes here to support UniPhier GPIOs.
|
|
|
|
config GPIO_VF610
|
|
- def_bool y
|
|
+ bool "VF610 GPIO support"
|
|
+ default y if SOC_VF610
|
|
depends on ARCH_MXC
|
|
select GPIOLIB_IRQCHIP
|
|
help
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
|
|
index 1c5d9388ad0bb..cb6eb47aab65b 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
|
|
@@ -313,7 +313,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
|
|
DEBUG("IMM 0x%02X\n", val);
|
|
return val;
|
|
}
|
|
- return 0;
|
|
+ break;
|
|
case ATOM_ARG_PLL:
|
|
idx = U8(*ptr);
|
|
(*ptr)++;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
|
|
index 489c89465c78b..c373a2a3248eb 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
|
|
@@ -584,11 +584,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
|
|
return AMD_RESET_METHOD_MODE1;
|
|
}
|
|
|
|
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
|
|
+{
|
|
+ u32 sol_reg;
|
|
+
|
|
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
|
+
|
|
+ /* Will reset for the following suspend abort cases.
|
|
+ * 1) Only reset limit on APU side, dGPU hasn't checked yet.
|
|
+ * 2) S3 suspend abort and TOS already launched.
|
|
+ */
|
|
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
|
|
+ !adev->suspend_complete &&
|
|
+ sol_reg)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
static int soc15_asic_reset(struct amdgpu_device *adev)
|
|
{
|
|
/* original raven doesn't have full asic reset */
|
|
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
|
|
- (adev->apu_flags & AMD_APU_IS_RAVEN2))
|
|
+ /* On the latest Raven, the GPU reset can be performed
|
|
+ * successfully. So now, temporarily enable it for the
|
|
+ * S3 suspend abort case.
|
|
+ */
|
|
+ if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
|
|
+ (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
|
|
+ !soc15_need_reset_on_resume(adev))
|
|
return 0;
|
|
|
|
switch (soc15_asic_reset_method(adev)) {
|
|
@@ -1285,24 +1308,6 @@ static int soc15_common_suspend(void *handle)
|
|
return soc15_common_hw_fini(adev);
|
|
}
|
|
|
|
-static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
|
|
-{
|
|
- u32 sol_reg;
|
|
-
|
|
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
|
-
|
|
- /* Will reset for the following suspend abort cases.
|
|
- * 1) Only reset limit on APU side, dGPU hasn't checked yet.
|
|
- * 2) S3 suspend abort and TOS already launched.
|
|
- */
|
|
- if (adev->flags & AMD_IS_APU && adev->in_s3 &&
|
|
- !adev->suspend_complete &&
|
|
- sol_reg)
|
|
- return true;
|
|
-
|
|
- return false;
|
|
-}
|
|
-
|
|
static int soc15_common_resume(void *handle)
|
|
{
|
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
index ee242d9d8b060..ff7dd17ad0763 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
@@ -1358,7 +1358,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
|
|
const uint32_t rd_buf_size = 10;
|
|
struct pipe_ctx *pipe_ctx;
|
|
ssize_t result = 0;
|
|
- int i, r, str_len = 30;
|
|
+ int i, r, str_len = 10;
|
|
|
|
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
index 009b5861a3fec..d6c5d48c878ec 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
@@ -1854,6 +1854,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
|
|
{
|
|
struct dpp *dpp = pipe_ctx->plane_res.dpp;
|
|
|
|
+ if (!stream)
|
|
+ return false;
|
|
+
|
|
if (dpp == NULL)
|
|
return false;
|
|
|
|
@@ -1876,8 +1879,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
|
|
} else
|
|
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
|
|
|
|
- if (stream != NULL && stream->ctx != NULL &&
|
|
- stream->out_transfer_func != NULL) {
|
|
+ if (stream->ctx &&
|
|
+ stream->out_transfer_func) {
|
|
log_tf(stream->ctx,
|
|
stream->out_transfer_func,
|
|
dpp->regamma_params.hw_points_num);
|
|
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
|
|
index 0f1ca0b0db495..d72c5bf4e5ac1 100644
|
|
--- a/drivers/gpu/drm/lima/lima_gem.c
|
|
+++ b/drivers/gpu/drm/lima/lima_gem.c
|
|
@@ -75,29 +75,34 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
|
|
} else {
|
|
bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
|
|
if (!bo->base.sgt) {
|
|
- sg_free_table(&sgt);
|
|
- return -ENOMEM;
|
|
+ ret = -ENOMEM;
|
|
+ goto err_out0;
|
|
}
|
|
}
|
|
|
|
ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
|
|
- if (ret) {
|
|
- sg_free_table(&sgt);
|
|
- kfree(bo->base.sgt);
|
|
- bo->base.sgt = NULL;
|
|
- return ret;
|
|
- }
|
|
+ if (ret)
|
|
+ goto err_out1;
|
|
|
|
*bo->base.sgt = sgt;
|
|
|
|
if (vm) {
|
|
ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_out2;
|
|
}
|
|
|
|
bo->heap_size = new_size;
|
|
return 0;
|
|
+
|
|
+err_out2:
|
|
+ dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
|
|
+err_out1:
|
|
+ kfree(bo->base.sgt);
|
|
+ bo->base.sgt = NULL;
|
|
+err_out0:
|
|
+ sg_free_table(&sgt);
|
|
+ return ret;
|
|
}
|
|
|
|
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
|
|
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
|
|
index 558000db4a100..beaaf44004cfd 100644
|
|
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
|
|
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
|
|
@@ -91,11 +91,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
|
|
struct drm_crtc *crtc = &mtk_crtc->base;
|
|
unsigned long flags;
|
|
|
|
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
|
|
- drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
|
|
- drm_crtc_vblank_put(crtc);
|
|
- mtk_crtc->event = NULL;
|
|
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
|
|
+ if (mtk_crtc->event) {
|
|
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
|
|
+ drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
|
|
+ drm_crtc_vblank_put(crtc);
|
|
+ mtk_crtc->event = NULL;
|
|
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
|
|
+ }
|
|
}
|
|
|
|
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
|
|
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
|
|
index 3e74c7c1b89fa..d871b1dba083d 100644
|
|
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
|
|
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
|
|
@@ -70,8 +70,8 @@
|
|
#define DSI_PS_WC 0x3fff
|
|
#define DSI_PS_SEL (3 << 16)
|
|
#define PACKED_PS_16BIT_RGB565 (0 << 16)
|
|
-#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
|
|
-#define PACKED_PS_18BIT_RGB666 (2 << 16)
|
|
+#define PACKED_PS_18BIT_RGB666 (1 << 16)
|
|
+#define LOOSELY_PS_24BIT_RGB666 (2 << 16)
|
|
#define PACKED_PS_24BIT_RGB888 (3 << 16)
|
|
|
|
#define DSI_VSA_NL 0x20
|
|
@@ -366,10 +366,10 @@ static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
|
|
ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
|
|
break;
|
|
case MIPI_DSI_FMT_RGB666:
|
|
- ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
|
|
+ ps_bpp_mode |= LOOSELY_PS_24BIT_RGB666;
|
|
break;
|
|
case MIPI_DSI_FMT_RGB666_PACKED:
|
|
- ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
|
|
+ ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
|
|
break;
|
|
case MIPI_DSI_FMT_RGB565:
|
|
ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
|
|
@@ -423,7 +423,7 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
|
|
dsi_tmp_buf_bpp = 3;
|
|
break;
|
|
case MIPI_DSI_FMT_RGB666:
|
|
- tmp_reg = LOOSELY_PS_18BIT_RGB666;
|
|
+ tmp_reg = LOOSELY_PS_24BIT_RGB666;
|
|
dsi_tmp_buf_bpp = 3;
|
|
break;
|
|
case MIPI_DSI_FMT_RGB666_PACKED:
|
|
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
|
|
index 25245ef386db6..3632f0768aa9e 100644
|
|
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
|
|
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
|
|
@@ -228,6 +228,13 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
|
|
return dpu_enc->wide_bus_en;
|
|
}
|
|
|
|
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
|
|
+{
|
|
+ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
|
|
+
|
|
+ return dpu_enc->dsc ? true : false;
|
|
+}
|
|
+
|
|
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
|
|
{
|
|
struct dpu_encoder_virt *dpu_enc;
|
|
@@ -1864,7 +1871,9 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
|
|
dsc_common_mode = 0;
|
|
pic_width = dsc->pic_width;
|
|
|
|
- dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
|
|
+ dsc_common_mode = DSC_MODE_SPLIT_PANEL;
|
|
+ if (dpu_encoder_use_dsc_merge(enc_master->parent))
|
|
+ dsc_common_mode |= DSC_MODE_MULTIPLEX;
|
|
if (enc_master->intf_mode == INTF_MODE_VIDEO)
|
|
dsc_common_mode |= DSC_MODE_VIDEO;
|
|
|
|
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
|
|
index 9e7236ef34e6d..a71efa2b9e508 100644
|
|
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
|
|
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
|
|
@@ -175,6 +175,13 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
|
|
|
|
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
|
|
|
|
+/**
|
|
+ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
|
|
+ * for the encoder.
|
|
+ * @drm_enc: Pointer to previously created drm encoder structure
|
|
+ */
|
|
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
|
|
+
|
|
/**
|
|
* dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
|
|
* in virtual encoder that can collect CRC values
|
|
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
|
|
index 2c14646661b77..2baade1cd4876 100644
|
|
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
|
|
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
|
|
@@ -100,6 +100,7 @@ static void drm_mode_to_intf_timing_params(
|
|
}
|
|
|
|
timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
|
|
+ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
|
|
|
|
/*
|
|
* for DP, divide the horizonal parameters by 2 when
|
|
@@ -256,12 +257,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
|
|
mode.htotal >>= 1;
|
|
mode.hsync_start >>= 1;
|
|
mode.hsync_end >>= 1;
|
|
+ mode.hskew >>= 1;
|
|
|
|
DPU_DEBUG_VIDENC(phys_enc,
|
|
- "split_role %d, halve horizontal %d %d %d %d\n",
|
|
+ "split_role %d, halve horizontal %d %d %d %d %d\n",
|
|
phys_enc->split_role,
|
|
mode.hdisplay, mode.htotal,
|
|
- mode.hsync_start, mode.hsync_end);
|
|
+ mode.hsync_start, mode.hsync_end,
|
|
+ mode.hskew);
|
|
}
|
|
|
|
drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
|
|
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
|
|
index 384558d2f9602..1debac4fcc3eb 100644
|
|
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
|
|
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
|
|
@@ -154,13 +154,8 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
|
|
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
|
|
display_hctl = (hsync_end_x << 16) | hsync_start_x;
|
|
|
|
- /*
|
|
- * DATA_HCTL_EN controls data timing which can be different from
|
|
- * video timing. It is recommended to enable it for all cases, except
|
|
- * if compression is enabled in 1 pixel per clock mode
|
|
- */
|
|
if (p->wide_bus_en)
|
|
- intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
|
|
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
|
|
|
|
data_width = p->width;
|
|
|
|
@@ -230,6 +225,14 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
|
|
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
|
|
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
|
|
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
|
|
+ /*
|
|
+ * DATA_HCTL_EN controls data timing which can be different from
|
|
+ * video timing. It is recommended to enable it for all cases, except
|
|
+ * if compression is enabled in 1 pixel per clock mode
|
|
+ */
|
|
+ if (!(p->compression_en && !p->wide_bus_en))
|
|
+ intf_cfg2 |= INTF_CFG2_DATA_HCTL_EN;
|
|
+
|
|
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
|
|
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
|
|
DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
|
|
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
|
|
index e75339b96a1d2..7f502c8bee1d4 100644
|
|
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
|
|
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
|
|
@@ -33,6 +33,7 @@ struct intf_timing_params {
|
|
u32 hsync_skew;
|
|
|
|
bool wide_bus_en;
|
|
+ bool compression_en;
|
|
};
|
|
|
|
struct intf_prog_fetch {
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
|
|
index 126b3c6e12f99..f2dca41e46c5f 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
|
|
@@ -1194,6 +1194,8 @@ nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
|
|
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
|
|
bdev->dev_mapping);
|
|
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
|
|
+ nvbo->bo.resource->bus.offset = 0;
|
|
+ nvbo->bo.resource->bus.addr = NULL;
|
|
goto retry;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
|
|
index 42584d8a9aeb6..bfcddd4aa9322 100644
|
|
--- a/drivers/gpu/drm/panel/panel-edp.c
|
|
+++ b/drivers/gpu/drm/panel/panel-edp.c
|
|
@@ -413,8 +413,7 @@ static int panel_edp_unprepare(struct drm_panel *panel)
|
|
if (!p->prepared)
|
|
return 0;
|
|
|
|
- pm_runtime_mark_last_busy(panel->dev);
|
|
- ret = pm_runtime_put_autosuspend(panel->dev);
|
|
+ ret = pm_runtime_put_sync_suspend(panel->dev);
|
|
if (ret < 0)
|
|
return ret;
|
|
p->prepared = false;
|
|
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
|
|
index 927e5f42e97d0..3e48cbb522a1c 100644
|
|
--- a/drivers/gpu/drm/radeon/ni.c
|
|
+++ b/drivers/gpu/drm/radeon/ni.c
|
|
@@ -813,7 +813,7 @@ int ni_init_microcode(struct radeon_device *rdev)
|
|
err = 0;
|
|
} else if (rdev->smc_fw->size != smc_req_size) {
|
|
pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
|
|
- rdev->mc_fw->size, fw_name);
|
|
+ rdev->smc_fw->size, fw_name);
|
|
err = -EINVAL;
|
|
}
|
|
}
|
|
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
|
|
index f51774866f412..8f230f4c01bc3 100644
|
|
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
|
|
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
|
|
@@ -411,7 +411,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
|
|
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
|
|
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
|
|
|
|
- value = mode->hsync_start - mode->hdisplay;
|
|
+ value = mode->htotal - mode->hsync_start;
|
|
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
|
|
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
|
|
|
|
@@ -426,7 +426,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
|
|
value = mode->vtotal - mode->vdisplay;
|
|
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
|
|
|
|
- value = mode->vsync_start - mode->vdisplay;
|
|
+ value = mode->vtotal - mode->vsync_start;
|
|
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
|
|
|
|
value = mode->vsync_end - mode->vsync_start;
|
|
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
|
|
index 68f6ebb33460b..eb4a108c5bd2a 100644
|
|
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
|
|
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
|
|
@@ -577,8 +577,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
|
|
ret = -EINVAL;
|
|
goto err_put_port;
|
|
} else if (ret) {
|
|
- DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
|
|
- ret = -EPROBE_DEFER;
|
|
+ dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
|
|
goto err_put_port;
|
|
}
|
|
if (lvds->panel)
|
|
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
|
|
index d773ef4854188..b563988fb6848 100644
|
|
--- a/drivers/gpu/drm/tegra/dpaux.c
|
|
+++ b/drivers/gpu/drm/tegra/dpaux.c
|
|
@@ -524,7 +524,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
|
|
if (err < 0) {
|
|
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
|
|
dpaux->irq, err);
|
|
- return err;
|
|
+ goto err_pm_disable;
|
|
}
|
|
|
|
disable_irq(dpaux->irq);
|
|
@@ -544,7 +544,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
|
|
*/
|
|
err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
|
|
if (err < 0)
|
|
- return err;
|
|
+ goto err_pm_disable;
|
|
|
|
#ifdef CONFIG_GENERIC_PINCONF
|
|
dpaux->desc.name = dev_name(&pdev->dev);
|
|
@@ -557,7 +557,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
|
|
dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
|
|
if (IS_ERR(dpaux->pinctrl)) {
|
|
dev_err(&pdev->dev, "failed to register pincontrol\n");
|
|
- return PTR_ERR(dpaux->pinctrl);
|
|
+ err = PTR_ERR(dpaux->pinctrl);
|
|
+ goto err_pm_disable;
|
|
}
|
|
#endif
|
|
/* enable and clear all interrupts */
|
|
@@ -573,10 +574,15 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
|
|
err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
|
|
if (err < 0) {
|
|
dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
|
|
- return err;
|
|
+ goto err_pm_disable;
|
|
}
|
|
|
|
return 0;
|
|
+
|
|
+err_pm_disable:
|
|
+ pm_runtime_put_sync(&pdev->dev);
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+ return err;
|
|
}
|
|
|
|
static int tegra_dpaux_remove(struct platform_device *pdev)
|
|
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
|
|
index de1333dc0d867..7bb26655cb3cc 100644
|
|
--- a/drivers/gpu/drm/tegra/dsi.c
|
|
+++ b/drivers/gpu/drm/tegra/dsi.c
|
|
@@ -1534,9 +1534,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
|
|
np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
|
|
if (np) {
|
|
struct platform_device *gangster = of_find_device_by_node(np);
|
|
+ of_node_put(np);
|
|
+ if (!gangster)
|
|
+ return -EPROBE_DEFER;
|
|
|
|
dsi->slave = platform_get_drvdata(gangster);
|
|
- of_node_put(np);
|
|
|
|
if (!dsi->slave) {
|
|
put_device(&gangster->dev);
|
|
@@ -1584,48 +1586,58 @@ static int tegra_dsi_probe(struct platform_device *pdev)
|
|
|
|
if (!pdev->dev.pm_domain) {
|
|
dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
|
|
- if (IS_ERR(dsi->rst))
|
|
- return PTR_ERR(dsi->rst);
|
|
+ if (IS_ERR(dsi->rst)) {
|
|
+ err = PTR_ERR(dsi->rst);
|
|
+ goto remove;
|
|
+ }
|
|
}
|
|
|
|
dsi->clk = devm_clk_get(&pdev->dev, NULL);
|
|
if (IS_ERR(dsi->clk)) {
|
|
- dev_err(&pdev->dev, "cannot get DSI clock\n");
|
|
- return PTR_ERR(dsi->clk);
|
|
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
|
|
+ "cannot get DSI clock\n");
|
|
+ goto remove;
|
|
}
|
|
|
|
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
|
|
if (IS_ERR(dsi->clk_lp)) {
|
|
- dev_err(&pdev->dev, "cannot get low-power clock\n");
|
|
- return PTR_ERR(dsi->clk_lp);
|
|
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
|
|
+ "cannot get low-power clock\n");
|
|
+ goto remove;
|
|
}
|
|
|
|
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
|
|
if (IS_ERR(dsi->clk_parent)) {
|
|
- dev_err(&pdev->dev, "cannot get parent clock\n");
|
|
- return PTR_ERR(dsi->clk_parent);
|
|
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
|
|
+ "cannot get parent clock\n");
|
|
+ goto remove;
|
|
}
|
|
|
|
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
|
|
if (IS_ERR(dsi->vdd)) {
|
|
- dev_err(&pdev->dev, "cannot get VDD supply\n");
|
|
- return PTR_ERR(dsi->vdd);
|
|
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
|
|
+ "cannot get VDD supply\n");
|
|
+ goto remove;
|
|
}
|
|
|
|
err = tegra_dsi_setup_clocks(dsi);
|
|
if (err < 0) {
|
|
dev_err(&pdev->dev, "cannot setup clocks\n");
|
|
- return err;
|
|
+ goto remove;
|
|
}
|
|
|
|
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
|
|
- if (IS_ERR(dsi->regs))
|
|
- return PTR_ERR(dsi->regs);
|
|
+ if (IS_ERR(dsi->regs)) {
|
|
+ err = PTR_ERR(dsi->regs);
|
|
+ goto remove;
|
|
+ }
|
|
|
|
dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
|
|
- if (IS_ERR(dsi->mipi))
|
|
- return PTR_ERR(dsi->mipi);
|
|
+ if (IS_ERR(dsi->mipi)) {
|
|
+ err = PTR_ERR(dsi->mipi);
|
|
+ goto remove;
|
|
+ }
|
|
|
|
dsi->host.ops = &tegra_dsi_host_ops;
|
|
dsi->host.dev = &pdev->dev;
|
|
@@ -1653,9 +1665,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
|
|
return 0;
|
|
|
|
unregister:
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
mipi_dsi_host_unregister(&dsi->host);
|
|
mipi_free:
|
|
tegra_mipi_free(dsi->mipi);
|
|
+remove:
|
|
+ tegra_output_remove(&dsi->output);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
|
|
index 9291209154a7a..a688ecf08451e 100644
|
|
--- a/drivers/gpu/drm/tegra/fb.c
|
|
+++ b/drivers/gpu/drm/tegra/fb.c
|
|
@@ -166,6 +166,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
|
|
|
|
if (gem->size < size) {
|
|
err = -EINVAL;
|
|
+ drm_gem_object_put(gem);
|
|
goto unreference;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
|
|
index bf240767dad9f..c66764c0bd250 100644
|
|
--- a/drivers/gpu/drm/tegra/hdmi.c
|
|
+++ b/drivers/gpu/drm/tegra/hdmi.c
|
|
@@ -1776,7 +1776,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
|
|
static int tegra_hdmi_probe(struct platform_device *pdev)
|
|
{
|
|
struct tegra_hdmi *hdmi;
|
|
- struct resource *regs;
|
|
int err;
|
|
|
|
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
|
|
@@ -1838,14 +1837,15 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
|
|
if (err < 0)
|
|
return err;
|
|
|
|
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
- hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
|
|
- if (IS_ERR(hdmi->regs))
|
|
- return PTR_ERR(hdmi->regs);
|
|
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
|
|
+ if (IS_ERR(hdmi->regs)) {
|
|
+ err = PTR_ERR(hdmi->regs);
|
|
+ goto remove;
|
|
+ }
|
|
|
|
err = platform_get_irq(pdev, 0);
|
|
if (err < 0)
|
|
- return err;
|
|
+ goto remove;
|
|
|
|
hdmi->irq = err;
|
|
|
|
@@ -1854,18 +1854,18 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
|
|
if (err < 0) {
|
|
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
|
|
hdmi->irq, err);
|
|
- return err;
|
|
+ goto remove;
|
|
}
|
|
|
|
platform_set_drvdata(pdev, hdmi);
|
|
|
|
err = devm_pm_runtime_enable(&pdev->dev);
|
|
if (err)
|
|
- return err;
|
|
+ goto remove;
|
|
|
|
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
|
|
if (err)
|
|
- return err;
|
|
+ goto remove;
|
|
|
|
INIT_LIST_HEAD(&hdmi->client.list);
|
|
hdmi->client.ops = &hdmi_client_ops;
|
|
@@ -1875,10 +1875,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
|
|
if (err < 0) {
|
|
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
|
|
err);
|
|
- return err;
|
|
+ goto remove;
|
|
}
|
|
|
|
return 0;
|
|
+
|
|
+remove:
|
|
+ tegra_output_remove(&hdmi->output);
|
|
+ return err;
|
|
}
|
|
|
|
static int tegra_hdmi_remove(struct platform_device *pdev)
|
|
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
|
|
index 47d26b5d99456..7ccd010a821b7 100644
|
|
--- a/drivers/gpu/drm/tegra/output.c
|
|
+++ b/drivers/gpu/drm/tegra/output.c
|
|
@@ -139,8 +139,10 @@ int tegra_output_probe(struct tegra_output *output)
|
|
GPIOD_IN,
|
|
"HDMI hotplug detect");
|
|
if (IS_ERR(output->hpd_gpio)) {
|
|
- if (PTR_ERR(output->hpd_gpio) != -ENOENT)
|
|
- return PTR_ERR(output->hpd_gpio);
|
|
+ if (PTR_ERR(output->hpd_gpio) != -ENOENT) {
|
|
+ err = PTR_ERR(output->hpd_gpio);
|
|
+ goto put_i2c;
|
|
+ }
|
|
|
|
output->hpd_gpio = NULL;
|
|
}
|
|
@@ -149,7 +151,7 @@ int tegra_output_probe(struct tegra_output *output)
|
|
err = gpiod_to_irq(output->hpd_gpio);
|
|
if (err < 0) {
|
|
dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
|
|
- return err;
|
|
+ goto put_i2c;
|
|
}
|
|
|
|
output->hpd_irq = err;
|
|
@@ -162,7 +164,7 @@ int tegra_output_probe(struct tegra_output *output)
|
|
if (err < 0) {
|
|
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
|
|
output->hpd_irq, err);
|
|
- return err;
|
|
+ goto put_i2c;
|
|
}
|
|
|
|
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
|
|
@@ -176,6 +178,12 @@ int tegra_output_probe(struct tegra_output *output)
|
|
}
|
|
|
|
return 0;
|
|
+
|
|
+put_i2c:
|
|
+ if (output->ddc)
|
|
+ i2c_put_adapter(output->ddc);
|
|
+
|
|
+ return err;
|
|
}
|
|
|
|
void tegra_output_remove(struct tegra_output *output)
|
|
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
|
|
index ff8fce36d2aa1..86e55e5d12b39 100644
|
|
--- a/drivers/gpu/drm/tegra/rgb.c
|
|
+++ b/drivers/gpu/drm/tegra/rgb.c
|
|
@@ -214,26 +214,28 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
|
|
rgb->clk = devm_clk_get(dc->dev, NULL);
|
|
if (IS_ERR(rgb->clk)) {
|
|
dev_err(dc->dev, "failed to get clock\n");
|
|
- return PTR_ERR(rgb->clk);
|
|
+ err = PTR_ERR(rgb->clk);
|
|
+ goto remove;
|
|
}
|
|
|
|
rgb->clk_parent = devm_clk_get(dc->dev, "parent");
|
|
if (IS_ERR(rgb->clk_parent)) {
|
|
dev_err(dc->dev, "failed to get parent clock\n");
|
|
- return PTR_ERR(rgb->clk_parent);
|
|
+ err = PTR_ERR(rgb->clk_parent);
|
|
+ goto remove;
|
|
}
|
|
|
|
err = clk_set_parent(rgb->clk, rgb->clk_parent);
|
|
if (err < 0) {
|
|
dev_err(dc->dev, "failed to set parent clock: %d\n", err);
|
|
- return err;
|
|
+ goto remove;
|
|
}
|
|
|
|
rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
|
|
if (IS_ERR(rgb->pll_d_out0)) {
|
|
err = PTR_ERR(rgb->pll_d_out0);
|
|
dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
|
|
- return err;
|
|
+ goto remove;
|
|
}
|
|
|
|
if (dc->soc->has_pll_d2_out0) {
|
|
@@ -241,13 +243,19 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
|
|
if (IS_ERR(rgb->pll_d2_out0)) {
|
|
err = PTR_ERR(rgb->pll_d2_out0);
|
|
dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
|
|
- return err;
|
|
+ goto put_pll;
|
|
}
|
|
}
|
|
|
|
dc->rgb = &rgb->output;
|
|
|
|
return 0;
|
|
+
|
|
+put_pll:
|
|
+ clk_put(rgb->pll_d_out0);
|
|
+remove:
|
|
+ tegra_output_remove(&rgb->output);
|
|
+ return err;
|
|
}
|
|
|
|
int tegra_dc_rgb_remove(struct tegra_dc *dc)
|
|
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
|
|
index cb66a425dd200..896a77853ebc5 100644
|
|
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
|
|
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
|
|
@@ -270,6 +270,16 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
|
|
|
|
reinit_completion(&tcrtc->framedone_completion);
|
|
|
|
+ /*
|
|
+ * If a layer is left enabled when the videoport is disabled, and the
|
|
+ * vid pipeline that was used for the layer is taken into use on
|
|
+ * another videoport, the DSS will report sync lost issues. Disable all
|
|
+ * the layers here as a work-around.
|
|
+ */
|
|
+ for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
|
|
+ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
|
|
+ false);
|
|
+
|
|
dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
|
|
|
|
if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
|
|
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
|
|
index 42d50ec5526d7..435b3b66ae632 100644
|
|
--- a/drivers/gpu/drm/tidss/tidss_plane.c
|
|
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
|
|
@@ -211,7 +211,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
|
|
|
|
drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
|
|
|
|
- drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
|
|
+ drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0,
|
|
num_planes - 1);
|
|
|
|
ret = drm_plane_create_color_properties(&tplane->plane,
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
|
|
index 60e3cc537f365..b9e5c8cd31001 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
|
|
@@ -65,8 +65,11 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
|
|
ttm_resource_init(bo, place, *res);
|
|
|
|
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
|
|
- if (id < 0)
|
|
+ if (id < 0) {
|
|
+ ttm_resource_fini(man, *res);
|
|
+ kfree(*res);
|
|
return id;
|
|
+ }
|
|
|
|
spin_lock(&gman->lock);
|
|
|
|
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
|
|
index c936d6a51c0cd..9c963ad27f9d1 100644
|
|
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
|
|
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
|
|
@@ -34,6 +34,8 @@ static int sensor_mask_override = -1;
|
|
module_param_named(sensor_mask, sensor_mask_override, int, 0444);
|
|
MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
|
|
|
|
+static bool intr_disable = true;
|
|
+
|
|
static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
|
|
{
|
|
union cmd_response cmd_resp;
|
|
@@ -54,7 +56,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
|
|
|
|
cmd_base.ul = 0;
|
|
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
|
|
- cmd_base.cmd_v2.intr_disable = 1;
|
|
+ cmd_base.cmd_v2.intr_disable = intr_disable;
|
|
cmd_base.cmd_v2.period = info.period;
|
|
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
|
|
cmd_base.cmd_v2.length = 16;
|
|
@@ -72,7 +74,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
|
|
|
|
cmd_base.ul = 0;
|
|
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
|
|
- cmd_base.cmd_v2.intr_disable = 1;
|
|
+ cmd_base.cmd_v2.intr_disable = intr_disable;
|
|
cmd_base.cmd_v2.period = 0;
|
|
cmd_base.cmd_v2.sensor_id = sensor_idx;
|
|
cmd_base.cmd_v2.length = 16;
|
|
@@ -86,7 +88,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
|
|
union sfh_cmd_base cmd_base;
|
|
|
|
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
|
|
- cmd_base.cmd_v2.intr_disable = 1;
|
|
+ cmd_base.cmd_v2.intr_disable = intr_disable;
|
|
cmd_base.cmd_v2.period = 0;
|
|
cmd_base.cmd_v2.sensor_id = 0;
|
|
|
|
@@ -288,6 +290,26 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
|
|
return 0;
|
|
}
|
|
|
|
+static int mp2_disable_intr(const struct dmi_system_id *id)
|
|
+{
|
|
+ intr_disable = false;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct dmi_system_id dmi_sfh_table[] = {
|
|
+ {
|
|
+ /*
|
|
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218104
|
|
+ */
|
|
+ .callback = mp2_disable_intr,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook x360 435 G7"),
|
|
+ },
|
|
+ },
|
|
+ {}
|
|
+};
|
|
+
|
|
static const struct dmi_system_id dmi_nodevs[] = {
|
|
{
|
|
/*
|
|
@@ -311,6 +333,8 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
|
|
if (dmi_first_match(dmi_nodevs))
|
|
return -ENODEV;
|
|
|
|
+ dmi_check_system(dmi_sfh_table);
|
|
+
|
|
privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
|
|
if (!privdata)
|
|
return -ENOMEM;
|
|
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
|
|
index dfb7cabd82efe..2b125cd9742cb 100644
|
|
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
|
|
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
|
|
@@ -89,10 +89,10 @@ enum mem_use_type {
|
|
struct hpd_status {
|
|
union {
|
|
struct {
|
|
- u32 human_presence_report : 4;
|
|
- u32 human_presence_actual : 4;
|
|
- u32 probablity : 8;
|
|
u32 object_distance : 16;
|
|
+ u32 probablity : 8;
|
|
+ u32 human_presence_actual : 4;
|
|
+ u32 human_presence_report : 4;
|
|
} shpd;
|
|
u32 val;
|
|
};
|
|
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
|
|
index 149a3c74346b4..f86c1ea83a037 100644
|
|
--- a/drivers/hid/hid-lenovo.c
|
|
+++ b/drivers/hid/hid-lenovo.c
|
|
@@ -54,10 +54,10 @@ struct lenovo_drvdata {
|
|
/* 0: Up
|
|
* 1: Down (undecided)
|
|
* 2: Scrolling
|
|
- * 3: Patched firmware, disable workaround
|
|
*/
|
|
u8 middlebutton_state;
|
|
bool fn_lock;
|
|
+ bool middleclick_workaround_cptkbd;
|
|
};
|
|
|
|
#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
|
|
@@ -621,6 +621,36 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
|
|
return count;
|
|
}
|
|
|
|
+static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct hid_device *hdev = to_hid_device(dev);
|
|
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
|
|
+
|
|
+ return snprintf(buf, PAGE_SIZE, "%u\n",
|
|
+ cptkbd_data->middleclick_workaround_cptkbd);
|
|
+}
|
|
+
|
|
+static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ const char *buf,
|
|
+ size_t count)
|
|
+{
|
|
+ struct hid_device *hdev = to_hid_device(dev);
|
|
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
|
|
+ int value;
|
|
+
|
|
+ if (kstrtoint(buf, 10, &value))
|
|
+ return -EINVAL;
|
|
+ if (value < 0 || value > 1)
|
|
+ return -EINVAL;
|
|
+
|
|
+ cptkbd_data->middleclick_workaround_cptkbd = !!value;
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
|
|
static struct device_attribute dev_attr_fn_lock =
|
|
__ATTR(fn_lock, S_IWUSR | S_IRUGO,
|
|
@@ -632,10 +662,16 @@ static struct device_attribute dev_attr_sensitivity_cptkbd =
|
|
attr_sensitivity_show_cptkbd,
|
|
attr_sensitivity_store_cptkbd);
|
|
|
|
+static struct device_attribute dev_attr_middleclick_workaround_cptkbd =
|
|
+ __ATTR(middleclick_workaround, S_IWUSR | S_IRUGO,
|
|
+ attr_middleclick_workaround_show_cptkbd,
|
|
+ attr_middleclick_workaround_store_cptkbd);
|
|
+
|
|
|
|
static struct attribute *lenovo_attributes_cptkbd[] = {
|
|
&dev_attr_fn_lock.attr,
|
|
&dev_attr_sensitivity_cptkbd.attr,
|
|
+ &dev_attr_middleclick_workaround_cptkbd.attr,
|
|
NULL
|
|
};
|
|
|
|
@@ -686,23 +722,7 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
|
|
{
|
|
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
|
|
|
|
- if (cptkbd_data->middlebutton_state != 3) {
|
|
- /* REL_X and REL_Y events during middle button pressed
|
|
- * are only possible on patched, bug-free firmware
|
|
- * so set middlebutton_state to 3
|
|
- * to never apply workaround anymore
|
|
- */
|
|
- if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD &&
|
|
- cptkbd_data->middlebutton_state == 1 &&
|
|
- usage->type == EV_REL &&
|
|
- (usage->code == REL_X || usage->code == REL_Y)) {
|
|
- cptkbd_data->middlebutton_state = 3;
|
|
- /* send middle button press which was hold before */
|
|
- input_event(field->hidinput->input,
|
|
- EV_KEY, BTN_MIDDLE, 1);
|
|
- input_sync(field->hidinput->input);
|
|
- }
|
|
-
|
|
+ if (cptkbd_data->middleclick_workaround_cptkbd) {
|
|
/* "wheel" scroll events */
|
|
if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
|
|
usage->code == REL_HWHEEL)) {
|
|
@@ -1166,6 +1186,7 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
|
|
cptkbd_data->middlebutton_state = 0;
|
|
cptkbd_data->fn_lock = true;
|
|
cptkbd_data->sensitivity = 0x05;
|
|
+ cptkbd_data->middleclick_workaround_cptkbd = true;
|
|
lenovo_features_set_cptkbd(hdev);
|
|
|
|
ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd);
|
|
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
|
|
index 5ec1f174127a3..3816fd06bc953 100644
|
|
--- a/drivers/hid/hid-multitouch.c
|
|
+++ b/drivers/hid/hid-multitouch.c
|
|
@@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = {
|
|
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
|
|
USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
|
|
|
|
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
|
|
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
|
|
+ USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
|
|
+
|
|
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
|
|
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
|
|
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
|
|
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
|
|
index 8d8fa8e8afe04..20a9cddb3723a 100644
|
|
--- a/drivers/hwtracing/ptt/hisi_ptt.c
|
|
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
|
|
@@ -654,6 +654,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
|
|
int ret;
|
|
u32 val;
|
|
|
|
+ if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
|
|
+ return -ENOENT;
|
|
+
|
|
if (event->cpu < 0) {
|
|
dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
|
|
return -EOPNOTSUPP;
|
|
@@ -662,9 +665,6 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
|
|
if (event->attach_state & PERF_ATTACH_TASK)
|
|
return -EOPNOTSUPP;
|
|
|
|
- if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
|
|
- return -ENOENT;
|
|
-
|
|
ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
|
|
if (ret < 0)
|
|
return ret;
|
|
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
|
|
index 3a9b9a28d858f..453188db39d83 100644
|
|
--- a/drivers/infiniband/core/device.c
|
|
+++ b/drivers/infiniband/core/device.c
|
|
@@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client)
|
|
{
|
|
int ret;
|
|
|
|
- down_write(&clients_rwsem);
|
|
+ lockdep_assert_held(&clients_rwsem);
|
|
/*
|
|
* The add/remove callbacks must be called in FIFO/LIFO order. To
|
|
* achieve this we assign client_ids so they are sorted in
|
|
@@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client)
|
|
client->client_id = highest_client_id;
|
|
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
|
|
if (ret)
|
|
- goto out;
|
|
+ return ret;
|
|
|
|
highest_client_id++;
|
|
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
|
|
-
|
|
-out:
|
|
- up_write(&clients_rwsem);
|
|
- return ret;
|
|
+ return 0;
|
|
}
|
|
|
|
static void remove_client_id(struct ib_client *client)
|
|
@@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client)
|
|
{
|
|
struct ib_device *device;
|
|
unsigned long index;
|
|
+ bool need_unreg = false;
|
|
int ret;
|
|
|
|
refcount_set(&client->uses, 1);
|
|
init_completion(&client->uses_zero);
|
|
+
|
|
+ /*
|
|
+ * The devices_rwsem is held in write mode to ensure that a racing
|
|
+ * ib_register_device() sees a consisent view of clients and devices.
|
|
+ */
|
|
+ down_write(&devices_rwsem);
|
|
+ down_write(&clients_rwsem);
|
|
ret = assign_client_id(client);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto out;
|
|
|
|
- down_read(&devices_rwsem);
|
|
+ need_unreg = true;
|
|
xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
|
|
ret = add_client_context(device, client);
|
|
- if (ret) {
|
|
- up_read(&devices_rwsem);
|
|
- ib_unregister_client(client);
|
|
- return ret;
|
|
- }
|
|
+ if (ret)
|
|
+ goto out;
|
|
}
|
|
- up_read(&devices_rwsem);
|
|
- return 0;
|
|
+ ret = 0;
|
|
+out:
|
|
+ up_write(&clients_rwsem);
|
|
+ up_write(&devices_rwsem);
|
|
+ if (need_unreg && ret)
|
|
+ ib_unregister_client(client);
|
|
+ return ret;
|
|
}
|
|
EXPORT_SYMBOL(ib_register_client);
|
|
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
|
|
index 1112afa0af552..8748b65c87ea7 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
|
|
@@ -595,6 +595,13 @@ struct hns_roce_work {
|
|
u32 queue_num;
|
|
};
|
|
|
|
+enum hns_roce_cong_type {
|
|
+ CONG_TYPE_DCQCN,
|
|
+ CONG_TYPE_LDCP,
|
|
+ CONG_TYPE_HC3,
|
|
+ CONG_TYPE_DIP,
|
|
+};
|
|
+
|
|
struct hns_roce_qp {
|
|
struct ib_qp ibqp;
|
|
struct hns_roce_wq rq;
|
|
@@ -639,6 +646,7 @@ struct hns_roce_qp {
|
|
struct list_head sq_node; /* all send qps are on a list */
|
|
struct hns_user_mmap_entry *dwqe_mmap_entry;
|
|
u32 config;
|
|
+ enum hns_roce_cong_type cong_type;
|
|
};
|
|
|
|
struct hns_roce_ib_iboe {
|
|
@@ -710,13 +718,6 @@ struct hns_roce_eq_table {
|
|
struct hns_roce_eq *eq;
|
|
};
|
|
|
|
-enum cong_type {
|
|
- CONG_TYPE_DCQCN,
|
|
- CONG_TYPE_LDCP,
|
|
- CONG_TYPE_HC3,
|
|
- CONG_TYPE_DIP,
|
|
-};
|
|
-
|
|
struct hns_roce_caps {
|
|
u64 fw_ver;
|
|
u8 num_ports;
|
|
@@ -847,7 +848,7 @@ struct hns_roce_caps {
|
|
u16 default_aeq_period;
|
|
u16 default_aeq_arm_st;
|
|
u16 default_ceq_arm_st;
|
|
- enum cong_type cong_type;
|
|
+ enum hns_roce_cong_type cong_type;
|
|
};
|
|
|
|
enum hns_roce_device_state {
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
index 58fbb1d3b7f41..d06b19e69a151 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
@@ -4886,12 +4886,15 @@ static int check_cong_type(struct ib_qp *ibqp,
|
|
struct hns_roce_congestion_algorithm *cong_alg)
|
|
{
|
|
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
|
|
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
|
|
- if (ibqp->qp_type == IB_QPT_UD)
|
|
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
|
|
+ if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI)
|
|
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
|
|
+ else
|
|
+ hr_qp->cong_type = hr_dev->caps.cong_type;
|
|
|
|
/* different congestion types match different configurations */
|
|
- switch (hr_dev->caps.cong_type) {
|
|
+ switch (hr_qp->cong_type) {
|
|
case CONG_TYPE_DCQCN:
|
|
cong_alg->alg_sel = CONG_DCQCN;
|
|
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
|
|
@@ -4919,8 +4922,8 @@ static int check_cong_type(struct ib_qp *ibqp,
|
|
default:
|
|
ibdev_warn(&hr_dev->ib_dev,
|
|
"invalid type(%u) for congestion selection.\n",
|
|
- hr_dev->caps.cong_type);
|
|
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
|
|
+ hr_qp->cong_type);
|
|
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
|
|
cong_alg->alg_sel = CONG_DCQCN;
|
|
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
|
|
cong_alg->dip_vld = DIP_INVALID;
|
|
@@ -4939,6 +4942,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
|
|
struct hns_roce_congestion_algorithm cong_field;
|
|
struct ib_device *ibdev = ibqp->device;
|
|
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
|
|
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
u32 dip_idx = 0;
|
|
int ret;
|
|
|
|
@@ -4951,7 +4955,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
|
|
return ret;
|
|
|
|
hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
|
|
- hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
|
|
+ hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
|
|
hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
|
|
hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
|
|
hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
|
|
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
|
|
index 280d633d4ec4f..d691cdef5e9a3 100644
|
|
--- a/drivers/infiniband/hw/irdma/uk.c
|
|
+++ b/drivers/infiniband/hw/irdma/uk.c
|
|
@@ -1414,6 +1414,78 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
|
|
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
|
|
}
|
|
|
|
+/**
|
|
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
|
|
+ * @ukinfo: qp initialization info
|
|
+ * @sq_shift: Returns shift of SQ
|
|
+ * @rq_shift: Returns shift of RQ
|
|
+ */
|
|
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
|
|
+ u8 *rq_shift)
|
|
+{
|
|
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
|
|
+
|
|
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
|
|
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
|
|
+ ukinfo->max_sq_frag_cnt,
|
|
+ ukinfo->max_inline_data, sq_shift);
|
|
+
|
|
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
|
|
+ rq_shift);
|
|
+
|
|
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
|
|
+ if (ukinfo->abi_ver > 4)
|
|
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
|
|
+ * @ukinfo: qp initialization info
|
|
+ * @sq_depth: Returns depth of SQ
|
|
+ * @sq_shift: Returns shift of SQ
|
|
+ */
|
|
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
|
|
+ u32 *sq_depth, u8 *sq_shift)
|
|
+{
|
|
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
|
|
+ int status;
|
|
+
|
|
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
|
|
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
|
|
+ ukinfo->max_sq_frag_cnt,
|
|
+ ukinfo->max_inline_data, sq_shift);
|
|
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
|
|
+ *sq_shift, sq_depth);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
|
|
+ * @ukinfo: qp initialization info
|
|
+ * @rq_depth: Returns depth of RQ
|
|
+ * @rq_shift: Returns shift of RQ
|
|
+ */
|
|
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
|
|
+ u32 *rq_depth, u8 *rq_shift)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
|
|
+ rq_shift);
|
|
+
|
|
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
|
|
+ if (ukinfo->abi_ver > 4)
|
|
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
|
|
+ }
|
|
+
|
|
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
|
|
+ *rq_shift, rq_depth);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
/**
|
|
* irdma_uk_qp_init - initialize shared qp
|
|
* @qp: hw qp (user and kernel)
|
|
@@ -1428,23 +1500,12 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
|
|
{
|
|
int ret_code = 0;
|
|
u32 sq_ring_size;
|
|
- u8 sqshift, rqshift;
|
|
|
|
qp->uk_attrs = info->uk_attrs;
|
|
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
|
|
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
|
|
return -EINVAL;
|
|
|
|
- irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
|
|
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
|
|
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
|
|
- info->max_inline_data, &sqshift);
|
|
- if (info->abi_ver > 4)
|
|
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
|
|
- } else {
|
|
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
|
|
- info->max_inline_data, &sqshift);
|
|
- }
|
|
qp->qp_caps = info->qp_caps;
|
|
qp->sq_base = info->sq;
|
|
qp->rq_base = info->rq;
|
|
@@ -1458,7 +1519,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
|
|
qp->sq_size = info->sq_size;
|
|
qp->push_mode = false;
|
|
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
|
|
- sq_ring_size = qp->sq_size << sqshift;
|
|
+ sq_ring_size = qp->sq_size << info->sq_shift;
|
|
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
|
|
IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
|
|
if (info->first_sq_wq) {
|
|
@@ -1473,9 +1534,9 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
|
|
qp->rq_size = info->rq_size;
|
|
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
|
|
qp->max_inline_data = info->max_inline_data;
|
|
- qp->rq_wqe_size = rqshift;
|
|
+ qp->rq_wqe_size = info->rq_shift;
|
|
IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
|
|
- qp->rq_wqe_size_multiplier = 1 << rqshift;
|
|
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
|
|
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
|
|
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
|
|
else
|
|
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
|
|
index d0cdf609f5e06..1e0e1a71dbada 100644
|
|
--- a/drivers/infiniband/hw/irdma/user.h
|
|
+++ b/drivers/infiniband/hw/irdma/user.h
|
|
@@ -295,6 +295,12 @@ void irdma_uk_cq_init(struct irdma_cq_uk *cq,
|
|
struct irdma_cq_uk_init_info *info);
|
|
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
|
|
struct irdma_qp_uk_init_info *info);
|
|
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
|
|
+ u8 *rq_shift);
|
|
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
|
|
+ u32 *sq_depth, u8 *sq_shift);
|
|
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
|
|
+ u32 *rq_depth, u8 *rq_shift);
|
|
struct irdma_sq_uk_wr_trk_info {
|
|
u64 wrid;
|
|
u32 wr_len;
|
|
@@ -374,8 +380,12 @@ struct irdma_qp_uk_init_info {
|
|
u32 max_sq_frag_cnt;
|
|
u32 max_rq_frag_cnt;
|
|
u32 max_inline_data;
|
|
+ u32 sq_depth;
|
|
+ u32 rq_depth;
|
|
u8 first_sq_wq;
|
|
u8 type;
|
|
+ u8 sq_shift;
|
|
+ u8 rq_shift;
|
|
int abi_ver;
|
|
bool legacy_mode;
|
|
};
|
|
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
|
|
index 42c671f209233..76c5f461faca0 100644
|
|
--- a/drivers/infiniband/hw/irdma/verbs.c
|
|
+++ b/drivers/infiniband/hw/irdma/verbs.c
|
|
@@ -277,7 +277,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
|
|
struct irdma_alloc_ucontext_req req = {};
|
|
struct irdma_alloc_ucontext_resp uresp = {};
|
|
struct irdma_ucontext *ucontext = to_ucontext(uctx);
|
|
- struct irdma_uk_attrs *uk_attrs;
|
|
+ struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
|
|
|
|
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
|
|
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
|
|
@@ -292,7 +292,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
|
|
ucontext->iwdev = iwdev;
|
|
ucontext->abi_ver = req.userspace_ver;
|
|
|
|
- uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
|
|
+ if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
|
|
+ ucontext->use_raw_attrs = true;
|
|
+
|
|
/* GEN_1 legacy support with libi40iw */
|
|
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
|
|
if (uk_attrs->hw_rev != IRDMA_GEN_1)
|
|
@@ -327,6 +329,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
|
|
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
|
|
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
|
|
uresp.hw_rev = uk_attrs->hw_rev;
|
|
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
|
|
if (ib_copy_to_udata(udata, &uresp,
|
|
min(sizeof(uresp), udata->outlen))) {
|
|
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
|
|
@@ -566,6 +569,86 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
|
|
}
|
|
}
|
|
|
|
+/**
|
|
+ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
|
|
+ * @iwdev: iwarp device
|
|
+ * @iwqp: qp ptr (user or kernel)
|
|
+ * @info: initialize info to return
|
|
+ * @init_attr: Initial QP create attributes
|
|
+ */
|
|
+static int irdma_setup_umode_qp(struct ib_udata *udata,
|
|
+ struct irdma_device *iwdev,
|
|
+ struct irdma_qp *iwqp,
|
|
+ struct irdma_qp_init_info *info,
|
|
+ struct ib_qp_init_attr *init_attr)
|
|
+{
|
|
+ struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
|
|
+ struct irdma_ucontext, ibucontext);
|
|
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
|
|
+ struct irdma_create_qp_req req;
|
|
+ unsigned long flags;
|
|
+ int ret;
|
|
+
|
|
+ ret = ib_copy_from_udata(&req, udata,
|
|
+ min(sizeof(req), udata->inlen));
|
|
+ if (ret) {
|
|
+ ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
|
|
+ iwqp->user_mode = 1;
|
|
+ if (req.user_wqe_bufs) {
|
|
+ info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
|
|
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
|
|
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
|
|
+ &ucontext->qp_reg_mem_list);
|
|
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
|
|
+
|
|
+ if (!iwqp->iwpbl) {
|
|
+ ret = -ENODATA;
|
|
+ ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!ucontext->use_raw_attrs) {
|
|
+ /**
|
|
+ * Maintain backward compat with older ABI which passes sq and
|
|
+ * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
|
|
+ * There is no way to compute the correct value of
|
|
+ * iwqp->max_send_wr/max_recv_wr in the kernel.
|
|
+ */
|
|
+ iwqp->max_send_wr = init_attr->cap.max_send_wr;
|
|
+ iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
|
|
+ ukinfo->sq_size = init_attr->cap.max_send_wr;
|
|
+ ukinfo->rq_size = init_attr->cap.max_recv_wr;
|
|
+ irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
|
|
+ &ukinfo->rq_shift);
|
|
+ } else {
|
|
+ ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
|
|
+ &ukinfo->sq_shift);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
|
|
+ &ukinfo->rq_shift);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ iwqp->max_send_wr =
|
|
+ (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
|
|
+ iwqp->max_recv_wr =
|
|
+ (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
|
|
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
|
|
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
|
|
+ }
|
|
+
|
|
+ irdma_setup_virt_qp(iwdev, iwqp, info);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/**
|
|
* irdma_setup_kmode_qp - setup initialization for kernel mode qp
|
|
* @iwdev: iwarp device
|
|
@@ -579,40 +662,28 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
|
|
struct ib_qp_init_attr *init_attr)
|
|
{
|
|
struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
|
|
- u32 sqdepth, rqdepth;
|
|
- u8 sqshift, rqshift;
|
|
u32 size;
|
|
int status;
|
|
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
|
|
- struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
|
|
|
|
- irdma_get_wqe_shift(uk_attrs,
|
|
- uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
|
|
- ukinfo->max_sq_frag_cnt,
|
|
- ukinfo->max_inline_data, &sqshift);
|
|
- status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
|
|
- &sqdepth);
|
|
+ status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
|
|
+ &ukinfo->sq_shift);
|
|
if (status)
|
|
return status;
|
|
|
|
- if (uk_attrs->hw_rev == IRDMA_GEN_1)
|
|
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
|
|
- else
|
|
- irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
|
|
- &rqshift);
|
|
-
|
|
- status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
|
|
- &rqdepth);
|
|
+ status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
|
|
+ &ukinfo->rq_shift);
|
|
if (status)
|
|
return status;
|
|
|
|
iwqp->kqp.sq_wrid_mem =
|
|
- kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
|
|
+ kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
|
|
if (!iwqp->kqp.sq_wrid_mem)
|
|
return -ENOMEM;
|
|
|
|
iwqp->kqp.rq_wrid_mem =
|
|
- kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
|
|
+ kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
|
|
+
|
|
if (!iwqp->kqp.rq_wrid_mem) {
|
|
kfree(iwqp->kqp.sq_wrid_mem);
|
|
iwqp->kqp.sq_wrid_mem = NULL;
|
|
@@ -622,7 +693,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
|
|
ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
|
|
ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
|
|
|
|
- size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
|
|
+ size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
|
|
size += (IRDMA_SHADOW_AREA_SIZE << 3);
|
|
|
|
mem->size = ALIGN(size, 256);
|
|
@@ -638,16 +709,18 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
|
|
|
|
ukinfo->sq = mem->va;
|
|
info->sq_pa = mem->pa;
|
|
- ukinfo->rq = &ukinfo->sq[sqdepth];
|
|
- info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
|
|
- ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
|
|
- info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
|
|
- ukinfo->sq_size = sqdepth >> sqshift;
|
|
- ukinfo->rq_size = rqdepth >> rqshift;
|
|
- ukinfo->qp_id = iwqp->ibqp.qp_num;
|
|
-
|
|
- init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
|
|
- init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
|
|
+ ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
|
|
+ info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
|
|
+ ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
|
|
+ info->shadow_area_pa =
|
|
+ info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
|
|
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
|
|
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
|
|
+
|
|
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
|
|
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
|
|
+ init_attr->cap.max_send_wr = iwqp->max_send_wr;
|
|
+ init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
|
|
|
|
return 0;
|
|
}
|
|
@@ -805,18 +878,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
|
|
struct irdma_device *iwdev = to_iwdev(ibpd->device);
|
|
struct irdma_pci_f *rf = iwdev->rf;
|
|
struct irdma_qp *iwqp = to_iwqp(ibqp);
|
|
- struct irdma_create_qp_req req = {};
|
|
struct irdma_create_qp_resp uresp = {};
|
|
u32 qp_num = 0;
|
|
int err_code;
|
|
- int sq_size;
|
|
- int rq_size;
|
|
struct irdma_sc_qp *qp;
|
|
struct irdma_sc_dev *dev = &rf->sc_dev;
|
|
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
|
|
struct irdma_qp_init_info init_info = {};
|
|
struct irdma_qp_host_ctx_info *ctx_info;
|
|
- unsigned long flags;
|
|
|
|
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
|
|
if (err_code)
|
|
@@ -826,13 +895,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
|
|
udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
|
|
return -EINVAL;
|
|
|
|
- sq_size = init_attr->cap.max_send_wr;
|
|
- rq_size = init_attr->cap.max_recv_wr;
|
|
-
|
|
init_info.vsi = &iwdev->vsi;
|
|
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
|
|
- init_info.qp_uk_init_info.sq_size = sq_size;
|
|
- init_info.qp_uk_init_info.rq_size = rq_size;
|
|
+ init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
|
|
+ init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
|
|
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
|
|
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
|
|
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
|
|
@@ -874,7 +940,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
|
|
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
|
|
|
|
init_info.pd = &iwpd->sc_pd;
|
|
- init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
|
|
+ init_info.qp_uk_init_info.qp_id = qp_num;
|
|
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
|
|
init_info.qp_uk_init_info.first_sq_wq = 1;
|
|
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
|
|
@@ -882,36 +948,9 @@ static int irdma_create_qp(struct ib_qp *ibqp,
|
|
init_waitqueue_head(&iwqp->mod_qp_waitq);
|
|
|
|
if (udata) {
|
|
- err_code = ib_copy_from_udata(&req, udata,
|
|
- min(sizeof(req), udata->inlen));
|
|
- if (err_code) {
|
|
- ibdev_dbg(&iwdev->ibdev,
|
|
- "VERBS: ib_copy_from_data fail\n");
|
|
- goto error;
|
|
- }
|
|
-
|
|
- iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
|
|
- iwqp->user_mode = 1;
|
|
- if (req.user_wqe_bufs) {
|
|
- struct irdma_ucontext *ucontext =
|
|
- rdma_udata_to_drv_context(udata,
|
|
- struct irdma_ucontext,
|
|
- ibucontext);
|
|
-
|
|
- init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
|
|
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
|
|
- iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
|
|
- &ucontext->qp_reg_mem_list);
|
|
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
|
|
-
|
|
- if (!iwqp->iwpbl) {
|
|
- err_code = -ENODATA;
|
|
- ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
|
|
- goto error;
|
|
- }
|
|
- }
|
|
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
|
|
- irdma_setup_virt_qp(iwdev, iwqp, &init_info);
|
|
+ err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
|
|
+ init_attr);
|
|
} else {
|
|
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
|
|
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
|
|
@@ -966,8 +1005,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
|
|
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
|
|
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
|
|
rf->qp_table[qp_num] = iwqp;
|
|
- iwqp->max_send_wr = sq_size;
|
|
- iwqp->max_recv_wr = rq_size;
|
|
|
|
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
|
|
if (dev->ws_add(&iwdev->vsi, 0)) {
|
|
@@ -988,8 +1025,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
|
|
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
|
|
uresp.lsmm = 1;
|
|
}
|
|
- uresp.actual_sq_size = sq_size;
|
|
- uresp.actual_rq_size = rq_size;
|
|
+ uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
|
|
+ uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
|
|
uresp.qp_id = qp_num;
|
|
uresp.qp_caps = qp->qp_uk.qp_caps;
|
|
|
|
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
|
|
index 9f9e273bbff3e..0bc0d0faa0868 100644
|
|
--- a/drivers/infiniband/hw/irdma/verbs.h
|
|
+++ b/drivers/infiniband/hw/irdma/verbs.h
|
|
@@ -18,7 +18,8 @@ struct irdma_ucontext {
|
|
struct list_head qp_reg_mem_list;
|
|
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
|
|
int abi_ver;
|
|
- bool legacy_mode;
|
|
+ u8 legacy_mode : 1;
|
|
+ u8 use_raw_attrs : 1;
|
|
};
|
|
|
|
struct irdma_pd {
|
|
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
|
|
index f8e2baed27a5c..7013ce20549bd 100644
|
|
--- a/drivers/infiniband/hw/mlx5/devx.c
|
|
+++ b/drivers/infiniband/hw/mlx5/devx.c
|
|
@@ -2951,7 +2951,7 @@ DECLARE_UVERBS_NAMED_METHOD(
|
|
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
|
|
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
|
|
UVERBS_IDR_ANY_OBJECT,
|
|
- UVERBS_ACCESS_WRITE,
|
|
+ UVERBS_ACCESS_READ,
|
|
UA_MANDATORY),
|
|
UVERBS_ATTR_PTR_IN(
|
|
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
|
|
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
|
|
index 855f3f4fefadd..737db67a9ce1d 100644
|
|
--- a/drivers/infiniband/hw/mlx5/wr.c
|
|
+++ b/drivers/infiniband/hw/mlx5/wr.c
|
|
@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
|
|
*/
|
|
copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
|
|
left);
|
|
- memcpy(eseg->inline_hdr.start, pdata, copysz);
|
|
+ memcpy(eseg->inline_hdr.data, pdata, copysz);
|
|
stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
|
|
sizeof(eseg->inline_hdr.start) + copysz, 16);
|
|
*size += stride / 16;
|
|
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
|
|
index d3c436ead6946..4aa80c9388f05 100644
|
|
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
|
|
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
|
|
@@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev,
|
|
|
|
/* distinguish "mi" and "min-latency" with length */
|
|
len = strnlen(buf, NAME_MAX);
|
|
- if (buf[len - 1] == '\n')
|
|
+ if (len && buf[len - 1] == '\n')
|
|
len--;
|
|
|
|
if (!strncasecmp(buf, "round-robin", 11) ||
|
|
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
index cffa93f114a73..fd6c260d5857d 100644
|
|
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
@@ -3209,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device)
|
|
|
|
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
|
|
srpt_event_handler);
|
|
- ib_register_event_handler(&sdev->event_handler);
|
|
|
|
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
|
|
sport = &sdev->port[i - 1];
|
|
@@ -3232,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device)
|
|
}
|
|
}
|
|
|
|
+ ib_register_event_handler(&sdev->event_handler);
|
|
spin_lock(&srpt_dev_lock);
|
|
list_add_tail(&sdev->list, &srpt_dev_list);
|
|
spin_unlock(&srpt_dev_lock);
|
|
@@ -3242,7 +3242,6 @@ static int srpt_add_one(struct ib_device *device)
|
|
|
|
err_port:
|
|
srpt_unregister_mad_agent(sdev, i);
|
|
- ib_unregister_event_handler(&sdev->event_handler);
|
|
err_cm:
|
|
if (sdev->cm_id)
|
|
ib_destroy_cm_id(sdev->cm_id);
|
|
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
|
|
index c3937d2fc7446..a0f9978c68f55 100644
|
|
--- a/drivers/input/keyboard/gpio_keys_polled.c
|
|
+++ b/drivers/input/keyboard/gpio_keys_polled.c
|
|
@@ -319,12 +319,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
|
|
|
|
error = devm_gpio_request_one(dev, button->gpio,
|
|
flags, button->desc ? : DRV_NAME);
|
|
- if (error) {
|
|
- dev_err(dev,
|
|
- "unable to claim gpio %u, err=%d\n",
|
|
- button->gpio, error);
|
|
- return error;
|
|
- }
|
|
+ if (error)
|
|
+ return dev_err_probe(dev, error,
|
|
+ "unable to claim gpio %u\n",
|
|
+ button->gpio);
|
|
|
|
bdata->gpiod = gpio_to_desc(button->gpio);
|
|
if (!bdata->gpiod) {
|
|
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
|
|
index dc5f7a156ff5e..dc19e7fb07cfe 100644
|
|
--- a/drivers/iommu/Kconfig
|
|
+++ b/drivers/iommu/Kconfig
|
|
@@ -192,7 +192,7 @@ source "drivers/iommu/intel/Kconfig"
|
|
config IRQ_REMAP
|
|
bool "Support for Interrupt Remapping"
|
|
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
|
|
- select DMAR_TABLE
|
|
+ select DMAR_TABLE if INTEL_IOMMU
|
|
help
|
|
Supports Interrupt remapping for IO-APIC and MSI devices.
|
|
To use x2apic mode in the CPU's which support x2APIC enhancements or
|
|
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
|
|
index f6e64c9858021..cc94ac6662339 100644
|
|
--- a/drivers/iommu/amd/init.c
|
|
+++ b/drivers/iommu/amd/init.c
|
|
@@ -2047,6 +2047,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
|
|
/* Prevent binding other PCI device drivers to IOMMU devices */
|
|
iommu->dev->match_driver = false;
|
|
|
|
+ /* ACPI _PRT won't have an IRQ for IOMMU */
|
|
+ iommu->dev->irq_managed = 1;
|
|
+
|
|
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
|
|
&iommu->cap);
|
|
|
|
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
|
|
index b7dff5092fd21..12e1e90fdae13 100644
|
|
--- a/drivers/iommu/intel/Kconfig
|
|
+++ b/drivers/iommu/intel/Kconfig
|
|
@@ -96,4 +96,15 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
|
|
passing intel_iommu=sm_on to the kernel. If not sure, please use
|
|
the default value.
|
|
|
|
+config INTEL_IOMMU_PERF_EVENTS
|
|
+ def_bool y
|
|
+ bool "Intel IOMMU performance events"
|
|
+ depends on INTEL_IOMMU && PERF_EVENTS
|
|
+ help
|
|
+ Selecting this option will enable the performance monitoring
|
|
+ infrastructure in the Intel IOMMU. It collects information about
|
|
+ key events occurring during operation of the remapping hardware,
|
|
+ to aid performance tuning and debug. These are available on modern
|
|
+ processors which support Intel VT-d 4.0 and later.
|
|
+
|
|
endif # INTEL_IOMMU
|
|
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
|
|
index fa0dae16441cb..29d26a4371327 100644
|
|
--- a/drivers/iommu/intel/Makefile
|
|
+++ b/drivers/iommu/intel/Makefile
|
|
@@ -5,4 +5,7 @@ obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
|
|
obj-$(CONFIG_DMAR_PERF) += perf.o
|
|
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
|
|
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
|
|
+ifdef CONFIG_INTEL_IOMMU
|
|
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
|
|
+endif
|
|
+obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
|
|
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
|
|
index 418af1db0192d..4759f79ad7b94 100644
|
|
--- a/drivers/iommu/intel/dmar.c
|
|
+++ b/drivers/iommu/intel/dmar.c
|
|
@@ -34,6 +34,7 @@
|
|
#include "../irq_remapping.h"
|
|
#include "perf.h"
|
|
#include "trace.h"
|
|
+#include "perfmon.h"
|
|
|
|
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
|
|
struct dmar_res_callback {
|
|
@@ -1104,6 +1105,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
|
|
if (sts & DMA_GSTS_QIES)
|
|
iommu->gcmd |= DMA_GCMD_QIE;
|
|
|
|
+ if (alloc_iommu_pmu(iommu))
|
|
+ pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
|
|
+
|
|
raw_spin_lock_init(&iommu->register_lock);
|
|
|
|
/*
|
|
@@ -1131,6 +1135,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
|
|
err_sysfs:
|
|
iommu_device_sysfs_remove(&iommu->iommu);
|
|
err_unmap:
|
|
+ free_iommu_pmu(iommu);
|
|
unmap_iommu(iommu);
|
|
error_free_seq_id:
|
|
ida_free(&dmar_seq_ids, iommu->seq_id);
|
|
@@ -1146,6 +1151,8 @@ static void free_iommu(struct intel_iommu *iommu)
|
|
iommu_device_sysfs_remove(&iommu->iommu);
|
|
}
|
|
|
|
+ free_iommu_pmu(iommu);
|
|
+
|
|
if (iommu->irq) {
|
|
if (iommu->pr_irq) {
|
|
free_irq(iommu->pr_irq, iommu);
|
|
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
|
|
index c99cb715bd9a2..c1348bedab3b3 100644
|
|
--- a/drivers/iommu/intel/iommu.h
|
|
+++ b/drivers/iommu/intel/iommu.h
|
|
@@ -125,6 +125,11 @@
|
|
#define DMAR_MTRR_PHYSMASK8_REG 0x208
|
|
#define DMAR_MTRR_PHYSBASE9_REG 0x210
|
|
#define DMAR_MTRR_PHYSMASK9_REG 0x218
|
|
+#define DMAR_PERFCAP_REG 0x300
|
|
+#define DMAR_PERFCFGOFF_REG 0x310
|
|
+#define DMAR_PERFOVFOFF_REG 0x318
|
|
+#define DMAR_PERFCNTROFF_REG 0x31c
|
|
+#define DMAR_PERFEVNTCAP_REG 0x380
|
|
#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
|
|
#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
|
|
#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
|
|
@@ -148,6 +153,7 @@
|
|
*/
|
|
#define cap_esrtps(c) (((c) >> 63) & 1)
|
|
#define cap_esirtps(c) (((c) >> 62) & 1)
|
|
+#define cap_ecmds(c) (((c) >> 61) & 1)
|
|
#define cap_fl5lp_support(c) (((c) >> 60) & 1)
|
|
#define cap_pi_support(c) (((c) >> 59) & 1)
|
|
#define cap_fl1gp_support(c) (((c) >> 56) & 1)
|
|
@@ -179,7 +185,8 @@
|
|
* Extended Capability Register
|
|
*/
|
|
|
|
-#define ecap_rps(e) (((e) >> 49) & 0x1)
|
|
+#define ecap_pms(e) (((e) >> 51) & 0x1)
|
|
+#define ecap_rps(e) (((e) >> 49) & 0x1)
|
|
#define ecap_smpwc(e) (((e) >> 48) & 0x1)
|
|
#define ecap_flts(e) (((e) >> 47) & 0x1)
|
|
#define ecap_slts(e) (((e) >> 46) & 0x1)
|
|
@@ -210,6 +217,22 @@
|
|
#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
|
|
#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
|
|
|
|
+/*
|
|
+ * Decoding Perf Capability Register
|
|
+ */
|
|
+#define pcap_num_cntr(p) ((p) & 0xffff)
|
|
+#define pcap_cntr_width(p) (((p) >> 16) & 0x7f)
|
|
+#define pcap_num_event_group(p) (((p) >> 24) & 0x1f)
|
|
+#define pcap_filters_mask(p) (((p) >> 32) & 0x1f)
|
|
+#define pcap_interrupt(p) (((p) >> 50) & 0x1)
|
|
+/* The counter stride is calculated as 2 ^ (x+10) bytes */
|
|
+#define pcap_cntr_stride(p) (1ULL << ((((p) >> 52) & 0x7) + 10))
|
|
+
|
|
+/*
|
|
+ * Decoding Perf Event Capability Register
|
|
+ */
|
|
+#define pecap_es(p) ((p) & 0xfffffff)
|
|
+
|
|
/* Virtual command interface capability */
|
|
#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
|
|
|
|
@@ -561,6 +584,22 @@ struct dmar_domain {
|
|
iommu core */
|
|
};
|
|
|
|
+struct iommu_pmu {
|
|
+ struct intel_iommu *iommu;
|
|
+ u32 num_cntr; /* Number of counters */
|
|
+ u32 num_eg; /* Number of event group */
|
|
+ u32 cntr_width; /* Counter width */
|
|
+ u32 cntr_stride; /* Counter Stride */
|
|
+ u32 filter; /* Bitmask of filter support */
|
|
+ void __iomem *base; /* the PerfMon base address */
|
|
+ void __iomem *cfg_reg; /* counter configuration base address */
|
|
+ void __iomem *cntr_reg; /* counter 0 address*/
|
|
+ void __iomem *overflow; /* overflow status register */
|
|
+
|
|
+ u64 *evcap; /* Indicates all supported events */
|
|
+ u32 **cntr_evcap; /* Supported events of each counter. */
|
|
+};
|
|
+
|
|
struct intel_iommu {
|
|
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
|
|
u64 reg_phys; /* physical address of hw register set */
|
|
@@ -608,6 +647,8 @@ struct intel_iommu {
|
|
|
|
struct dmar_drhd_unit *drhd;
|
|
void *perf_statistic;
|
|
+
|
|
+ struct iommu_pmu *pmu;
|
|
};
|
|
|
|
/* PCI domain-device relationship */
|
|
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
|
|
index 3f03039e5cce5..32432d82d7744 100644
|
|
--- a/drivers/iommu/intel/pasid.c
|
|
+++ b/drivers/iommu/intel/pasid.c
|
|
@@ -435,6 +435,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
|
|
if (!info || !info->ats_enabled)
|
|
return;
|
|
|
|
+ if (pci_dev_is_disconnected(to_pci_dev(dev)))
|
|
+ return;
|
|
+
|
|
sid = info->bus << 8 | info->devfn;
|
|
qdep = info->ats_qdep;
|
|
pfsid = info->pfsid;
|
|
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
|
|
new file mode 100644
|
|
index 0000000000000..db5791a544551
|
|
--- /dev/null
|
|
+++ b/drivers/iommu/intel/perfmon.c
|
|
@@ -0,0 +1,172 @@
|
|
+// SPDX-License-Identifier: GPL-2.0-only
|
|
+/*
|
|
+ * Support Intel IOMMU PerfMon
|
|
+ * Copyright(c) 2023 Intel Corporation.
|
|
+ */
|
|
+#define pr_fmt(fmt) "DMAR: " fmt
|
|
+#define dev_fmt(fmt) pr_fmt(fmt)
|
|
+
|
|
+#include <linux/dmar.h>
|
|
+#include "iommu.h"
|
|
+#include "perfmon.h"
|
|
+
|
|
+static inline void __iomem *
|
|
+get_perf_reg_address(struct intel_iommu *iommu, u32 offset)
|
|
+{
|
|
+ u32 off = dmar_readl(iommu->reg + offset);
|
|
+
|
|
+ return iommu->reg + off;
|
|
+}
|
|
+
|
|
+int alloc_iommu_pmu(struct intel_iommu *iommu)
|
|
+{
|
|
+ struct iommu_pmu *iommu_pmu;
|
|
+ int i, j, ret;
|
|
+ u64 perfcap;
|
|
+ u32 cap;
|
|
+
|
|
+ if (!ecap_pms(iommu->ecap))
|
|
+ return 0;
|
|
+
|
|
+ /* The IOMMU PMU requires the ECMD support as well */
|
|
+ if (!cap_ecmds(iommu->cap))
|
|
+ return -ENODEV;
|
|
+
|
|
+ perfcap = dmar_readq(iommu->reg + DMAR_PERFCAP_REG);
|
|
+ /* The performance monitoring is not supported. */
|
|
+ if (!perfcap)
|
|
+ return -ENODEV;
|
|
+
|
|
+ /* Sanity check for the number of the counters and event groups */
|
|
+ if (!pcap_num_cntr(perfcap) || !pcap_num_event_group(perfcap))
|
|
+ return -ENODEV;
|
|
+
|
|
+ /* The interrupt on overflow is required */
|
|
+ if (!pcap_interrupt(perfcap))
|
|
+ return -ENODEV;
|
|
+
|
|
+ iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL);
|
|
+ if (!iommu_pmu)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ iommu_pmu->num_cntr = pcap_num_cntr(perfcap);
|
|
+ iommu_pmu->cntr_width = pcap_cntr_width(perfcap);
|
|
+ iommu_pmu->filter = pcap_filters_mask(perfcap);
|
|
+ iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap);
|
|
+ iommu_pmu->num_eg = pcap_num_event_group(perfcap);
|
|
+
|
|
+ iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL);
|
|
+ if (!iommu_pmu->evcap) {
|
|
+ ret = -ENOMEM;
|
|
+ goto free_pmu;
|
|
+ }
|
|
+
|
|
+ /* Parse event group capabilities */
|
|
+ for (i = 0; i < iommu_pmu->num_eg; i++) {
|
|
+ u64 pcap;
|
|
+
|
|
+ pcap = dmar_readq(iommu->reg + DMAR_PERFEVNTCAP_REG +
|
|
+ i * IOMMU_PMU_CAP_REGS_STEP);
|
|
+ iommu_pmu->evcap[i] = pecap_es(pcap);
|
|
+ }
|
|
+
|
|
+ iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL);
|
|
+ if (!iommu_pmu->cntr_evcap) {
|
|
+ ret = -ENOMEM;
|
|
+ goto free_pmu_evcap;
|
|
+ }
|
|
+ for (i = 0; i < iommu_pmu->num_cntr; i++) {
|
|
+ iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL);
|
|
+ if (!iommu_pmu->cntr_evcap[i]) {
|
|
+ ret = -ENOMEM;
|
|
+ goto free_pmu_cntr_evcap;
|
|
+ }
|
|
+ /*
|
|
+ * Set to the global capabilities, will adjust according
|
|
+ * to per-counter capabilities later.
|
|
+ */
|
|
+ for (j = 0; j < iommu_pmu->num_eg; j++)
|
|
+ iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j];
|
|
+ }
|
|
+
|
|
+ iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG);
|
|
+ iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG);
|
|
+ iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG);
|
|
+
|
|
+ /*
|
|
+ * Check per-counter capabilities. All counters should have the
|
|
+ * same capabilities on Interrupt on Overflow Support and Counter
|
|
+ * Width.
|
|
+ */
|
|
+ for (i = 0; i < iommu_pmu->num_cntr; i++) {
|
|
+ cap = dmar_readl(iommu_pmu->cfg_reg +
|
|
+ i * IOMMU_PMU_CFG_OFFSET +
|
|
+ IOMMU_PMU_CFG_CNTRCAP_OFFSET);
|
|
+ if (!iommu_cntrcap_pcc(cap))
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * It's possible that some counters have a different
|
|
+ * capability because of e.g., HW bug. Check the corner
|
|
+ * case here and simply drop those counters.
|
|
+ */
|
|
+ if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) ||
|
|
+ !iommu_cntrcap_ios(cap)) {
|
|
+ iommu_pmu->num_cntr = i;
|
|
+ pr_warn("PMU counter capability inconsistent, counter number reduced to %d\n",
|
|
+ iommu_pmu->num_cntr);
|
|
+ }
|
|
+
|
|
+ /* Clear the pre-defined events group */
|
|
+ for (j = 0; j < iommu_pmu->num_eg; j++)
|
|
+ iommu_pmu->cntr_evcap[i][j] = 0;
|
|
+
|
|
+ /* Override with per-counter event capabilities */
|
|
+ for (j = 0; j < iommu_cntrcap_egcnt(cap); j++) {
|
|
+ cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET +
|
|
+ IOMMU_PMU_CFG_CNTREVCAP_OFFSET +
|
|
+ (j * IOMMU_PMU_OFF_REGS_STEP));
|
|
+ iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap);
|
|
+ /*
|
|
+ * Some events may only be supported by a specific counter.
|
|
+ * Track them in the evcap as well.
|
|
+ */
|
|
+ iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ iommu_pmu->iommu = iommu;
|
|
+ iommu->pmu = iommu_pmu;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+free_pmu_cntr_evcap:
|
|
+ for (i = 0; i < iommu_pmu->num_cntr; i++)
|
|
+ kfree(iommu_pmu->cntr_evcap[i]);
|
|
+ kfree(iommu_pmu->cntr_evcap);
|
|
+free_pmu_evcap:
|
|
+ kfree(iommu_pmu->evcap);
|
|
+free_pmu:
|
|
+ kfree(iommu_pmu);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void free_iommu_pmu(struct intel_iommu *iommu)
|
|
+{
|
|
+ struct iommu_pmu *iommu_pmu = iommu->pmu;
|
|
+
|
|
+ if (!iommu_pmu)
|
|
+ return;
|
|
+
|
|
+ if (iommu_pmu->evcap) {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < iommu_pmu->num_cntr; i++)
|
|
+ kfree(iommu_pmu->cntr_evcap[i]);
|
|
+ kfree(iommu_pmu->cntr_evcap);
|
|
+ }
|
|
+ kfree(iommu_pmu->evcap);
|
|
+ kfree(iommu_pmu);
|
|
+ iommu->pmu = NULL;
|
|
+}
|
|
diff --git a/drivers/iommu/intel/perfmon.h b/drivers/iommu/intel/perfmon.h
|
|
new file mode 100644
|
|
index 0000000000000..4b0d9c1fea6ff
|
|
--- /dev/null
|
|
+++ b/drivers/iommu/intel/perfmon.h
|
|
@@ -0,0 +1,40 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+
|
|
+/*
|
|
+ * PERFCFGOFF_REG, PERFFRZOFF_REG
|
|
+ * PERFOVFOFF_REG, PERFCNTROFF_REG
|
|
+ */
|
|
+#define IOMMU_PMU_NUM_OFF_REGS 4
|
|
+#define IOMMU_PMU_OFF_REGS_STEP 4
|
|
+
|
|
+#define IOMMU_PMU_CFG_OFFSET 0x100
|
|
+#define IOMMU_PMU_CFG_CNTRCAP_OFFSET 0x80
|
|
+#define IOMMU_PMU_CFG_CNTREVCAP_OFFSET 0x84
|
|
+#define IOMMU_PMU_CFG_SIZE 0x8
|
|
+#define IOMMU_PMU_CFG_FILTERS_OFFSET 0x4
|
|
+
|
|
+#define IOMMU_PMU_CAP_REGS_STEP 8
|
|
+
|
|
+#define iommu_cntrcap_pcc(p) ((p) & 0x1)
|
|
+#define iommu_cntrcap_cw(p) (((p) >> 8) & 0xff)
|
|
+#define iommu_cntrcap_ios(p) (((p) >> 16) & 0x1)
|
|
+#define iommu_cntrcap_egcnt(p) (((p) >> 28) & 0xf)
|
|
+
|
|
+#define iommu_event_select(p) ((p) & 0xfffffff)
|
|
+#define iommu_event_group(p) (((p) >> 28) & 0xf)
|
|
+
|
|
+#ifdef CONFIG_INTEL_IOMMU_PERF_EVENTS
|
|
+int alloc_iommu_pmu(struct intel_iommu *iommu);
|
|
+void free_iommu_pmu(struct intel_iommu *iommu);
|
|
+#else
|
|
+static inline int
|
|
+alloc_iommu_pmu(struct intel_iommu *iommu)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline void
|
|
+free_iommu_pmu(struct intel_iommu *iommu)
|
|
+{
|
|
+}
|
|
+#endif /* CONFIG_INTEL_IOMMU_PERF_EVENTS */
|
|
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
|
|
index 83314b9d8f38b..ee59647c20501 100644
|
|
--- a/drivers/iommu/irq_remapping.c
|
|
+++ b/drivers/iommu/irq_remapping.c
|
|
@@ -99,7 +99,8 @@ int __init irq_remapping_prepare(void)
|
|
if (disable_irq_remap)
|
|
return -ENOSYS;
|
|
|
|
- if (intel_irq_remap_ops.prepare() == 0)
|
|
+ if (IS_ENABLED(CONFIG_INTEL_IOMMU) &&
|
|
+ intel_irq_remap_ops.prepare() == 0)
|
|
remap_ops = &intel_irq_remap_ops;
|
|
else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
|
|
amd_iommu_irq_ops.prepare() == 0)
|
|
diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
|
|
index d3a30ad94ac46..dd5d327c52a10 100644
|
|
--- a/drivers/leds/flash/leds-sgm3140.c
|
|
+++ b/drivers/leds/flash/leds-sgm3140.c
|
|
@@ -114,8 +114,11 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev,
|
|
"failed to enable regulator: %d\n", ret);
|
|
return ret;
|
|
}
|
|
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
|
|
gpiod_set_value_cansleep(priv->enable_gpio, 1);
|
|
} else {
|
|
+ del_timer_sync(&priv->powerdown_timer);
|
|
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
|
|
gpiod_set_value_cansleep(priv->enable_gpio, 0);
|
|
ret = regulator_disable(priv->vin_regulator);
|
|
if (ret) {
|
|
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
|
|
index 0b52fc9097c6e..3c05958578a1c 100644
|
|
--- a/drivers/leds/leds-aw2013.c
|
|
+++ b/drivers/leds/leds-aw2013.c
|
|
@@ -397,6 +397,7 @@ static int aw2013_probe(struct i2c_client *client)
|
|
regulator_disable(chip->vcc_regulator);
|
|
|
|
error:
|
|
+ mutex_unlock(&chip->mutex);
|
|
mutex_destroy(&chip->mutex);
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
|
|
index 100a6a236d92a..ec662f97ba828 100644
|
|
--- a/drivers/md/dm-bufio.c
|
|
+++ b/drivers/md/dm-bufio.c
|
|
@@ -614,7 +614,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
|
|
io_req.mem.ptr.vma = (char *)b->data + offset;
|
|
}
|
|
|
|
- r = dm_io(&io_req, 1, &region, NULL);
|
|
+ r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r))
|
|
b->end_io(b, errno_to_blk_status(r));
|
|
}
|
|
@@ -1375,7 +1375,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
|
|
|
|
BUG_ON(dm_bufio_in_request());
|
|
|
|
- return dm_io(&io_req, 1, &io_reg, NULL);
|
|
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
|
|
}
|
|
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
|
|
|
|
@@ -1398,7 +1398,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
|
|
|
|
BUG_ON(dm_bufio_in_request());
|
|
|
|
- return dm_io(&io_req, 1, &io_reg, NULL);
|
|
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
|
|
}
|
|
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
|
|
|
|
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
|
|
index 6ba3e9c91af53..8bc21d54884e9 100644
|
|
--- a/drivers/md/dm-cache-policy.h
|
|
+++ b/drivers/md/dm-cache-policy.h
|
|
@@ -75,7 +75,7 @@ struct dm_cache_policy {
|
|
* background work.
|
|
*/
|
|
int (*get_background_work)(struct dm_cache_policy *p, bool idle,
|
|
- struct policy_work **result);
|
|
+ struct policy_work **result);
|
|
|
|
/*
|
|
* You must pass in the same work pointer that you were given, not
|
|
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
|
|
index 3e215aa85b99a..25e51dc6e5598 100644
|
|
--- a/drivers/md/dm-crypt.c
|
|
+++ b/drivers/md/dm-crypt.c
|
|
@@ -52,11 +52,11 @@
|
|
struct convert_context {
|
|
struct completion restart;
|
|
struct bio *bio_in;
|
|
- struct bio *bio_out;
|
|
struct bvec_iter iter_in;
|
|
+ struct bio *bio_out;
|
|
struct bvec_iter iter_out;
|
|
- u64 cc_sector;
|
|
atomic_t cc_pending;
|
|
+ u64 cc_sector;
|
|
union {
|
|
struct skcipher_request *req;
|
|
struct aead_request *req_aead;
|
|
@@ -2535,7 +2535,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
|
|
type = &key_type_encrypted;
|
|
set_key = set_key_encrypted;
|
|
} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
|
|
- !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
|
|
+ !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
|
|
type = &key_type_trusted;
|
|
set_key = set_key_trusted;
|
|
} else {
|
|
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
|
|
index 3da4359f51645..9c9e2b50c63c3 100644
|
|
--- a/drivers/md/dm-integrity.c
|
|
+++ b/drivers/md/dm-integrity.c
|
|
@@ -579,7 +579,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
|
|
}
|
|
}
|
|
|
|
- r = dm_io(&io_req, 1, &io_loc, NULL);
|
|
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r))
|
|
return r;
|
|
|
|
@@ -1089,7 +1089,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
|
|
io_loc.sector = ic->start + SB_SECTORS + sector;
|
|
io_loc.count = n_sectors;
|
|
|
|
- r = dm_io(&io_req, 1, &io_loc, NULL);
|
|
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r)) {
|
|
dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
|
|
"reading journal" : "writing journal", r);
|
|
@@ -1205,7 +1205,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
|
|
io_loc.sector = target;
|
|
io_loc.count = n_sectors;
|
|
|
|
- r = dm_io(&io_req, 1, &io_loc, NULL);
|
|
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r)) {
|
|
WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
|
|
fn(-1UL, data);
|
|
@@ -1532,7 +1532,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
|
|
fr.io_reg.count = 0,
|
|
fr.ic = ic;
|
|
init_completion(&fr.comp);
|
|
- r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
|
|
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
|
|
BUG_ON(r);
|
|
}
|
|
|
|
@@ -1709,7 +1709,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
|
|
struct bio_vec bv;
|
|
sector_t sector, logical_sector, area, offset;
|
|
struct page *page;
|
|
- void *buffer;
|
|
|
|
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
|
|
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
|
|
@@ -1718,13 +1717,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
|
|
logical_sector = dio->range.logical_sector;
|
|
|
|
page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
|
|
- buffer = page_to_virt(page);
|
|
|
|
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
|
|
unsigned pos = 0;
|
|
|
|
do {
|
|
+ sector_t alignment;
|
|
char *mem;
|
|
+ char *buffer = page_to_virt(page);
|
|
int r;
|
|
struct dm_io_request io_req;
|
|
struct dm_io_region io_loc;
|
|
@@ -1737,7 +1737,15 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
|
|
io_loc.sector = sector;
|
|
io_loc.count = ic->sectors_per_block;
|
|
|
|
- r = dm_io(&io_req, 1, &io_loc, NULL);
|
|
+ /* Align the bio to logical block size */
|
|
+ alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
|
|
+ alignment &= -alignment;
|
|
+ io_loc.sector = round_down(io_loc.sector, alignment);
|
|
+ io_loc.count += sector - io_loc.sector;
|
|
+ buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
|
|
+ io_loc.count = round_up(io_loc.count, alignment);
|
|
+
|
|
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r)) {
|
|
dio->bi_status = errno_to_blk_status(r);
|
|
goto free_ret;
|
|
@@ -1856,12 +1864,12 @@ static void integrity_metadata(struct work_struct *w)
|
|
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
|
|
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
|
|
if (unlikely(r)) {
|
|
+ if (likely(checksums != checksums_onstack))
|
|
+ kfree(checksums);
|
|
if (r > 0) {
|
|
- integrity_recheck(dio, checksums);
|
|
+ integrity_recheck(dio, checksums_onstack);
|
|
goto skip_io;
|
|
}
|
|
- if (likely(checksums != checksums_onstack))
|
|
- kfree(checksums);
|
|
goto error;
|
|
}
|
|
|
|
@@ -2367,7 +2375,6 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
|
|
else
|
|
skip_check:
|
|
dec_in_flight(dio);
|
|
-
|
|
} else {
|
|
INIT_WORK(&dio->work, integrity_metadata);
|
|
queue_work(ic->metadata_wq, &dio->work);
|
|
@@ -2775,7 +2782,7 @@ static void integrity_recalc(struct work_struct *w)
|
|
io_loc.sector = get_data_sector(ic, area, offset);
|
|
io_loc.count = n_sectors;
|
|
|
|
- r = dm_io(&io_req, 1, &io_loc, NULL);
|
|
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r)) {
|
|
dm_integrity_io_error(ic, "reading data", r);
|
|
goto err;
|
|
@@ -4151,7 +4158,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
|
|
} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
|
|
if (val < 1 << SECTOR_SHIFT ||
|
|
val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
|
|
- (val & (val -1))) {
|
|
+ (val & (val - 1))) {
|
|
r = -EINVAL;
|
|
ti->error = "Invalid block_size argument";
|
|
goto bad;
|
|
@@ -4477,7 +4484,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
|
|
if (ic->internal_hash) {
|
|
size_t recalc_tags_size;
|
|
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
|
|
- if (!ic->recalc_wq ) {
|
|
+ if (!ic->recalc_wq) {
|
|
ti->error = "Cannot allocate workqueue";
|
|
r = -ENOMEM;
|
|
goto bad;
|
|
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
|
|
index e488b05e35fa3..ec97658387c39 100644
|
|
--- a/drivers/md/dm-io.c
|
|
+++ b/drivers/md/dm-io.c
|
|
@@ -295,7 +295,7 @@ static void km_dp_init(struct dpages *dp, void *data)
|
|
*---------------------------------------------------------------*/
|
|
static void do_region(const blk_opf_t opf, unsigned int region,
|
|
struct dm_io_region *where, struct dpages *dp,
|
|
- struct io *io)
|
|
+ struct io *io, unsigned short ioprio)
|
|
{
|
|
struct bio *bio;
|
|
struct page *page;
|
|
@@ -344,6 +344,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
|
|
&io->client->bios);
|
|
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
|
|
bio->bi_end_io = endio;
|
|
+ bio->bi_ioprio = ioprio;
|
|
store_io_and_region_in_bio(bio, io, region);
|
|
|
|
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
|
|
@@ -371,7 +372,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
|
|
|
|
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
|
|
struct dm_io_region *where, struct dpages *dp,
|
|
- struct io *io, int sync)
|
|
+ struct io *io, int sync, unsigned short ioprio)
|
|
{
|
|
int i;
|
|
struct dpages old_pages = *dp;
|
|
@@ -388,7 +389,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
|
|
for (i = 0; i < num_regions; i++) {
|
|
*dp = old_pages;
|
|
if (where[i].count || (opf & REQ_PREFLUSH))
|
|
- do_region(opf, i, where + i, dp, io);
|
|
+ do_region(opf, i, where + i, dp, io, ioprio);
|
|
}
|
|
|
|
/*
|
|
@@ -413,7 +414,7 @@ static void sync_io_complete(unsigned long error, void *context)
|
|
|
|
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
|
|
struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
|
|
- unsigned long *error_bits)
|
|
+ unsigned long *error_bits, unsigned short ioprio)
|
|
{
|
|
struct io *io;
|
|
struct sync_io sio;
|
|
@@ -435,7 +436,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
|
|
io->vma_invalidate_address = dp->vma_invalidate_address;
|
|
io->vma_invalidate_size = dp->vma_invalidate_size;
|
|
|
|
- dispatch_io(opf, num_regions, where, dp, io, 1);
|
|
+ dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
|
|
|
|
wait_for_completion_io(&sio.wait);
|
|
|
|
@@ -447,7 +448,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
|
|
|
|
static int async_io(struct dm_io_client *client, unsigned int num_regions,
|
|
struct dm_io_region *where, blk_opf_t opf,
|
|
- struct dpages *dp, io_notify_fn fn, void *context)
|
|
+ struct dpages *dp, io_notify_fn fn, void *context,
|
|
+ unsigned short ioprio)
|
|
{
|
|
struct io *io;
|
|
|
|
@@ -467,7 +469,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
|
|
io->vma_invalidate_address = dp->vma_invalidate_address;
|
|
io->vma_invalidate_size = dp->vma_invalidate_size;
|
|
|
|
- dispatch_io(opf, num_regions, where, dp, io, 0);
|
|
+ dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
|
|
return 0;
|
|
}
|
|
|
|
@@ -509,7 +511,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
|
|
}
|
|
|
|
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
|
|
- struct dm_io_region *where, unsigned long *sync_error_bits)
|
|
+ struct dm_io_region *where, unsigned long *sync_error_bits,
|
|
+ unsigned short ioprio)
|
|
{
|
|
int r;
|
|
struct dpages dp;
|
|
@@ -520,11 +523,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
|
|
|
|
if (!io_req->notify.fn)
|
|
return sync_io(io_req->client, num_regions, where,
|
|
- io_req->bi_opf, &dp, sync_error_bits);
|
|
+ io_req->bi_opf, &dp, sync_error_bits, ioprio);
|
|
|
|
return async_io(io_req->client, num_regions, where,
|
|
io_req->bi_opf, &dp, io_req->notify.fn,
|
|
- io_req->notify.context);
|
|
+ io_req->notify.context, ioprio);
|
|
}
|
|
EXPORT_SYMBOL(dm_io);
|
|
|
|
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
|
|
index 0ef78e56aa88c..fda51bd140ed3 100644
|
|
--- a/drivers/md/dm-kcopyd.c
|
|
+++ b/drivers/md/dm-kcopyd.c
|
|
@@ -572,9 +572,9 @@ static int run_io_job(struct kcopyd_job *job)
|
|
io_job_start(job->kc->throttle);
|
|
|
|
if (job->op == REQ_OP_READ)
|
|
- r = dm_io(&io_req, 1, &job->source, NULL);
|
|
+ r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
|
|
else
|
|
- r = dm_io(&io_req, job->num_dests, job->dests, NULL);
|
|
+ r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
|
|
|
|
return r;
|
|
}
|
|
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
|
|
index 05141eea18d3c..da77878cb2c02 100644
|
|
--- a/drivers/md/dm-log.c
|
|
+++ b/drivers/md/dm-log.c
|
|
@@ -295,7 +295,7 @@ static int rw_header(struct log_c *lc, enum req_op op)
|
|
{
|
|
lc->io_req.bi_opf = op;
|
|
|
|
- return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
|
|
+ return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
|
|
}
|
|
|
|
static int flush_header(struct log_c *lc)
|
|
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
|
|
|
|
lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
|
|
|
|
- return dm_io(&lc->io_req, 1, &null_location, NULL);
|
|
+ return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
|
|
}
|
|
|
|
static int read_header(struct log_c *log)
|
|
@@ -756,8 +756,8 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
|
|
log_clear_bit(lc, lc->recovering_bits, region);
|
|
if (in_sync) {
|
|
log_set_bit(lc, lc->sync_bits, region);
|
|
- lc->sync_count++;
|
|
- } else if (log_test_bit(lc->sync_bits, region)) {
|
|
+ lc->sync_count++;
|
|
+ } else if (log_test_bit(lc->sync_bits, region)) {
|
|
lc->sync_count--;
|
|
log_clear_bit(lc, lc->sync_bits, region);
|
|
}
|
|
@@ -765,9 +765,9 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
|
|
|
|
static region_t core_get_sync_count(struct dm_dirty_log *log)
|
|
{
|
|
- struct log_c *lc = (struct log_c *) log->context;
|
|
+ struct log_c *lc = (struct log_c *) log->context;
|
|
|
|
- return lc->sync_count;
|
|
+ return lc->sync_count;
|
|
}
|
|
|
|
#define DMEMIT_SYNC \
|
|
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
|
|
index 4b7528dc2fd08..bf833ca880bc1 100644
|
|
--- a/drivers/md/dm-raid.c
|
|
+++ b/drivers/md/dm-raid.c
|
|
@@ -362,8 +362,8 @@ static struct {
|
|
const int mode;
|
|
const char *param;
|
|
} _raid456_journal_mode[] = {
|
|
- { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
|
|
- { R5C_JOURNAL_MODE_WRITE_BACK , "writeback" }
|
|
+ { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
|
|
+ { R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
|
|
};
|
|
|
|
/* Return MD raid4/5/6 journal mode for dm @journal_mode one */
|
|
@@ -1114,7 +1114,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
|
|
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
|
|
* [region_size <sectors>] Defines granularity of bitmap
|
|
* [journal_dev <dev>] raid4/5/6 journaling deviice
|
|
- * (i.e. write hole closing log)
|
|
+ * (i.e. write hole closing log)
|
|
*
|
|
* RAID10-only options:
|
|
* [raid10_copies <# copies>] Number of copies. (Default: 2)
|
|
@@ -3325,14 +3325,14 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
|
|
struct mddev *mddev = &rs->md;
|
|
|
|
/*
|
|
- * If we're reshaping to add disk(s)), ti->len and
|
|
+ * If we're reshaping to add disk(s), ti->len and
|
|
* mddev->array_sectors will differ during the process
|
|
* (ti->len > mddev->array_sectors), so we have to requeue
|
|
* bios with addresses > mddev->array_sectors here or
|
|
* there will occur accesses past EOD of the component
|
|
* data images thus erroring the raid set.
|
|
*/
|
|
- if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
|
|
+ if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
|
|
return DM_MAPIO_REQUEUE;
|
|
|
|
md_handle_request(mddev, bio);
|
|
@@ -3999,7 +3999,7 @@ static int raid_preresume(struct dm_target *ti)
|
|
}
|
|
|
|
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
|
|
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
|
|
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
|
|
(test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
|
|
(rs->requested_bitmap_chunk_sectors &&
|
|
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
|
|
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
|
|
index c38e63706d911..1004199ae77ac 100644
|
|
--- a/drivers/md/dm-raid1.c
|
|
+++ b/drivers/md/dm-raid1.c
|
|
@@ -273,7 +273,7 @@ static int mirror_flush(struct dm_target *ti)
|
|
}
|
|
|
|
error_bits = -1;
|
|
- dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
|
|
+ dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
|
|
if (unlikely(error_bits != 0)) {
|
|
for (i = 0; i < ms->nr_mirrors; i++)
|
|
if (test_bit(i, &error_bits))
|
|
@@ -543,7 +543,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
|
|
|
|
map_region(&io, m, bio);
|
|
bio_set_m(bio, m);
|
|
- BUG_ON(dm_io(&io_req, 1, &io, NULL));
|
|
+ BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
|
|
}
|
|
|
|
static inline int region_in_sync(struct mirror_set *ms, region_t region,
|
|
@@ -670,7 +670,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
|
|
*/
|
|
bio_set_m(bio, get_default_mirror(ms));
|
|
|
|
- BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
|
|
+ BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
|
|
}
|
|
|
|
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
|
|
@@ -902,7 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
|
|
if (IS_ERR(ms->io_client)) {
|
|
ti->error = "Error creating dm_io client";
|
|
kfree(ms);
|
|
- return NULL;
|
|
+ return NULL;
|
|
}
|
|
|
|
ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
|
|
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
|
|
index 80b95746a43e0..eee1cd3aa3fcf 100644
|
|
--- a/drivers/md/dm-snap-persistent.c
|
|
+++ b/drivers/md/dm-snap-persistent.c
|
|
@@ -220,7 +220,7 @@ static void do_metadata(struct work_struct *work)
|
|
{
|
|
struct mdata_req *req = container_of(work, struct mdata_req, work);
|
|
|
|
- req->result = dm_io(req->io_req, 1, req->where, NULL);
|
|
+ req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
|
|
}
|
|
|
|
/*
|
|
@@ -244,7 +244,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
|
|
struct mdata_req req;
|
|
|
|
if (!metadata)
|
|
- return dm_io(&io_req, 1, &where, NULL);
|
|
+ return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
|
|
|
|
req.where = &where;
|
|
req.io_req = &io_req;
|
|
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
|
|
index e0367a672eabf..aabb2435070b8 100644
|
|
--- a/drivers/md/dm-table.c
|
|
+++ b/drivers/md/dm-table.c
|
|
@@ -72,7 +72,7 @@ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
|
|
n = get_child(n, CHILDREN_PER_NODE - 1);
|
|
|
|
if (n >= t->counts[l])
|
|
- return (sector_t) - 1;
|
|
+ return (sector_t) -1;
|
|
|
|
return get_node(t, l, n)[KEYS_PER_NODE - 1];
|
|
}
|
|
@@ -1533,7 +1533,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
|
|
if (ti->type->iterate_devices &&
|
|
ti->type->iterate_devices(ti, func, data))
|
|
return true;
|
|
- }
|
|
+ }
|
|
|
|
return false;
|
|
}
|
|
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
|
|
index 601f9e4e6234f..f24d89af7c5f0 100644
|
|
--- a/drivers/md/dm-thin.c
|
|
+++ b/drivers/md/dm-thin.c
|
|
@@ -1179,9 +1179,9 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
|
|
discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
|
|
discard_parent->bi_end_io = passdown_endio;
|
|
discard_parent->bi_private = m;
|
|
- if (m->maybe_shared)
|
|
- passdown_double_checking_shared_status(m, discard_parent);
|
|
- else {
|
|
+ if (m->maybe_shared)
|
|
+ passdown_double_checking_shared_status(m, discard_parent);
|
|
+ else {
|
|
struct discard_op op;
|
|
|
|
begin_discard(&op, tc, discard_parent);
|
|
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
|
|
index b48e1b59e6da4..6a707b41dc865 100644
|
|
--- a/drivers/md/dm-verity-target.c
|
|
+++ b/drivers/md/dm-verity-target.c
|
|
@@ -503,7 +503,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
|
|
io_loc.bdev = v->data_dev->bdev;
|
|
io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
|
|
io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
|
|
- r = dm_io(&io_req, 1, &io_loc, NULL);
|
|
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r))
|
|
goto free_ret;
|
|
|
|
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
|
|
index 4620a98c99561..db93a91169d5e 100644
|
|
--- a/drivers/md/dm-verity.h
|
|
+++ b/drivers/md/dm-verity.h
|
|
@@ -80,12 +80,12 @@ struct dm_verity_io {
|
|
/* original value of bio->bi_end_io */
|
|
bio_end_io_t *orig_bi_end_io;
|
|
|
|
+ struct bvec_iter iter;
|
|
+
|
|
sector_t block;
|
|
unsigned int n_blocks;
|
|
bool in_tasklet;
|
|
|
|
- struct bvec_iter iter;
|
|
-
|
|
struct work_struct work;
|
|
|
|
char *recheck_buffer;
|
|
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
|
|
index c6ff43a8f0b25..20fc84b24fc75 100644
|
|
--- a/drivers/md/dm-writecache.c
|
|
+++ b/drivers/md/dm-writecache.c
|
|
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
|
|
req.notify.context = &endio;
|
|
|
|
/* writing via async dm-io (implied by notify.fn above) won't return an error */
|
|
- (void) dm_io(&req, 1, &region, NULL);
|
|
+ (void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
|
|
i = j;
|
|
}
|
|
|
|
@@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
|
|
req.notify.fn = NULL;
|
|
req.notify.context = NULL;
|
|
|
|
- r = dm_io(&req, 1, &region, NULL);
|
|
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r))
|
|
writecache_error(wc, r, "error writing superblock");
|
|
}
|
|
@@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
|
|
req.client = wc->dm_io;
|
|
req.notify.fn = NULL;
|
|
|
|
- r = dm_io(&req, 1, &region, NULL);
|
|
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
|
|
if (unlikely(r))
|
|
writecache_error(wc, r, "error flushing metadata: %d", r);
|
|
}
|
|
@@ -984,7 +984,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
|
|
req.client = wc->dm_io;
|
|
req.notify.fn = NULL;
|
|
|
|
- return dm_io(&req, 1, &region, NULL);
|
|
+ return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
|
|
}
|
|
|
|
static void writecache_resume(struct dm_target *ti)
|
|
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
|
|
index 0ec85d159bcde..29270f6f272f6 100644
|
|
--- a/drivers/md/dm.c
|
|
+++ b/drivers/md/dm.c
|
|
@@ -2897,6 +2897,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
|
|
|
|
static void __dm_internal_resume(struct mapped_device *md)
|
|
{
|
|
+ int r;
|
|
+ struct dm_table *map;
|
|
+
|
|
BUG_ON(!md->internal_suspend_count);
|
|
|
|
if (--md->internal_suspend_count)
|
|
@@ -2905,12 +2908,23 @@ static void __dm_internal_resume(struct mapped_device *md)
|
|
if (dm_suspended_md(md))
|
|
goto done; /* resume from nested suspend */
|
|
|
|
- /*
|
|
- * NOTE: existing callers don't need to call dm_table_resume_targets
|
|
- * (which may fail -- so best to avoid it for now by passing NULL map)
|
|
- */
|
|
- (void) __dm_resume(md, NULL);
|
|
-
|
|
+ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
|
|
+ r = __dm_resume(md, map);
|
|
+ if (r) {
|
|
+ /*
|
|
+ * If a preresume method of some target failed, we are in a
|
|
+ * tricky situation. We can't return an error to the caller. We
|
|
+ * can't fake success because then the "resume" and
|
|
+ * "postsuspend" methods would not be paired correctly, and it
|
|
+ * would break various targets, for example it would cause list
|
|
+ * corruption in the "origin" target.
|
|
+ *
|
|
+ * So, we fake normal suspend here, to make sure that the
|
|
+ * "resume" and "postsuspend" methods will be paired correctly.
|
|
+ */
|
|
+ DMERR("Preresume method failed: %d", r);
|
|
+ set_bit(DMF_SUSPENDED, &md->flags);
|
|
+ }
|
|
done:
|
|
clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
|
|
smp_mb__after_atomic();
|
|
diff --git a/drivers/md/md.c b/drivers/md/md.c
|
|
index 846bdee4daa0e..788acc81e7a84 100644
|
|
--- a/drivers/md/md.c
|
|
+++ b/drivers/md/md.c
|
|
@@ -4903,11 +4903,21 @@ action_store(struct mddev *mddev, const char *page, size_t len)
|
|
return -EINVAL;
|
|
err = mddev_lock(mddev);
|
|
if (!err) {
|
|
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
|
|
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
|
|
err = -EBUSY;
|
|
- else {
|
|
+ } else if (mddev->reshape_position == MaxSector ||
|
|
+ mddev->pers->check_reshape == NULL ||
|
|
+ mddev->pers->check_reshape(mddev)) {
|
|
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
|
|
err = mddev->pers->start_reshape(mddev);
|
|
+ } else {
|
|
+ /*
|
|
+ * If reshape is still in progress, and
|
|
+ * md_check_recovery() can continue to reshape,
|
|
+ * don't restart reshape because data can be
|
|
+ * corrupted for raid456.
|
|
+ */
|
|
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
|
|
}
|
|
mddev_unlock(mddev);
|
|
}
|
|
@@ -6233,7 +6243,15 @@ static void md_clean(struct mddev *mddev)
|
|
mddev->persistent = 0;
|
|
mddev->level = LEVEL_NONE;
|
|
mddev->clevel[0] = 0;
|
|
- mddev->flags = 0;
|
|
+ /*
|
|
+ * Don't clear MD_CLOSING, or mddev can be opened again.
|
|
+ * 'hold_active != 0' means mddev is still in the creation
|
|
+ * process and will be used later.
|
|
+ */
|
|
+ if (mddev->hold_active)
|
|
+ mddev->flags = 0;
|
|
+ else
|
|
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
|
|
mddev->sb_flags = 0;
|
|
mddev->ro = MD_RDWR;
|
|
mddev->metadata_type[0] = 0;
|
|
@@ -7561,7 +7579,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
|
|
int err = 0;
|
|
void __user *argp = (void __user *)arg;
|
|
struct mddev *mddev = NULL;
|
|
- bool did_set_md_closing = false;
|
|
|
|
if (!md_ioctl_valid(cmd))
|
|
return -ENOTTY;
|
|
@@ -7648,7 +7665,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
|
|
err = -EBUSY;
|
|
goto out;
|
|
}
|
|
- did_set_md_closing = true;
|
|
mutex_unlock(&mddev->open_mutex);
|
|
sync_blockdev(bdev);
|
|
}
|
|
@@ -7811,7 +7827,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
|
|
mddev->hold_active = 0;
|
|
mddev_unlock(mddev);
|
|
out:
|
|
- if(did_set_md_closing)
|
|
+ if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
|
|
clear_bit(MD_CLOSING, &mddev->flags);
|
|
return err;
|
|
}
|
|
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
|
|
index 1cc783d7030d8..18d949d63543b 100644
|
|
--- a/drivers/md/persistent-data/dm-btree.c
|
|
+++ b/drivers/md/persistent-data/dm-btree.c
|
|
@@ -726,7 +726,7 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
|
|
* nodes, so saves metadata space.
|
|
*/
|
|
static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
|
|
- struct dm_btree_value_type *vt, uint64_t key)
|
|
+ struct dm_btree_value_type *vt, uint64_t key)
|
|
{
|
|
int r;
|
|
unsigned int middle_index;
|
|
@@ -781,7 +781,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned int parent_inde
|
|
if (shadow_current(s) != right)
|
|
unlock_block(s->info, right);
|
|
|
|
- return r;
|
|
+ return r;
|
|
}
|
|
|
|
|
|
@@ -1216,7 +1216,7 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
|
|
static bool need_insert(struct btree_node *node, uint64_t *keys,
|
|
unsigned int level, unsigned int index)
|
|
{
|
|
- return ((index >= le32_to_cpu(node->header.nr_entries)) ||
|
|
+ return ((index >= le32_to_cpu(node->header.nr_entries)) ||
|
|
(le64_to_cpu(node->keys[index]) != keys[level]));
|
|
}
|
|
|
|
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
|
|
index af800efed9f3c..4833a3998c1d9 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-common.c
|
|
+++ b/drivers/md/persistent-data/dm-space-map-common.c
|
|
@@ -390,7 +390,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
|
|
}
|
|
|
|
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
|
|
- dm_block_t begin, dm_block_t end, dm_block_t *b)
|
|
+ dm_block_t begin, dm_block_t end, dm_block_t *b)
|
|
{
|
|
int r;
|
|
uint32_t count;
|
|
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
|
|
index 706ceb85d6800..63d9a72e3265c 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-common.h
|
|
+++ b/drivers/md/persistent-data/dm-space-map-common.h
|
|
@@ -120,7 +120,7 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
|
|
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
|
|
dm_block_t end, dm_block_t *result);
|
|
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
|
|
- dm_block_t begin, dm_block_t end, dm_block_t *result);
|
|
+ dm_block_t begin, dm_block_t end, dm_block_t *result);
|
|
|
|
/*
|
|
* The next three functions return (via nr_allocations) the net number of
|
|
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
|
|
index 7b318e7e8d459..009f7ffe4e10c 100644
|
|
--- a/drivers/md/raid10.c
|
|
+++ b/drivers/md/raid10.c
|
|
@@ -920,6 +920,7 @@ static void flush_pending_writes(struct r10conf *conf)
|
|
|
|
raid1_submit_write(bio);
|
|
bio = next;
|
|
+ cond_resched();
|
|
}
|
|
blk_finish_plug(&plug);
|
|
} else
|
|
@@ -1130,6 +1131,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
|
|
|
|
raid1_submit_write(bio);
|
|
bio = next;
|
|
+ cond_resched();
|
|
}
|
|
kfree(plug);
|
|
}
|
|
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 303d02b1d71c9..fe30f5b0050dd 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
{
unsigned pat;
unsigned plane;
+ int ret = 0;

tpg->max_line_width = max_w;
for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
@@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)

tpg->lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
if (plane == 0)
continue;
tpg->downsampled_lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->downsampled_lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->downsampled_lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
}
}
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
@@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)

tpg->contrast_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->contrast_line[plane])
- return -ENOMEM;
+ if (!tpg->contrast_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->black_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->black_line[plane])
- return -ENOMEM;
+ if (!tpg->black_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->random_line[plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->random_line[plane])
- return -ENOMEM;
+ if (!tpg->random_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
}
return 0;
+
+free_contrast_line:
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->contrast_line[plane]);
+ vfree(tpg->black_line[plane]);
+ vfree(tpg->random_line[plane]);
+ tpg->contrast_line[plane] = NULL;
+ tpg->black_line[plane] = NULL;
+ tpg->random_line[plane] = NULL;
+ }
+free_lines:
+ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->lines[pat][plane]);
+ tpg->lines[pat][plane] = NULL;
+ if (plane == 0)
+ continue;
+ vfree(tpg->downsampled_lines[pat][plane]);
+ tpg->downsampled_lines[pat][plane] = NULL;
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(tpg_alloc);

diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index d352e028491aa..aefee2277254d 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -494,6 +494,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
if (!dvbdevfops) {
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
@@ -502,6 +503,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if (!new_node) {
kfree(dvbdevfops);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
@@ -535,6 +537,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
}
list_del (&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
@@ -557,6 +560,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvb_media_device_free(dvbdev);
list_del (&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return ret;
}
@@ -575,6 +579,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvb_media_device_free(dvbdev);
list_del (&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return PTR_ERR(clsdev);
}
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
|
|
index 04556b77c16c9..0977564a4a1a4 100644
|
|
--- a/drivers/media/dvb-frontends/stv0367.c
|
|
+++ b/drivers/media/dvb-frontends/stv0367.c
|
|
@@ -118,50 +118,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S
|
|
}
|
|
};
|
|
|
|
-static
|
|
-int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
|
|
+static noinline_for_stack
|
|
+int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
|
|
{
|
|
- u8 buf[MAX_XFER_SIZE];
|
|
+ u8 buf[3] = { MSB(reg), LSB(reg), data };
|
|
struct i2c_msg msg = {
|
|
.addr = state->config->demod_address,
|
|
.flags = 0,
|
|
.buf = buf,
|
|
- .len = len + 2
|
|
+ .len = 3,
|
|
};
|
|
int ret;
|
|
|
|
- if (2 + len > sizeof(buf)) {
|
|
- printk(KERN_WARNING
|
|
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
|
|
- KBUILD_MODNAME, reg, len);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
-
|
|
- buf[0] = MSB(reg);
|
|
- buf[1] = LSB(reg);
|
|
- memcpy(buf + 2, data, len);
|
|
-
|
|
if (i2cdebug)
|
|
printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
|
|
- state->config->demod_address, reg, buf[2]);
|
|
+ state->config->demod_address, reg, data);
|
|
|
|
ret = i2c_transfer(state->i2c, &msg, 1);
|
|
if (ret != 1)
|
|
printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
|
|
- __func__, state->config->demod_address, reg, buf[2]);
|
|
+ __func__, state->config->demod_address, reg, data);
|
|
|
|
return (ret != 1) ? -EREMOTEIO : 0;
|
|
}
|
|
|
|
-static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
|
|
-{
|
|
- u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
|
|
-
|
|
- return stv0367_writeregs(state, reg, &tmp, 1);
|
|
-}
|
|
-
|
|
-static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
|
|
+static noinline_for_stack
|
|
+u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
|
|
{
|
|
u8 b0[] = { 0, 0 };
|
|
u8 b1[] = { 0 };
|
|
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
|
|
index 200841c1f5cf0..68628ccecd161 100644
|
|
--- a/drivers/media/i2c/tc358743.c
|
|
+++ b/drivers/media/i2c/tc358743.c
|
|
@@ -2094,9 +2094,6 @@ static int tc358743_probe(struct i2c_client *client)
|
|
state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
|
|
|
|
sd->dev = &client->dev;
|
|
- err = v4l2_async_register_subdev(sd);
|
|
- if (err < 0)
|
|
- goto err_hdl;
|
|
|
|
mutex_init(&state->confctl_mutex);
|
|
|
|
@@ -2154,6 +2151,10 @@ static int tc358743_probe(struct i2c_client *client)
|
|
if (err)
|
|
goto err_work_queues;
|
|
|
|
+ err = v4l2_async_register_subdev(sd);
|
|
+ if (err < 0)
|
|
+ goto err_work_queues;
|
|
+
|
|
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
|
|
client->addr << 1, client->adapter->name);
|
|
|
|
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
|
|
index b065ccd069140..378a1cba0144f 100644
|
|
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
|
|
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
|
|
@@ -26,7 +26,7 @@ static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg)
|
|
vpu->inst_addr = msg->vpu_inst_addr;
|
|
}
|
|
|
|
-static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len,
|
|
+static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len,
|
|
void *priv)
|
|
{
|
|
const struct mdp_ipi_comm_ack *msg = data;
|
|
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
|
|
index cfc7ebed8fb7a..1ec29f1b163a1 100644
|
|
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
|
|
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
|
|
@@ -29,15 +29,7 @@ static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
|
|
mtk_vcodec_ipi_handler handler,
|
|
const char *name, void *priv)
|
|
{
|
|
- /*
|
|
- * The handler we receive takes a void * as its first argument. We
|
|
- * cannot change this because it needs to be passed down to the rproc
|
|
- * subsystem when SCP is used. VPU takes a const argument, which is
|
|
- * more constrained, so the conversion below is safe.
|
|
- */
|
|
- ipi_handler_t handler_const = (ipi_handler_t)handler;
|
|
-
|
|
- return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
|
|
+ return vpu_ipi_register(fw->pdev, id, handler, name, priv);
|
|
}
|
|
|
|
static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
|
|
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
|
|
index 6beab9e86a22a..44adf5cfc9bb2 100644
|
|
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
|
|
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
|
|
@@ -635,7 +635,7 @@ int vpu_load_firmware(struct platform_device *pdev)
|
|
}
|
|
EXPORT_SYMBOL_GPL(vpu_load_firmware);
|
|
|
|
-static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv)
|
|
+static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
|
|
{
|
|
struct mtk_vpu *vpu = priv;
|
|
const struct vpu_run *run = data;
|
|
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.h b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
|
|
index a56053ff135af..da05f3e740810 100644
|
|
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.h
|
|
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
|
|
@@ -17,7 +17,7 @@
|
|
* VPU interfaces with other blocks by share memory and interrupt.
|
|
*/
|
|
|
|
-typedef void (*ipi_handler_t) (const void *data,
|
|
+typedef void (*ipi_handler_t) (void *data,
|
|
unsigned int len,
|
|
void *priv);
|
|
|
|
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
|
|
index d4540684ea9af..0bcb9db5ad190 100644
|
|
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
|
|
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
|
|
@@ -701,6 +701,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
|
|
unsigned int i;
|
|
u32 status;
|
|
|
|
+ if (!rkisp1->irqs_enabled)
|
|
+ return IRQ_NONE;
|
|
+
|
|
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
|
|
if (!status)
|
|
return IRQ_NONE;
|
|
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
|
|
index f9ec1c6138947..5776292f914a4 100644
|
|
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
|
|
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
|
|
@@ -467,6 +467,7 @@ struct rkisp1_debug {
|
|
* @debug: debug params to be exposed on debugfs
|
|
* @info: version-specific ISP information
|
|
* @irqs: IRQ line numbers
|
|
+ * @irqs_enabled: the hardware is enabled and can cause interrupts
|
|
*/
|
|
struct rkisp1_device {
|
|
void __iomem *base_addr;
|
|
@@ -488,6 +489,7 @@ struct rkisp1_device {
|
|
struct rkisp1_debug debug;
|
|
const struct rkisp1_info *info;
|
|
int irqs[RKISP1_NUM_IRQS];
|
|
+ bool irqs_enabled;
|
|
};
|
|
|
|
/*
|
|
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
|
|
index e862f515cc6d3..95b6e41c48ec2 100644
|
|
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
|
|
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
|
|
@@ -211,6 +211,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
|
|
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
|
|
u32 val, status;
|
|
|
|
+ if (!rkisp1->irqs_enabled)
|
|
+ return IRQ_NONE;
|
|
+
|
|
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
|
|
if (!status)
|
|
return IRQ_NONE;
|
|
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
|
|
index 41abb18b00acb..7a3b69ba51b97 100644
|
|
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
|
|
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
|
|
@@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
|
|
{
|
|
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
|
|
|
|
+ rkisp1->irqs_enabled = false;
|
|
+ /* Make sure the IRQ handler will see the above */
|
|
+ mb();
|
|
+
|
|
+ /*
|
|
+ * Wait until any running IRQ handler has returned. The IRQ handler
|
|
+ * may get called even after this (as it's a shared interrupt line)
|
|
+ * but the 'irqs_enabled' flag will make the handler return immediately.
|
|
+ */
|
|
+ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
|
|
+ if (rkisp1->irqs[il] == -1)
|
|
+ continue;
|
|
+
|
|
+ /* Skip if the irq line is the same as previous */
|
|
+ if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il])
|
|
+ synchronize_irq(rkisp1->irqs[il]);
|
|
+ }
|
|
+
|
|
clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
|
|
return pinctrl_pm_select_sleep_state(dev);
|
|
}
|
|
@@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ rkisp1->irqs_enabled = true;
|
|
+ /* Make sure the IRQ handler will see the above */
|
|
+ mb();
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
|
|
index 00dca284c1222..2af5c1a48070b 100644
|
|
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
|
|
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
|
|
@@ -1023,6 +1023,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
|
|
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
|
|
u32 status, isp_err;
|
|
|
|
+ if (!rkisp1->irqs_enabled)
|
|
+ return IRQ_NONE;
|
|
+
|
|
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
|
|
if (!status)
|
|
return IRQ_NONE;
|
|
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
|
|
index aa65d70b6270a..7a2f558c981db 100644
|
|
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
|
|
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
|
|
@@ -66,6 +66,7 @@ static void deinterlace_device_run(void *priv)
|
|
struct vb2_v4l2_buffer *src, *dst;
|
|
unsigned int hstep, vstep;
|
|
dma_addr_t addr;
|
|
+ int i;
|
|
|
|
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
|
|
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
|
|
@@ -160,6 +161,26 @@ static void deinterlace_device_run(void *priv)
|
|
deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
|
|
deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
|
|
|
|
+ /* neutral filter coefficients */
|
|
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
|
|
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
|
|
+ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
|
|
+ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
|
|
+
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
|
|
+ DEINTERLACE_IDENTITY_COEF);
|
|
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
|
|
+ DEINTERLACE_IDENTITY_COEF);
|
|
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
|
|
+ DEINTERLACE_IDENTITY_COEF);
|
|
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
|
|
+ DEINTERLACE_IDENTITY_COEF);
|
|
+ }
|
|
+
|
|
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
|
|
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
|
|
+
|
|
deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
|
|
DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
|
|
DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
|
|
@@ -248,7 +269,6 @@ static irqreturn_t deinterlace_irq(int irq, void *data)
|
|
static void deinterlace_init(struct deinterlace_dev *dev)
|
|
{
|
|
u32 val;
|
|
- int i;
|
|
|
|
deinterlace_write(dev, DEINTERLACE_BYPASS,
|
|
DEINTERLACE_BYPASS_CSC);
|
|
@@ -284,27 +304,7 @@ static void deinterlace_init(struct deinterlace_dev *dev)
|
|
|
|
deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
|
|
DEINTERLACE_CHROMA_DIFF_TH_MSK,
|
|
- DEINTERLACE_CHROMA_DIFF_TH(5));
|
|
-
|
|
- /* neutral filter coefficients */
|
|
- deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
|
|
- DEINTERLACE_FRM_CTRL_COEF_ACCESS);
|
|
- readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
|
|
- val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
|
|
-
|
|
- for (i = 0; i < 32; i++) {
|
|
- deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
|
|
- DEINTERLACE_IDENTITY_COEF);
|
|
- deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
|
|
- DEINTERLACE_IDENTITY_COEF);
|
|
- deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
|
|
- DEINTERLACE_IDENTITY_COEF);
|
|
- deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
|
|
- DEINTERLACE_IDENTITY_COEF);
|
|
- }
|
|
-
|
|
- deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
|
|
- DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
|
|
+ DEINTERLACE_CHROMA_DIFF_TH(31));
|
|
}
|
|
|
|
static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
|
|
@@ -931,11 +931,18 @@ static int deinterlace_runtime_resume(struct device *device)
|
|
return ret;
|
|
}
|
|
|
|
+ ret = reset_control_deassert(dev->rstc);
|
|
+ if (ret) {
|
|
+ dev_err(dev->dev, "Failed to apply reset\n");
|
|
+
|
|
+ goto err_exclusive_rate;
|
|
+ }
|
|
+
|
|
ret = clk_prepare_enable(dev->bus_clk);
|
|
if (ret) {
|
|
dev_err(dev->dev, "Failed to enable bus clock\n");
|
|
|
|
- goto err_exclusive_rate;
|
|
+ goto err_rst;
|
|
}
|
|
|
|
ret = clk_prepare_enable(dev->mod_clk);
|
|
@@ -952,23 +959,16 @@ static int deinterlace_runtime_resume(struct device *device)
|
|
goto err_mod_clk;
|
|
}
|
|
|
|
- ret = reset_control_deassert(dev->rstc);
|
|
- if (ret) {
|
|
- dev_err(dev->dev, "Failed to apply reset\n");
|
|
-
|
|
- goto err_ram_clk;
|
|
- }
|
|
-
|
|
deinterlace_init(dev);
|
|
|
|
return 0;
|
|
|
|
-err_ram_clk:
|
|
- clk_disable_unprepare(dev->ram_clk);
|
|
err_mod_clk:
|
|
clk_disable_unprepare(dev->mod_clk);
|
|
err_bus_clk:
|
|
clk_disable_unprepare(dev->bus_clk);
|
|
+err_rst:
|
|
+ reset_control_assert(dev->rstc);
|
|
err_exclusive_rate:
|
|
clk_rate_exclusive_put(dev->mod_clk);
|
|
|
|
@@ -979,11 +979,12 @@ static int deinterlace_runtime_suspend(struct device *device)
|
|
{
|
|
struct deinterlace_dev *dev = dev_get_drvdata(device);
|
|
|
|
- reset_control_assert(dev->rstc);
|
|
-
|
|
clk_disable_unprepare(dev->ram_clk);
|
|
clk_disable_unprepare(dev->mod_clk);
|
|
clk_disable_unprepare(dev->bus_clk);
|
|
+
|
|
+ reset_control_assert(dev->rstc);
|
|
+
|
|
clk_rate_exclusive_put(dev->mod_clk);
|
|
|
|
return 0;
|
|
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
|
|
index 4d037c92af7c5..bae76023cf71d 100644
|
|
--- a/drivers/media/usb/em28xx/em28xx-cards.c
|
|
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
|
|
@@ -4094,6 +4094,10 @@ static int em28xx_usb_probe(struct usb_interface *intf,
|
|
* topology will likely change after the load of the em28xx subdrivers.
|
|
*/
|
|
#ifdef CONFIG_MEDIA_CONTROLLER
|
|
+ /*
|
|
+ * No need to check the return value, the device will still be
|
|
+ * usable without media controller API.
|
|
+ */
|
|
retval = media_device_register(dev->media_dev);
|
|
#endif
|
|
|
|
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
|
|
index 0c24e29843048..eb03f98b2ef11 100644
|
|
--- a/drivers/media/usb/go7007/go7007-driver.c
|
|
+++ b/drivers/media/usb/go7007/go7007-driver.c
|
|
@@ -80,7 +80,7 @@ static int go7007_load_encoder(struct go7007 *go)
|
|
const struct firmware *fw_entry;
|
|
char fw_name[] = "go7007/go7007fw.bin";
|
|
void *bounce;
|
|
- int fw_len, rv = 0;
|
|
+ int fw_len;
|
|
u16 intr_val, intr_data;
|
|
|
|
if (go->boot_fw == NULL) {
|
|
@@ -109,9 +109,11 @@ static int go7007_load_encoder(struct go7007 *go)
|
|
go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
|
|
(intr_val & ~0x1) != 0x5a5a) {
|
|
v4l2_err(go, "error transferring firmware\n");
|
|
- rv = -1;
|
|
+ kfree(go->boot_fw);
|
|
+ go->boot_fw = NULL;
|
|
+ return -1;
|
|
}
|
|
- return rv;
|
|
+ return 0;
|
|
}
|
|
|
|
MODULE_FIRMWARE("go7007/go7007fw.bin");
|
|
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
|
|
index eeb85981e02b6..762c13e49bfa5 100644
|
|
--- a/drivers/media/usb/go7007/go7007-usb.c
|
|
+++ b/drivers/media/usb/go7007/go7007-usb.c
|
|
@@ -1201,7 +1201,9 @@ static int go7007_usb_probe(struct usb_interface *intf,
|
|
u16 channel;
|
|
|
|
/* read channel number from GPIO[1:0] */
|
|
- go7007_read_addr(go, 0x3c81, &channel);
|
|
+ if (go7007_read_addr(go, 0x3c81, &channel))
|
|
+ goto allocfail;
|
|
+
|
|
channel &= 0x3;
|
|
go->board_id = GO7007_BOARDID_ADLINK_MPG24;
|
|
usb->board = board = &board_adlink_mpg24;
|
|
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
|
|
index 1764674de98bc..73c95ba2328a4 100644
|
|
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
|
|
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
|
|
@@ -90,8 +90,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp)
|
|
}
|
|
|
|
|
|
-static void pvr2_context_notify(struct pvr2_context *mp)
|
|
+static void pvr2_context_notify(void *ptr)
|
|
{
|
|
+ struct pvr2_context *mp = ptr;
|
|
+
|
|
pvr2_context_set_notify(mp,!0);
|
|
}
|
|
|
|
@@ -106,9 +108,7 @@ static void pvr2_context_check(struct pvr2_context *mp)
|
|
pvr2_trace(PVR2_TRACE_CTXT,
|
|
"pvr2_context %p (initialize)", mp);
|
|
/* Finish hardware initialization */
|
|
- if (pvr2_hdw_initialize(mp->hdw,
|
|
- (void (*)(void *))pvr2_context_notify,
|
|
- mp)) {
|
|
+ if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) {
|
|
mp->video_stream.stream =
|
|
pvr2_hdw_get_video_stream(mp->hdw);
|
|
/* Trigger interface initialization. By doing this
|
|
@@ -267,9 +267,9 @@ static void pvr2_context_exit(struct pvr2_context *mp)
|
|
void pvr2_context_disconnect(struct pvr2_context *mp)
|
|
{
|
|
pvr2_hdw_disconnect(mp->hdw);
|
|
- mp->disconnect_flag = !0;
|
|
if (!pvr2_context_shutok())
|
|
pvr2_context_notify(mp);
|
|
+ mp->disconnect_flag = !0;
|
|
}
|
|
|
|
|
|
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
|
|
index 26811efe0fb58..9a9bae21c6147 100644
|
|
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
|
|
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
|
|
@@ -88,8 +88,10 @@ static int pvr2_dvb_feed_thread(void *data)
|
|
return stat;
|
|
}
|
|
|
|
-static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap)
|
|
+static void pvr2_dvb_notify(void *ptr)
|
|
{
|
|
+ struct pvr2_dvb_adapter *adap = ptr;
|
|
+
|
|
wake_up(&adap->buffer_wait_data);
|
|
}
|
|
|
|
@@ -149,7 +151,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
|
|
}
|
|
|
|
pvr2_stream_set_callback(pvr->video_stream.stream,
|
|
- (pvr2_stream_callback) pvr2_dvb_notify, adap);
|
|
+ pvr2_dvb_notify, adap);
|
|
|
|
ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT);
|
|
if (ret < 0) return ret;
|
|
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
|
|
index c04ab7258d645..d608b793fa847 100644
|
|
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
|
|
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
|
|
@@ -1033,8 +1033,10 @@ static int pvr2_v4l2_open(struct file *file)
|
|
}
|
|
|
|
|
|
-static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
|
|
+static void pvr2_v4l2_notify(void *ptr)
|
|
{
|
|
+ struct pvr2_v4l2_fh *fhp = ptr;
|
|
+
|
|
wake_up(&fhp->wait_data);
|
|
}
|
|
|
|
@@ -1067,7 +1069,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
|
|
|
|
hdw = fh->channel.mc_head->hdw;
|
|
sp = fh->pdi->stream->stream;
|
|
- pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
|
|
+ pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh);
|
|
pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
|
|
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
|
|
return pvr2_ioread_set_enabled(fh->rhp,!0);
|
|
@@ -1198,11 +1200,6 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
|
|
dip->minor_type = pvr2_v4l_type_video;
|
|
nr_ptr = video_nr;
|
|
caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
|
|
- if (!dip->stream) {
|
|
- pr_err(KBUILD_MODNAME
|
|
- ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
|
|
- return;
|
|
- }
|
|
break;
|
|
case VFL_TYPE_VBI:
|
|
dip->config = pvr2_config_vbi;
|
|
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
|
|
index be7fde1ed3eaa..97645d6509e1c 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
|
|
@@ -1084,11 +1084,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev,
|
|
entity->function = function;
|
|
|
|
ret = media_entity_pads_init(entity, num_pads, pads);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ kfree(entity->name);
|
|
+ entity->name = NULL;
|
|
return ret;
|
|
+ }
|
|
ret = media_device_register_entity(mdev, entity);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ kfree(entity->name);
|
|
+ entity->name = NULL;
|
|
return ret;
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
|
|
index 5d3715a28b28e..dbe1009943718 100644
|
|
--- a/drivers/mfd/altera-sysmgr.c
|
|
+++ b/drivers/mfd/altera-sysmgr.c
|
|
@@ -110,7 +110,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
|
|
|
|
dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
|
|
(void *)sysmgr_np);
|
|
- of_node_put(sysmgr_np);
|
|
+ if (property)
|
|
+ of_node_put(sysmgr_np);
|
|
+
|
|
if (!dev)
|
|
return ERR_PTR(-EPROBE_DEFER);
|
|
|
|
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
|
|
index 6196724ef39bb..ecfe151220919 100644
|
|
--- a/drivers/mfd/syscon.c
|
|
+++ b/drivers/mfd/syscon.c
|
|
@@ -223,7 +223,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
regmap = syscon_node_to_regmap(syscon_np);
|
|
- of_node_put(syscon_np);
|
|
+
|
|
+ if (property)
|
|
+ of_node_put(syscon_np);
|
|
|
|
return regmap;
|
|
}
|
|
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
|
|
index 9aa3027ca25e4..f2abebb2d8574 100644
|
|
--- a/drivers/mmc/host/wmt-sdmmc.c
|
|
+++ b/drivers/mmc/host/wmt-sdmmc.c
|
|
@@ -886,7 +886,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
|
|
{
|
|
struct mmc_host *mmc;
|
|
struct wmt_mci_priv *priv;
|
|
- struct resource *res;
|
|
u32 reg_tmp;
|
|
|
|
mmc = platform_get_drvdata(pdev);
|
|
@@ -914,9 +913,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
|
|
clk_disable_unprepare(priv->clk_sdmmc);
|
|
clk_put(priv->clk_sdmmc);
|
|
|
|
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
- release_mem_region(res->start, resource_size(res));
|
|
-
|
|
mmc_free_host(mmc);
|
|
|
|
dev_info(&pdev->dev, "WMT MCI device removed\n");
|
|
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
|
|
index 19dad5a23f944..8cdb3512107d3 100644
|
|
--- a/drivers/mtd/maps/physmap-core.c
|
|
+++ b/drivers/mtd/maps/physmap-core.c
|
|
@@ -524,7 +524,7 @@ static int physmap_flash_probe(struct platform_device *dev)
|
|
if (!info->maps[i].phys)
|
|
info->maps[i].phys = res->start;
|
|
|
|
- info->win_order = get_bitmask_order(resource_size(res)) - 1;
|
|
+ info->win_order = fls64(resource_size(res)) - 1;
|
|
info->maps[i].size = BIT(info->win_order +
|
|
(info->gpios ?
|
|
info->gpios->ndescs : 0));
|
|
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
|
|
index 452ecaf7775ac..1cfe3dd0bad4d 100644
|
|
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
|
|
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
|
|
@@ -303,8 +303,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
|
|
return 0;
|
|
}
|
|
|
|
-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
|
|
+static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
|
|
{
|
|
+ struct lpc32xx_nand_host *host = data;
|
|
uint8_t sr;
|
|
|
|
/* Clear interrupt flag by reading status */
|
|
@@ -779,7 +780,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
|
|
goto release_dma_chan;
|
|
}
|
|
|
|
- if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
|
|
+ if (request_irq(host->irq, &lpc3xxx_nand_irq,
|
|
IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
|
|
dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
|
|
res = -ENXIO;
|
|
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
|
|
index b988c8a40d536..07065c1af55e4 100644
|
|
--- a/drivers/net/dsa/mt7530.c
|
|
+++ b/drivers/net/dsa/mt7530.c
|
|
@@ -998,20 +998,56 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
|
|
mutex_unlock(&priv->reg_mutex);
|
|
}
|
|
|
|
+/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
|
|
+ * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
|
|
+ * must only be propagated to C-VLAN and MAC Bridge components. That means
|
|
+ * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
|
|
+ * these frames are supposed to be processed by the CPU (software). So we make
|
|
+ * the switch only forward them to the CPU port. And if received from a CPU
|
|
+ * port, forward to a single port. The software is responsible of making the
|
|
+ * switch conform to the latter by setting a single port as destination port on
|
|
+ * the special tag.
|
|
+ *
|
|
+ * This switch intellectual property cannot conform to this part of the standard
|
|
+ * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
|
|
+ * DAs, it also includes :22-FF which the scope of propagation is not supposed
|
|
+ * to be restricted for these MAC DAs.
|
|
+ */
|
|
static void
|
|
mt753x_trap_frames(struct mt7530_priv *priv)
|
|
{
|
|
- /* Trap BPDUs to the CPU port(s) */
|
|
- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
|
|
+ /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
|
|
+ * VLAN-untagged.
|
|
+ */
|
|
+ mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
|
|
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
|
|
+ MT753X_BPDU_PORT_FW_MASK,
|
|
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
|
|
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
|
|
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
|
|
MT753X_BPDU_CPU_ONLY);
|
|
|
|
- /* Trap 802.1X PAE frames to the CPU port(s) */
|
|
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
|
|
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
|
|
+ /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
|
|
+ * them VLAN-untagged.
|
|
+ */
|
|
+ mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
|
|
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
|
|
+ MT753X_R01_PORT_FW_MASK,
|
|
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
|
|
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
|
|
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
|
|
+ MT753X_BPDU_CPU_ONLY);
|
|
|
|
- /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
|
|
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
|
|
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
|
|
+ /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
|
|
+ * them VLAN-untagged.
|
|
+ */
|
|
+ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
|
|
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
|
|
+ MT753X_R03_PORT_FW_MASK,
|
|
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
|
|
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
|
|
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
|
|
+ MT753X_BPDU_CPU_ONLY);
|
|
}
|
|
|
|
static int
|
|
@@ -2187,11 +2223,11 @@ mt7530_setup(struct dsa_switch *ds)
|
|
*/
|
|
if (priv->mcm) {
|
|
reset_control_assert(priv->rstc);
|
|
- usleep_range(1000, 1100);
|
|
+ usleep_range(5000, 5100);
|
|
reset_control_deassert(priv->rstc);
|
|
} else {
|
|
gpiod_set_value_cansleep(priv->reset, 0);
|
|
- usleep_range(1000, 1100);
|
|
+ usleep_range(5000, 5100);
|
|
gpiod_set_value_cansleep(priv->reset, 1);
|
|
}
|
|
|
|
@@ -2401,11 +2437,11 @@ mt7531_setup(struct dsa_switch *ds)
|
|
*/
|
|
if (priv->mcm) {
|
|
reset_control_assert(priv->rstc);
|
|
- usleep_range(1000, 1100);
|
|
+ usleep_range(5000, 5100);
|
|
reset_control_deassert(priv->rstc);
|
|
} else {
|
|
gpiod_set_value_cansleep(priv->reset, 0);
|
|
- usleep_range(1000, 1100);
|
|
+ usleep_range(5000, 5100);
|
|
gpiod_set_value_cansleep(priv->reset, 1);
|
|
}
|
|
|
|
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
|
|
index 6202b0f8c3f34..fa2afa67ceb07 100644
|
|
--- a/drivers/net/dsa/mt7530.h
|
|
+++ b/drivers/net/dsa/mt7530.h
|
|
@@ -63,14 +63,33 @@ enum mt753x_id {
|
|
|
|
/* Registers for BPDU and PAE frame control*/
|
|
#define MT753X_BPC 0x24
|
|
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
|
|
+#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
|
|
+#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
|
|
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
|
|
#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
|
|
+#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
|
|
+#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
|
|
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
|
|
+
|
|
+/* Register for :01 and :02 MAC DA frame control */
|
|
+#define MT753X_RGAC1 0x28
|
|
+#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
|
|
+#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
|
|
+#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
|
|
+#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
|
|
+#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
|
|
+#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
|
|
+#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
|
|
|
|
/* Register for :03 and :0E MAC DA frame control */
|
|
#define MT753X_RGAC2 0x2c
|
|
+#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
|
|
+#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
|
|
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
|
|
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
|
|
+#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
|
|
+#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
|
|
+#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
|
|
|
|
enum mt753x_bpdu_port_fw {
|
|
MT753X_BPDU_FOLLOW_MFC,
|
|
@@ -251,6 +270,7 @@ enum mt7530_port_mode {
|
|
enum mt7530_vlan_port_eg_tag {
|
|
MT7530_VLAN_EG_DISABLED = 0,
|
|
MT7530_VLAN_EG_CONSISTENT = 1,
|
|
+ MT7530_VLAN_EG_UNTAGGED = 4,
|
|
};
|
|
|
|
enum mt7530_vlan_port_attr {
|
|
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
index 044b8afde69a0..9e82e7b9c3b72 100644
|
|
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
@@ -3174,22 +3174,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
|
|
- struct net_device *sb_dev)
|
|
-{
|
|
- u16 qid;
|
|
- /* we suspect that this is good for in--kernel network services that
|
|
- * want to loop incoming skb rx to tx in normal user generated traffic,
|
|
- * most probably we will not get to this
|
|
- */
|
|
- if (skb_rx_queue_recorded(skb))
|
|
- qid = skb_get_rx_queue(skb);
|
|
- else
|
|
- qid = netdev_pick_tx(dev, skb, NULL);
|
|
-
|
|
- return qid;
|
|
-}
|
|
-
|
|
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
|
|
{
|
|
struct device *dev = &pdev->dev;
|
|
@@ -3359,7 +3343,6 @@ static const struct net_device_ops ena_netdev_ops = {
|
|
.ndo_open = ena_open,
|
|
.ndo_stop = ena_close,
|
|
.ndo_start_xmit = ena_start_xmit,
|
|
- .ndo_select_queue = ena_select_queue,
|
|
.ndo_get_stats64 = ena_get_stats64,
|
|
.ndo_tx_timeout = ena_tx_timeout,
|
|
.ndo_change_mtu = ena_change_mtu,
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
|
|
index d8b1824c334d3..0bc1367fd6492 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
|
|
@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
|
|
static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
|
|
struct bnx2x_alloc_pool *pool)
|
|
{
|
|
- if (!pool->page)
|
|
- return;
|
|
-
|
|
put_page(pool->page);
|
|
|
|
pool->page = NULL;
|
|
@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
|
|
{
|
|
int i;
|
|
|
|
+ if (!fp->page_pool.page)
|
|
+ return;
|
|
+
|
|
if (fp->mode == TPA_MODE_DISABLED)
|
|
return;
|
|
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
|
|
index 3b6dbf158b98d..f72dc0cee30e5 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
|
|
@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
|
|
if (hns3_nic_resetting(ndev))
|
|
return -EBUSY;
|
|
|
|
- if (h->kinfo.dcb_ops->ieee_setapp)
|
|
+ if (h->kinfo.dcb_ops->ieee_delapp)
|
|
return h->kinfo.dcb_ops->ieee_delapp(h, app);
|
|
|
|
return -EOPNOTSUPP;
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
index 48b0cb5ec5d29..27037ce795902 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
@@ -2990,7 +2990,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
|
|
int ret;
|
|
|
|
hdev->support_sfp_query = true;
|
|
- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
|
|
+
|
|
+ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
|
|
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
|
|
+
|
|
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
|
|
hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
|
|
if (ret)
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
|
|
index a40b1583f1149..0f06f95b09bc2 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
|
|
@@ -120,7 +120,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
|
|
u64 ns = nsec;
|
|
u32 sec_h;
|
|
|
|
- if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
|
|
+ if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
|
|
return;
|
|
|
|
/* Since the BD does not have enough space for the higher 16 bits of
|
|
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
index 45ce4ed16146e..81d9a5338be5e 100644
|
|
--- a/drivers/net/ethernet/intel/igb/igb_main.c
|
|
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
@@ -6926,44 +6926,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
|
|
static void igb_tsync_interrupt(struct igb_adapter *adapter)
|
|
{
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
- u32 ack = 0, tsicr = rd32(E1000_TSICR);
|
|
+ u32 tsicr = rd32(E1000_TSICR);
|
|
struct ptp_clock_event event;
|
|
|
|
if (tsicr & TSINTR_SYS_WRAP) {
|
|
event.type = PTP_CLOCK_PPS;
|
|
if (adapter->ptp_caps.pps)
|
|
ptp_clock_event(adapter->ptp_clock, &event);
|
|
- ack |= TSINTR_SYS_WRAP;
|
|
}
|
|
|
|
if (tsicr & E1000_TSICR_TXTS) {
|
|
/* retrieve hardware timestamp */
|
|
schedule_work(&adapter->ptp_tx_work);
|
|
- ack |= E1000_TSICR_TXTS;
|
|
}
|
|
|
|
- if (tsicr & TSINTR_TT0) {
|
|
+ if (tsicr & TSINTR_TT0)
|
|
igb_perout(adapter, 0);
|
|
- ack |= TSINTR_TT0;
|
|
- }
|
|
|
|
- if (tsicr & TSINTR_TT1) {
|
|
+ if (tsicr & TSINTR_TT1)
|
|
igb_perout(adapter, 1);
|
|
- ack |= TSINTR_TT1;
|
|
- }
|
|
|
|
- if (tsicr & TSINTR_AUTT0) {
|
|
+ if (tsicr & TSINTR_AUTT0)
|
|
igb_extts(adapter, 0);
|
|
- ack |= TSINTR_AUTT0;
|
|
- }
|
|
|
|
- if (tsicr & TSINTR_AUTT1) {
|
|
+ if (tsicr & TSINTR_AUTT1)
|
|
igb_extts(adapter, 1);
|
|
- ack |= TSINTR_AUTT1;
|
|
- }
|
|
-
|
|
- /* acknowledge the interrupts */
|
|
- wr32(E1000_TSICR, ack);
|
|
}
|
|
|
|
static irqreturn_t igb_msix_other(int irq, void *data)
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
|
|
index 90be87dc105d3..e6fe599f7bf3a 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
|
|
@@ -1346,7 +1346,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
|
|
|
|
/* Release thread waiting for completion */
|
|
lmac->cmd_pend = false;
|
|
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
|
|
+ wake_up(&lmac->wq_cmd_cmplt);
|
|
break;
|
|
case CGX_EVT_ASYNC:
|
|
if (cgx_event_is_linkevent(event))
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
|
|
index 9690ac01f02c8..7d741e3ba8c51 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
|
|
@@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
|
|
}
|
|
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
|
|
|
|
-void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
|
|
+static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
|
|
{
|
|
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
|
|
struct mbox_hdr *tx_hdr, *rx_hdr;
|
|
void *hw_mbase = mdev->hwbase;
|
|
+ u64 intr_val;
|
|
|
|
tx_hdr = hw_mbase + mbox->tx_start;
|
|
rx_hdr = hw_mbase + mbox->rx_start;
|
|
@@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
|
|
|
|
spin_unlock(&mdev->mbox_lock);
|
|
|
|
+ /* Check if interrupt pending */
|
|
+ intr_val = readq((void __iomem *)mbox->reg_base +
|
|
+ (mbox->trigger | (devid << mbox->tr_shift)));
|
|
+
|
|
+ intr_val |= data;
|
|
/* The interrupt should be fired after num_msgs is written
|
|
* to the shared memory
|
|
*/
|
|
- writeq(1, (void __iomem *)mbox->reg_base +
|
|
+ writeq(intr_val, (void __iomem *)mbox->reg_base +
|
|
(mbox->trigger | (devid << mbox->tr_shift)));
|
|
}
|
|
+
|
|
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
|
|
+{
|
|
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
|
|
+}
|
|
EXPORT_SYMBOL(otx2_mbox_msg_send);
|
|
|
|
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
|
|
+{
|
|
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
|
|
+}
|
|
+EXPORT_SYMBOL(otx2_mbox_msg_send_up);
|
|
+
|
|
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
|
|
+{
|
|
+ u64 data;
|
|
+
|
|
+ data = readq((void __iomem *)mbox->reg_base +
|
|
+ (mbox->trigger | (devid << mbox->tr_shift)));
|
|
+
|
|
+ /* If data is non-zero wait for ~1ms and return to caller
|
|
+ * whether data has changed to zero or not after the wait.
|
|
+ */
|
|
+ if (!data)
|
|
+ return true;
|
|
+
|
|
+ usleep_range(950, 1000);
|
|
+
|
|
+ data = readq((void __iomem *)mbox->reg_base +
|
|
+ (mbox->trigger | (devid << mbox->tr_shift)));
|
|
+
|
|
+ return data == 0;
|
|
+}
|
|
+EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
|
|
+
|
|
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
|
|
int size, int size_rsp)
|
|
{
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
|
|
index 03ebabd616353..be70269e91684 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
|
|
@@ -16,6 +16,9 @@
|
|
|
|
#define MBOX_SIZE SZ_64K
|
|
|
|
+#define MBOX_DOWN_MSG 1
|
|
+#define MBOX_UP_MSG 2
|
|
+
|
|
/* AF/PF: PF initiated, PF/VF VF initiated */
|
|
#define MBOX_DOWN_RX_START 0
|
|
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
|
|
@@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
|
|
struct pci_dev *pdev, void __force *reg_base,
|
|
int direction, int ndevs, unsigned long *bmap);
|
|
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
|
|
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
|
|
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
|
|
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
|
|
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
|
|
@@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
|
|
return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
|
|
}
|
|
|
|
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
|
|
+
|
|
/* Mailbox message types */
|
|
#define MBOX_MSG_MASK 0xFFFF
|
|
#define MBOX_MSG_INVALID 0xFFFE
|
|
@@ -196,6 +202,9 @@ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
|
|
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
|
|
msg_rsp) \
|
|
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
|
|
+M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
|
|
+M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
|
|
+ cpt_flt_eng_info_rsp) \
|
|
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
|
|
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
|
|
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
|
|
@@ -1702,6 +1711,28 @@ struct cpt_inst_lmtst_req {
|
|
u64 rsvd;
|
|
};
|
|
|
|
+/* Mailbox message format to request for CPT LF reset */
|
|
+struct cpt_lf_rst_req {
|
|
+ struct mbox_msghdr hdr;
|
|
+ u32 slot;
|
|
+ u32 rsvd;
|
|
+};
|
|
+
|
|
+/* Mailbox message format to request for CPT faulted engines */
|
|
+struct cpt_flt_eng_info_req {
|
|
+ struct mbox_msghdr hdr;
|
|
+ int blkaddr;
|
|
+ bool reset;
|
|
+ u32 rsvd;
|
|
+};
|
|
+
|
|
+struct cpt_flt_eng_info_rsp {
|
|
+ struct mbox_msghdr hdr;
|
|
+ u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
|
|
+ u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
|
|
+ u64 rsvd;
|
|
+};
|
|
+
|
|
struct sdp_node_info {
|
|
/* Node to which this PF belons to */
|
|
u8 node_id;
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
|
|
index dfd23580e3b8e..d39d86e694ccf 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
|
|
@@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
|
|
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
|
|
{
|
|
struct mcs_intr_info *req;
|
|
- int err, pf;
|
|
+ int pf;
|
|
|
|
pf = rvu_get_pf(event->pcifunc);
|
|
|
|
+ mutex_lock(&rvu->mbox_lock);
|
|
+
|
|
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
|
|
- if (!req)
|
|
+ if (!req) {
|
|
+ mutex_unlock(&rvu->mbox_lock);
|
|
return -ENOMEM;
|
|
+ }
|
|
|
|
req->mcs_id = event->mcs_id;
|
|
req->intr_mask = event->intr_mask;
|
|
@@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
|
|
req->hdr.pcifunc = event->pcifunc;
|
|
req->lmac_id = event->lmac_id;
|
|
|
|
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
|
|
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
|
|
- if (err)
|
|
- dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
|
|
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
|
|
+
|
|
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
|
|
+
|
|
+ mutex_unlock(&rvu->mbox_lock);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
|
|
index d88d86bf07b03..a7034b47ed6c9 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
|
|
@@ -1164,8 +1164,16 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
|
|
goto nix_err;
|
|
}
|
|
|
|
+ err = rvu_cpt_init(rvu);
|
|
+ if (err) {
|
|
+ dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
|
|
+ goto mcs_err;
|
|
+ }
|
|
+
|
|
return 0;
|
|
|
|
+mcs_err:
|
|
+ rvu_mcs_exit(rvu);
|
|
nix_err:
|
|
rvu_nix_freemem(rvu);
|
|
npa_err:
|
|
@@ -2106,7 +2114,7 @@ MBOX_MESSAGES
|
|
}
|
|
}
|
|
|
|
-static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
|
|
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
|
|
{
|
|
struct rvu *rvu = mwork->rvu;
|
|
int offset, err, id, devid;
|
|
@@ -2173,6 +2181,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
|
|
}
|
|
mw->mbox_wrk[devid].num_msgs = 0;
|
|
|
|
+ if (poll)
|
|
+ otx2_mbox_wait_for_zero(mbox, devid);
|
|
+
|
|
/* Send mbox responses to VF/PF */
|
|
otx2_mbox_msg_send(mbox, devid);
|
|
}
|
|
@@ -2180,15 +2191,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
|
|
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
|
|
{
|
|
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
|
|
+ struct rvu *rvu = mwork->rvu;
|
|
|
|
- __rvu_mbox_handler(mwork, TYPE_AFPF);
|
|
+ mutex_lock(&rvu->mbox_lock);
|
|
+ __rvu_mbox_handler(mwork, TYPE_AFPF, true);
|
|
+ mutex_unlock(&rvu->mbox_lock);
|
|
}
|
|
|
|
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
|
|
{
|
|
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
|
|
|
|
- __rvu_mbox_handler(mwork, TYPE_AFVF);
|
|
+ __rvu_mbox_handler(mwork, TYPE_AFVF, false);
|
|
}
|
|
|
|
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
|
|
@@ -2363,6 +2377,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
|
|
}
|
|
}
|
|
|
|
+ mutex_init(&rvu->mbox_lock);
|
|
+
|
|
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
|
|
if (!mbox_regions) {
|
|
err = -ENOMEM;
|
|
@@ -2512,10 +2528,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
|
|
}
|
|
}
|
|
|
|
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
|
|
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
|
|
{
|
|
struct rvu *rvu = (struct rvu *)rvu_irq;
|
|
- int vfs = rvu->vfs;
|
|
u64 intr;
|
|
|
|
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
|
|
@@ -2529,6 +2544,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
|
|
|
|
rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
|
|
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
|
|
+{
|
|
+ struct rvu *rvu = (struct rvu *)rvu_irq;
|
|
+ int vfs = rvu->vfs;
|
|
+ u64 intr;
|
|
+
|
|
+ /* Sync with mbox memory region */
|
|
+ rmb();
|
|
+
|
|
/* Handle VF interrupts */
|
|
if (vfs > 64) {
|
|
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
|
|
@@ -2865,7 +2892,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
|
|
/* Register mailbox interrupt handler */
|
|
sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
|
|
ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
|
|
- rvu_mbox_intr_handler, 0,
|
|
+ rvu_mbox_pf_intr_handler, 0,
|
|
&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
|
|
if (ret) {
|
|
dev_err(rvu->dev,
|
|
@@ -3039,9 +3066,8 @@ static int rvu_flr_init(struct rvu *rvu)
|
|
cfg | BIT_ULL(22));
|
|
}
|
|
|
|
- rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
|
|
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
|
|
- 1);
|
|
+ rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
|
|
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
|
|
if (!rvu->flr_wq)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
|
|
index 0b76dfa979d4e..a3ae21398ca74 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
|
|
@@ -109,6 +109,8 @@ struct rvu_block {
|
|
u64 lfreset_reg;
|
|
unsigned char name[NAME_SIZE];
|
|
struct rvu *rvu;
|
|
+ u64 cpt_flt_eng_map[3];
|
|
+ u64 cpt_rcvrd_eng_map[3];
|
|
};
|
|
|
|
struct nix_mcast {
|
|
@@ -506,6 +508,7 @@ struct rvu {
|
|
struct ptp *ptp;
|
|
|
|
int mcs_blk_cnt;
|
|
+ int cpt_pf_num;
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
struct rvu_debugfs rvu_dbg;
|
|
@@ -520,6 +523,10 @@ struct rvu {
|
|
struct list_head mcs_intrq_head;
|
|
/* mcs interrupt queue lock */
|
|
spinlock_t mcs_intrq_lock;
|
|
+ /* CPT interrupt lock */
|
|
+ spinlock_t cpt_intr_lock;
|
|
+
|
|
+ struct mutex mbox_lock; /* Serialize mbox up and down msgs */
|
|
};
|
|
|
|
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
|
|
@@ -872,6 +879,7 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
|
|
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
|
|
int slot);
|
|
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
|
|
+int rvu_cpt_init(struct rvu *rvu);
|
|
|
|
#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
|
|
#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
|
|
index bcb4385d0621c..d1e6b12ecfa70 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
|
|
@@ -232,7 +232,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
|
|
struct cgx_link_user_info *linfo;
|
|
struct cgx_link_info_msg *msg;
|
|
unsigned long pfmap;
|
|
- int err, pfid;
|
|
+ int pfid;
|
|
|
|
linfo = &event->link_uinfo;
|
|
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
|
|
@@ -250,16 +250,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
|
|
continue;
|
|
}
|
|
|
|
+ mutex_lock(&rvu->mbox_lock);
|
|
+
|
|
/* Send mbox message to PF */
|
|
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
|
|
- if (!msg)
|
|
+ if (!msg) {
|
|
+ mutex_unlock(&rvu->mbox_lock);
|
|
continue;
|
|
+ }
|
|
+
|
|
msg->link_info = *linfo;
|
|
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
|
|
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
|
|
- if (err)
|
|
- dev_warn(rvu->dev, "notification to pf %d failed\n",
|
|
- pfid);
|
|
+
|
|
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
|
|
+
|
|
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
|
|
+
|
|
+ mutex_unlock(&rvu->mbox_lock);
|
|
} while (pfmap);
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
|
|
index 38bbae5d9ae05..6fb02b93c1718 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
|
|
@@ -37,34 +37,68 @@
|
|
(_rsp)->free_sts_##etype = free_sts; \
|
|
})
|
|
|
|
-static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
|
|
+static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
|
|
{
|
|
struct rvu_block *block = ptr;
|
|
struct rvu *rvu = block->rvu;
|
|
int blkaddr = block->addr;
|
|
- u64 reg0, reg1, reg2;
|
|
-
|
|
- reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
|
|
- reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
|
|
- if (!is_rvu_otx2(rvu)) {
|
|
- reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
|
|
- dev_err_ratelimited(rvu->dev,
|
|
- "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
|
|
- reg0, reg1, reg2);
|
|
- } else {
|
|
- dev_err_ratelimited(rvu->dev,
|
|
- "Received CPTAF FLT irq : 0x%llx, 0x%llx",
|
|
- reg0, reg1);
|
|
+ u64 reg, val;
|
|
+ int i, eng;
|
|
+ u8 grp;
|
|
+
|
|
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
|
|
+ dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
|
|
+
|
|
+ i = -1;
|
|
+ while ((i = find_next_bit((unsigned long *)®, 64, i + 1)) < 64) {
|
|
+ switch (vec) {
|
|
+ case 0:
|
|
+ eng = i;
|
|
+ break;
|
|
+ case 1:
|
|
+ eng = i + 64;
|
|
+ break;
|
|
+ case 2:
|
|
+ eng = i + 128;
|
|
+ break;
|
|
+ }
|
|
+ grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
|
|
+ /* Disable and enable the engine which triggers fault */
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
|
|
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
|
|
+
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
|
|
+
|
|
+ spin_lock(&rvu->cpt_intr_lock);
|
|
+ block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
|
|
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
|
|
+ val = val & 0x3;
|
|
+ if (val == 0x1 || val == 0x2)
|
|
+ block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
|
|
+ spin_unlock(&rvu->cpt_intr_lock);
|
|
}
|
|
-
|
|
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
|
|
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
|
|
- if (!is_rvu_otx2(rvu))
|
|
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
|
|
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
+static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
|
|
+{
|
|
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
|
|
+}
|
|
+
|
|
+static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
|
|
+{
|
|
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
|
|
+}
|
|
+
|
|
+static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
|
|
+{
|
|
+ return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
|
|
+}
|
|
+
|
|
static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
|
|
{
|
|
struct rvu_block *block = ptr;
|
|
@@ -119,8 +153,10 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
|
|
int i;
|
|
|
|
/* Disable all CPT AF interrupts */
|
|
- for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
|
|
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
|
|
+
|
|
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
|
|
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
|
|
|
|
@@ -151,7 +187,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
|
|
|
|
/* Disable all CPT AF interrupts */
|
|
for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
|
|
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
|
|
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
|
|
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
|
|
|
|
@@ -172,16 +208,31 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
|
|
{
|
|
struct rvu *rvu = block->rvu;
|
|
int blkaddr = block->addr;
|
|
+ irq_handler_t flt_fn;
|
|
int i, ret;
|
|
|
|
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
|
|
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
|
|
+
|
|
+ switch (i) {
|
|
+ case CPT_10K_AF_INT_VEC_FLT0:
|
|
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
|
|
+ break;
|
|
+ case CPT_10K_AF_INT_VEC_FLT1:
|
|
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
|
|
+ break;
|
|
+ case CPT_10K_AF_INT_VEC_FLT2:
|
|
+ flt_fn = rvu_cpt_af_flt2_intr_handler;
|
|
+ break;
|
|
+ }
|
|
ret = rvu_cpt_do_register_interrupt(block, off + i,
|
|
- rvu_cpt_af_flt_intr_handler,
|
|
- &rvu->irq_name[(off + i) * NAME_SIZE]);
|
|
+ flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
|
|
if (ret)
|
|
goto err;
|
|
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
|
|
+ if (i == CPT_10K_AF_INT_VEC_FLT2)
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
|
|
+ else
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
|
|
}
|
|
|
|
ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
|
|
@@ -208,8 +259,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
|
|
{
|
|
struct rvu_hwinfo *hw = rvu->hw;
|
|
struct rvu_block *block;
|
|
+ irq_handler_t flt_fn;
|
|
int i, offs, ret = 0;
|
|
- char irq_name[16];
|
|
|
|
if (!is_block_implemented(rvu->hw, blkaddr))
|
|
return 0;
|
|
@@ -226,13 +277,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
|
|
return cpt_10k_register_interrupts(block, offs);
|
|
|
|
for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
|
|
- snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
|
|
+ sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
|
|
+ switch (i) {
|
|
+ case CPT_AF_INT_VEC_FLT0:
|
|
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
|
|
+ break;
|
|
+ case CPT_AF_INT_VEC_FLT1:
|
|
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
|
|
+ break;
|
|
+ }
|
|
ret = rvu_cpt_do_register_interrupt(block, offs + i,
|
|
- rvu_cpt_af_flt_intr_handler,
|
|
- irq_name);
|
|
+ flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
|
|
if (ret)
|
|
goto err;
|
|
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
|
|
}
|
|
|
|
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
|
|
@@ -290,7 +348,7 @@ static int get_cpt_pf_num(struct rvu *rvu)
|
|
|
|
static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
|
|
{
|
|
- int cpt_pf_num = get_cpt_pf_num(rvu);
|
|
+ int cpt_pf_num = rvu->cpt_pf_num;
|
|
|
|
if (rvu_get_pf(pcifunc) != cpt_pf_num)
|
|
return false;
|
|
@@ -302,7 +360,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
|
|
|
|
static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
|
|
{
|
|
- int cpt_pf_num = get_cpt_pf_num(rvu);
|
|
+ int cpt_pf_num = rvu->cpt_pf_num;
|
|
|
|
if (rvu_get_pf(pcifunc) != cpt_pf_num)
|
|
return false;
|
|
@@ -801,6 +859,64 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
|
|
return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
|
|
}
|
|
|
|
+int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
|
|
+ struct msg_rsp *rsp)
|
|
+{
|
|
+ u16 pcifunc = req->hdr.pcifunc;
|
|
+ struct rvu_block *block;
|
|
+ int cptlf, blkaddr, ret;
|
|
+ u16 actual_slot;
|
|
+ u64 ctl, ctl2;
|
|
+
|
|
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
|
|
+ req->slot, &actual_slot);
|
|
+ if (blkaddr < 0)
|
|
+ return CPT_AF_ERR_LF_INVALID;
|
|
+
|
|
+ block = &rvu->hw->block[blkaddr];
|
|
+
|
|
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
|
|
+ if (cptlf < 0)
|
|
+ return CPT_AF_ERR_LF_INVALID;
|
|
+ ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
|
|
+ ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
|
|
+
|
|
+ ret = rvu_lf_reset(rvu, block, cptlf);
|
|
+ if (ret)
|
|
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
|
|
+ block->addr, cptlf);
|
|
+
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
|
|
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
|
|
+ struct cpt_flt_eng_info_rsp *rsp)
|
|
+{
|
|
+ struct rvu_block *block;
|
|
+ unsigned long flags;
|
|
+ int blkaddr, vec;
|
|
+
|
|
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
|
|
+ if (blkaddr < 0)
|
|
+ return blkaddr;
|
|
+
|
|
+ block = &rvu->hw->block[blkaddr];
|
|
+ for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
|
|
+ spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
|
|
+ rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
|
|
+ rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
|
|
+ if (req->reset) {
|
|
+ block->cpt_flt_eng_map[vec] = 0x0;
|
|
+ block->cpt_rcvrd_eng_map[vec] = 0x0;
|
|
+ }
|
|
+ spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
|
|
{
|
|
struct cpt_rxc_time_cfg_req req;
|
|
@@ -940,7 +1056,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
|
|
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
|
|
int nix_blkaddr)
|
|
{
|
|
- int cpt_pf_num = get_cpt_pf_num(rvu);
|
|
+ int cpt_pf_num = rvu->cpt_pf_num;
|
|
struct cpt_inst_lmtst_req *req;
|
|
dma_addr_t res_daddr;
|
|
int timeout = 3000;
|
|
@@ -1084,3 +1200,12 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
|
|
|
|
return 0;
|
|
}
|
|
+
|
|
+int rvu_cpt_init(struct rvu *rvu)
|
|
+{
|
|
+ /* Retrieve CPT PF number */
|
|
+ rvu->cpt_pf_num = get_cpt_pf_num(rvu);
|
|
+ spin_lock_init(&rvu->cpt_intr_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
|
|
index 44950c2542bb7..c15d1864a6371 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
|
|
@@ -785,7 +785,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
|
|
|
|
if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
|
|
return 0;
|
|
- otx2_mbox_msg_send(&mbox->mbox_up, devid);
|
|
+ otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
|
|
err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
|
|
if (err)
|
|
return err;
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
|
|
index a2d8ac6204054..7e2c30927c312 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
|
|
@@ -272,8 +272,7 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
|
|
{
|
|
int vf;
|
|
|
|
- pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
|
|
- WQ_UNBOUND | WQ_HIGHPRI, 1);
|
|
+ pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
|
|
if (!pf->flr_wq)
|
|
return -ENOMEM;
|
|
|
|
@@ -292,8 +291,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
|
|
return 0;
|
|
}
|
|
|
|
-static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
|
|
- int first, int mdevs, u64 intr, int type)
|
|
+static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
|
|
+ int first, int mdevs, u64 intr)
|
|
{
|
|
struct otx2_mbox_dev *mdev;
|
|
struct otx2_mbox *mbox;
|
|
@@ -307,40 +306,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
|
|
|
|
mbox = &mw->mbox;
|
|
mdev = &mbox->dev[i];
|
|
- if (type == TYPE_PFAF)
|
|
- otx2_sync_mbox_bbuf(mbox, i);
|
|
hdr = mdev->mbase + mbox->rx_start;
|
|
/* The hdr->num_msgs is set to zero immediately in the interrupt
|
|
- * handler to ensure that it holds a correct value next time
|
|
- * when the interrupt handler is called.
|
|
- * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
|
|
- * pf>mbox.up_num_msgs holds the data for use in
|
|
- * pfaf_mbox_up_handler.
|
|
+ * handler to ensure that it holds a correct value next time
|
|
+ * when the interrupt handler is called. pf->mw[i].num_msgs
|
|
+ * holds the data for use in otx2_pfvf_mbox_handler and
|
|
+ * pf->mw[i].up_num_msgs holds the data for use in
|
|
+ * otx2_pfvf_mbox_up_handler.
|
|
*/
|
|
if (hdr->num_msgs) {
|
|
mw[i].num_msgs = hdr->num_msgs;
|
|
hdr->num_msgs = 0;
|
|
- if (type == TYPE_PFAF)
|
|
- memset(mbox->hwbase + mbox->rx_start, 0,
|
|
- ALIGN(sizeof(struct mbox_hdr),
|
|
- sizeof(u64)));
|
|
-
|
|
queue_work(mbox_wq, &mw[i].mbox_wrk);
|
|
}
|
|
|
|
mbox = &mw->mbox_up;
|
|
mdev = &mbox->dev[i];
|
|
- if (type == TYPE_PFAF)
|
|
- otx2_sync_mbox_bbuf(mbox, i);
|
|
hdr = mdev->mbase + mbox->rx_start;
|
|
if (hdr->num_msgs) {
|
|
mw[i].up_num_msgs = hdr->num_msgs;
|
|
hdr->num_msgs = 0;
|
|
- if (type == TYPE_PFAF)
|
|
- memset(mbox->hwbase + mbox->rx_start, 0,
|
|
- ALIGN(sizeof(struct mbox_hdr),
|
|
- sizeof(u64)));
|
|
-
|
|
queue_work(mbox_wq, &mw[i].mbox_up_wrk);
|
|
}
|
|
}
|
|
@@ -356,8 +341,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
|
|
/* Msgs are already copied, trigger VF's mbox irq */
|
|
smp_wmb();
|
|
|
|
+ otx2_mbox_wait_for_zero(pfvf_mbox, devid);
|
|
+
|
|
offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
|
|
- writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
|
|
+ writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
|
|
|
|
/* Restore VF's mbox bounce buffer region address */
|
|
src_mdev->mbase = bbuf_base;
|
|
@@ -547,7 +534,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
|
|
end:
|
|
offset = mbox->rx_start + msg->next_msgoff;
|
|
if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
|
|
- __otx2_mbox_reset(mbox, 0);
|
|
+ __otx2_mbox_reset(mbox, vf_idx);
|
|
mdev->msgs_acked++;
|
|
}
|
|
}
|
|
@@ -564,8 +551,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
|
|
if (vfs > 64) {
|
|
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
|
|
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
|
|
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
|
|
- TYPE_PFVF);
|
|
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
|
|
if (intr)
|
|
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
|
|
vfs = 64;
|
|
@@ -574,7 +560,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
|
|
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
|
|
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
|
|
|
|
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
|
|
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
|
|
|
|
if (intr)
|
|
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
|
|
@@ -599,7 +585,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
|
|
|
|
pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
|
|
WQ_UNBOUND | WQ_HIGHPRI |
|
|
- WQ_MEM_RECLAIM, 1);
|
|
+ WQ_MEM_RECLAIM, 0);
|
|
if (!pf->mbox_pfvf_wq)
|
|
return -ENOMEM;
|
|
|
|
@@ -822,20 +808,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
|
|
struct mbox *af_mbox;
|
|
struct otx2_nic *pf;
|
|
int offset, id;
|
|
+ u16 num_msgs;
|
|
|
|
af_mbox = container_of(work, struct mbox, mbox_wrk);
|
|
mbox = &af_mbox->mbox;
|
|
mdev = &mbox->dev[0];
|
|
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
+ num_msgs = rsp_hdr->num_msgs;
|
|
|
|
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
|
|
pf = af_mbox->pfvf;
|
|
|
|
- for (id = 0; id < af_mbox->num_msgs; id++) {
|
|
+ for (id = 0; id < num_msgs; id++) {
|
|
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
|
|
otx2_process_pfaf_mbox_msg(pf, msg);
|
|
offset = mbox->rx_start + msg->next_msgoff;
|
|
- if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
|
|
+ if (mdev->msgs_acked == (num_msgs - 1))
|
|
__otx2_mbox_reset(mbox, 0);
|
|
mdev->msgs_acked++;
|
|
}
|
|
@@ -946,12 +934,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
|
|
int offset, id, devid = 0;
|
|
struct mbox_hdr *rsp_hdr;
|
|
struct mbox_msghdr *msg;
|
|
+ u16 num_msgs;
|
|
|
|
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
+ num_msgs = rsp_hdr->num_msgs;
|
|
|
|
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
|
|
|
|
- for (id = 0; id < af_mbox->up_num_msgs; id++) {
|
|
+ for (id = 0; id < num_msgs; id++) {
|
|
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
|
|
|
|
devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
|
|
@@ -960,10 +950,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
|
|
otx2_process_mbox_msg_up(pf, msg);
|
|
offset = mbox->rx_start + msg->next_msgoff;
|
|
}
|
|
- if (devid) {
|
|
+ /* Forward to VF iff VFs are really present */
|
|
+ if (devid && pci_num_vf(pf->pdev)) {
|
|
otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
|
|
MBOX_DIR_PFVF_UP, devid - 1,
|
|
- af_mbox->up_num_msgs);
|
|
+ num_msgs);
|
|
return;
|
|
}
|
|
|
|
@@ -973,16 +964,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
|
|
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
|
|
{
|
|
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
|
|
- struct mbox *mbox;
|
|
+ struct mbox *mw = &pf->mbox;
|
|
+ struct otx2_mbox_dev *mdev;
|
|
+ struct otx2_mbox *mbox;
|
|
+ struct mbox_hdr *hdr;
|
|
+ u64 mbox_data;
|
|
|
|
/* Clear the IRQ */
|
|
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
|
|
|
|
- mbox = &pf->mbox;
|
|
|
|
- trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
|
|
+ mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
|
|
+
|
|
+ if (mbox_data & MBOX_UP_MSG) {
|
|
+ mbox_data &= ~MBOX_UP_MSG;
|
|
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
|
|
+
|
|
+ mbox = &mw->mbox_up;
|
|
+ mdev = &mbox->dev[0];
|
|
+ otx2_sync_mbox_bbuf(mbox, 0);
|
|
+
|
|
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
+ if (hdr->num_msgs)
|
|
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
|
|
+
|
|
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
|
|
+ BIT_ULL(0));
|
|
+ }
|
|
+
|
|
+ if (mbox_data & MBOX_DOWN_MSG) {
|
|
+ mbox_data &= ~MBOX_DOWN_MSG;
|
|
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
|
|
+
|
|
+ mbox = &mw->mbox;
|
|
+ mdev = &mbox->dev[0];
|
|
+ otx2_sync_mbox_bbuf(mbox, 0);
|
|
+
|
|
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
+ if (hdr->num_msgs)
|
|
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
|
|
|
|
- otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
|
|
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
|
|
+ BIT_ULL(0));
|
|
+ }
|
|
|
|
return IRQ_HANDLED;
|
|
}
|
|
@@ -1063,9 +1087,8 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
|
|
int err;
|
|
|
|
mbox->pfvf = pf;
|
|
- pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
|
|
- WQ_UNBOUND | WQ_HIGHPRI |
|
|
- WQ_MEM_RECLAIM, 1);
|
|
+ pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
|
|
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
|
|
if (!pf->mbox_wq)
|
|
return -ENOMEM;
|
|
|
|
@@ -3030,6 +3053,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
|
|
struct otx2_vf_config *config;
|
|
struct cgx_link_info_msg *req;
|
|
struct mbox_msghdr *msghdr;
|
|
+ struct delayed_work *dwork;
|
|
struct otx2_nic *pf;
|
|
int vf_idx;
|
|
|
|
@@ -3038,10 +3062,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
|
|
vf_idx = config - config->pf->vf_configs;
|
|
pf = config->pf;
|
|
|
|
+ if (config->intf_down)
|
|
+ return;
|
|
+
|
|
+ mutex_lock(&pf->mbox.lock);
|
|
+
|
|
+ dwork = &config->link_event_work;
|
|
+
|
|
+ if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
|
|
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
|
|
+ mutex_unlock(&pf->mbox.lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
|
|
sizeof(*req), sizeof(struct msg_rsp));
|
|
if (!msghdr) {
|
|
dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
|
|
+ mutex_unlock(&pf->mbox.lock);
|
|
return;
|
|
}
|
|
|
|
@@ -3050,7 +3088,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
|
|
req->hdr.sig = OTX2_MBOX_REQ_SIG;
|
|
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
|
|
|
|
+ otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
|
|
+
|
|
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
|
|
+
|
|
+ mutex_unlock(&pf->mbox.lock);
|
|
}
|
|
|
|
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
|
|
index 404855bccb4b6..dcb8190de2407 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
|
|
@@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
|
|
struct otx2_mbox *mbox;
|
|
struct mbox *af_mbox;
|
|
int offset, id;
|
|
+ u16 num_msgs;
|
|
|
|
af_mbox = container_of(work, struct mbox, mbox_wrk);
|
|
mbox = &af_mbox->mbox;
|
|
mdev = &mbox->dev[0];
|
|
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
- if (af_mbox->num_msgs == 0)
|
|
+ num_msgs = rsp_hdr->num_msgs;
|
|
+
|
|
+ if (num_msgs == 0)
|
|
return;
|
|
+
|
|
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
|
|
|
|
- for (id = 0; id < af_mbox->num_msgs; id++) {
|
|
+ for (id = 0; id < num_msgs; id++) {
|
|
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
|
|
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
|
|
offset = mbox->rx_start + msg->next_msgoff;
|
|
@@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
|
|
struct mbox *vf_mbox;
|
|
struct otx2_nic *vf;
|
|
int offset, id;
|
|
+ u16 num_msgs;
|
|
|
|
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
|
|
vf = vf_mbox->pfvf;
|
|
@@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
|
|
mdev = &mbox->dev[0];
|
|
|
|
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
- if (vf_mbox->up_num_msgs == 0)
|
|
+ num_msgs = rsp_hdr->num_msgs;
|
|
+
|
|
+ if (num_msgs == 0)
|
|
return;
|
|
|
|
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
|
|
|
|
- for (id = 0; id < vf_mbox->up_num_msgs; id++) {
|
|
+ for (id = 0; id < num_msgs; id++) {
|
|
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
|
|
otx2vf_process_mbox_msg_up(vf, msg);
|
|
offset = mbox->rx_start + msg->next_msgoff;
|
|
@@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
|
|
struct otx2_mbox_dev *mdev;
|
|
struct otx2_mbox *mbox;
|
|
struct mbox_hdr *hdr;
|
|
+ u64 mbox_data;
|
|
|
|
/* Clear the IRQ */
|
|
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
|
|
|
|
+ mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
|
|
+
|
|
/* Read latest mbox data */
|
|
smp_rmb();
|
|
|
|
- /* Check for PF => VF response messages */
|
|
- mbox = &vf->mbox.mbox;
|
|
- mdev = &mbox->dev[0];
|
|
- otx2_sync_mbox_bbuf(mbox, 0);
|
|
+ if (mbox_data & MBOX_DOWN_MSG) {
|
|
+ mbox_data &= ~MBOX_DOWN_MSG;
|
|
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
|
|
|
|
- trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
|
|
+ /* Check for PF => VF response messages */
|
|
+ mbox = &vf->mbox.mbox;
|
|
+ mdev = &mbox->dev[0];
|
|
+ otx2_sync_mbox_bbuf(mbox, 0);
|
|
|
|
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
- if (hdr->num_msgs) {
|
|
- vf->mbox.num_msgs = hdr->num_msgs;
|
|
- hdr->num_msgs = 0;
|
|
- memset(mbox->hwbase + mbox->rx_start, 0,
|
|
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
|
|
- queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
|
|
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
+ if (hdr->num_msgs)
|
|
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
|
|
+
|
|
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
|
|
+ BIT_ULL(0));
|
|
}
|
|
- /* Check for PF => VF notification messages */
|
|
- mbox = &vf->mbox.mbox_up;
|
|
- mdev = &mbox->dev[0];
|
|
- otx2_sync_mbox_bbuf(mbox, 0);
|
|
|
|
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
- if (hdr->num_msgs) {
|
|
- vf->mbox.up_num_msgs = hdr->num_msgs;
|
|
- hdr->num_msgs = 0;
|
|
- memset(mbox->hwbase + mbox->rx_start, 0,
|
|
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
|
|
- queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
|
|
+ if (mbox_data & MBOX_UP_MSG) {
|
|
+ mbox_data &= ~MBOX_UP_MSG;
|
|
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
|
|
+
|
|
+ /* Check for PF => VF notification messages */
|
|
+ mbox = &vf->mbox.mbox_up;
|
|
+ mdev = &mbox->dev[0];
|
|
+ otx2_sync_mbox_bbuf(mbox, 0);
|
|
+
|
|
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
|
|
+ if (hdr->num_msgs)
|
|
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
|
|
+
|
|
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
|
|
+ BIT_ULL(0));
|
|
}
|
|
|
|
return IRQ_HANDLED;
|
|
@@ -293,9 +308,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
|
|
int err;
|
|
|
|
mbox->pfvf = vf;
|
|
- vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
|
|
- WQ_UNBOUND | WQ_HIGHPRI |
|
|
- WQ_MEM_RECLAIM, 1);
|
|
+ vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
|
|
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
|
|
if (!vf->mbox_wq)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
index 17e6ac4445afc..fecf3dd22dfaa 100644
|
|
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
@@ -561,8 +561,7 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
|
|
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
|
|
mcr_new = mcr_cur;
|
|
mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
|
|
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
|
|
- MAC_MCR_RX_FIFO_CLR_DIS;
|
|
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
|
|
|
|
/* Only update control register when needed! */
|
|
if (mcr_new != mcr_cur)
|
|
@@ -610,7 +609,7 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
|
|
phylink_config);
|
|
u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
|
|
|
|
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
|
|
+ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
|
|
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
|
|
}
|
|
|
|
@@ -649,7 +648,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
|
|
if (rx_pause)
|
|
mcr |= MAC_MCR_FORCE_RX_FC;
|
|
|
|
- mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
|
|
+ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
|
|
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
|
|
index d6eed204574a9..c64211e22ae70 100644
|
|
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
|
|
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
|
|
@@ -811,7 +811,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
|
|
MTK_PPE_KEEPALIVE_DISABLE) |
|
|
FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
|
|
FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
|
|
- MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
|
|
+ MTK_PPE_SCAN_MODE_CHECK_AGE) |
|
|
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
|
|
MTK_PPE_ENTRIES_SHIFT);
|
|
if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
|
|
@@ -895,17 +895,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
|
|
|
|
mtk_ppe_cache_enable(ppe, false);
|
|
|
|
- /* disable offload engine */
|
|
- ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
|
|
- ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
|
|
-
|
|
/* disable aging */
|
|
val = MTK_PPE_TB_CFG_AGE_NON_L4 |
|
|
MTK_PPE_TB_CFG_AGE_UNBIND |
|
|
MTK_PPE_TB_CFG_AGE_TCP |
|
|
MTK_PPE_TB_CFG_AGE_UDP |
|
|
- MTK_PPE_TB_CFG_AGE_TCP_FIN;
|
|
+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
|
|
+ MTK_PPE_TB_CFG_SCAN_MODE;
|
|
ppe_clear(ppe, MTK_PPE_TB_CFG, val);
|
|
|
|
- return mtk_ppe_wait_busy(ppe);
|
|
+ if (mtk_ppe_wait_busy(ppe))
|
|
+ return -ETIMEDOUT;
|
|
+
|
|
+ /* disable offload engine */
|
|
+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
|
|
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
|
|
+
|
|
+ return 0;
|
|
}
|
|
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
|
|
index e92860e20a24a..c6a2c302a8c8b 100644
|
|
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
|
|
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
|
|
@@ -308,6 +308,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
|
|
|
|
acti_netdevs = kmalloc_array(entry->slave_cnt,
|
|
sizeof(*acti_netdevs), GFP_KERNEL);
|
|
+ if (!acti_netdevs) {
|
|
+ schedule_delayed_work(&lag->work,
|
|
+ NFP_FL_LAG_DELAY);
|
|
+ continue;
|
|
+ }
|
|
|
|
/* Include sanity check in the loop. It may be that a bond has
|
|
* changed between processing the last notification and the
|
|
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
|
|
index 267e6fd3d4448..57411ee1d8374 100644
|
|
--- a/drivers/net/phy/dp83822.c
|
|
+++ b/drivers/net/phy/dp83822.c
|
|
@@ -380,7 +380,7 @@ static int dp83822_config_init(struct phy_device *phydev)
|
|
{
|
|
struct dp83822_private *dp83822 = phydev->priv;
|
|
struct device *dev = &phydev->mdio.dev;
|
|
- int rgmii_delay;
|
|
+ int rgmii_delay = 0;
|
|
s32 rx_int_delay;
|
|
s32 tx_int_delay;
|
|
int err = 0;
|
|
@@ -390,30 +390,33 @@ static int dp83822_config_init(struct phy_device *phydev)
|
|
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
|
|
true);
|
|
|
|
- if (rx_int_delay <= 0)
|
|
- rgmii_delay = 0;
|
|
- else
|
|
- rgmii_delay = DP83822_RX_CLK_SHIFT;
|
|
+ /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
|
|
+ if (rx_int_delay > 0)
|
|
+ rgmii_delay |= DP83822_RX_CLK_SHIFT;
|
|
|
|
tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
|
|
false);
|
|
+
|
|
+ /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
|
|
if (tx_int_delay <= 0)
|
|
- rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
|
|
- else
|
|
rgmii_delay |= DP83822_TX_CLK_SHIFT;
|
|
|
|
- if (rgmii_delay) {
|
|
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
|
|
- MII_DP83822_RCSR, rgmii_delay);
|
|
- if (err)
|
|
- return err;
|
|
- }
|
|
+ err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
|
|
+ DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
|
|
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
|
|
|
|
- phy_set_bits_mmd(phydev, DP83822_DEVADDR,
|
|
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
|
|
+ if (err)
|
|
+ return err;
|
|
} else {
|
|
- phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
|
|
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
|
|
+ err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
|
|
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
|
|
+
|
|
+ if (err)
|
|
+ return err;
|
|
}
|
|
|
|
if (dp83822->fx_enabled) {
|
|
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
|
|
index 944f76e6fc8eb..f25b0d338ca8d 100644
|
|
--- a/drivers/net/phy/phy_device.c
|
|
+++ b/drivers/net/phy/phy_device.c
|
|
@@ -2640,8 +2640,8 @@ EXPORT_SYMBOL(genphy_resume);
|
|
int genphy_loopback(struct phy_device *phydev, bool enable)
|
|
{
|
|
if (enable) {
|
|
- u16 val, ctl = BMCR_LOOPBACK;
|
|
- int ret;
|
|
+ u16 ctl = BMCR_LOOPBACK;
|
|
+ int ret, val;
|
|
|
|
ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
|
|
|
|
@@ -2893,7 +2893,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
|
|
if (delay < 0)
|
|
return delay;
|
|
|
|
- if (delay && size == 0)
|
|
+ if (size == 0)
|
|
return delay;
|
|
|
|
if (delay < delay_values[0] || delay > delay_values[size - 1]) {
|
|
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
|
|
index a530f20ee2575..2fa46baa589e5 100644
|
|
--- a/drivers/net/usb/smsc95xx.c
|
|
+++ b/drivers/net/usb/smsc95xx.c
|
|
@@ -2104,6 +2104,11 @@ static const struct usb_device_id products[] = {
|
|
USB_DEVICE(0x0424, 0x9E08),
|
|
.driver_info = (unsigned long) &smsc95xx_info,
|
|
},
|
|
+ {
|
|
+ /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
|
|
+ USB_DEVICE(0x0878, 0x1400),
|
|
+ .driver_info = (unsigned long)&smsc95xx_info,
|
|
+ },
|
|
{
|
|
/* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
|
|
USB_DEVICE(0x184F, 0x0051),
|
|
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
|
|
index f5e19f3ef6cdd..4de5144821835 100644
|
|
--- a/drivers/net/usb/sr9800.c
|
|
+++ b/drivers/net/usb/sr9800.c
|
|
@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
|
|
|
|
data->eeprom_len = SR9800_EEPROM_LEN;
|
|
|
|
- usbnet_get_endpoints(dev, intf);
|
|
+ ret = usbnet_get_endpoints(dev, intf);
|
|
+ if (ret)
|
|
+ goto out;
|
|
|
|
/* LED Setting Rule :
|
|
* AABB:CCDD
|
|
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
|
|
index dd9f5f1461921..8dcd3b6e143b9 100644
|
|
--- a/drivers/net/veth.c
|
|
+++ b/drivers/net/veth.c
|
|
@@ -1444,8 +1444,6 @@ static netdev_features_t veth_fix_features(struct net_device *dev,
|
|
if (peer_priv->_xdp_prog)
|
|
features &= ~NETIF_F_GSO_SOFTWARE;
|
|
}
|
|
- if (priv->_xdp_prog)
|
|
- features |= NETIF_F_GRO;
|
|
|
|
return features;
|
|
}
|
|
@@ -1542,14 +1540,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
|
|
}
|
|
|
|
if (!old_prog) {
|
|
- if (!veth_gro_requested(dev)) {
|
|
- /* user-space did not require GRO, but adding
|
|
- * XDP is supposed to get GRO working
|
|
- */
|
|
- dev->features |= NETIF_F_GRO;
|
|
- netdev_features_change(dev);
|
|
- }
|
|
-
|
|
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
|
|
peer->max_mtu = max_mtu;
|
|
}
|
|
@@ -1560,14 +1550,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
|
|
if (dev->flags & IFF_UP)
|
|
veth_disable_xdp(dev);
|
|
|
|
- /* if user-space did not require GRO, since adding XDP
|
|
- * enabled it, clear it now
|
|
- */
|
|
- if (!veth_gro_requested(dev)) {
|
|
- dev->features &= ~NETIF_F_GRO;
|
|
- netdev_features_change(dev);
|
|
- }
|
|
-
|
|
if (peer) {
|
|
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
|
|
peer->max_mtu = ETH_MAX_MTU;
|
|
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
|
|
index a176653c88616..db01ec03bda00 100644
|
|
--- a/drivers/net/wireguard/receive.c
|
|
+++ b/drivers/net/wireguard/receive.c
|
|
@@ -251,7 +251,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
|
|
|
|
if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
|
|
wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
|
|
- keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
|
|
+ READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) {
|
|
WRITE_ONCE(keypair->receiving.is_valid, false);
|
|
return false;
|
|
}
|
|
@@ -318,7 +318,7 @@ static bool counter_validate(struct noise_replay_counter *counter, u64 their_cou
|
|
for (i = 1; i <= top; ++i)
|
|
counter->backtrack[(i + index_current) &
|
|
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
|
|
- counter->counter = their_counter;
|
|
+ WRITE_ONCE(counter->counter, their_counter);
|
|
}
|
|
|
|
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
|
|
@@ -463,7 +463,7 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
|
|
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
|
|
peer->device->dev->name,
|
|
PACKET_CB(skb)->nonce,
|
|
- keypair->receiving_counter.counter);
|
|
+ READ_ONCE(keypair->receiving_counter.counter));
|
|
goto next;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
|
|
index 876410a47d1d2..4d5009604eee7 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
|
|
@@ -844,6 +844,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
|
|
}
|
|
|
|
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
|
|
+ if (!ev) {
|
|
+ kfree(tb);
|
|
+ return -EPROTO;
|
|
+ }
|
|
|
|
arg->desc_id = ev->desc_id;
|
|
arg->status = ev->status;
|
|
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
|
|
index 21c6b36dc6ebb..51fc77e93de5c 100644
|
|
--- a/drivers/net/wireless/ath/ath11k/mac.c
|
|
+++ b/drivers/net/wireless/ath/ath11k/mac.c
|
|
@@ -2112,6 +2112,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
|
|
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
|
|
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
|
|
|
|
+ /* Initialize rx_mcs_160 to 9 which is an invalid value */
|
|
+ rx_mcs_160 = 9;
|
|
if (support_160) {
|
|
for (i = 7; i >= 0; i--) {
|
|
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
|
|
@@ -2123,6 +2125,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
|
|
}
|
|
}
|
|
|
|
+ /* Initialize rx_mcs_80 to 9 which is an invalid value */
|
|
+ rx_mcs_80 = 9;
|
|
for (i = 7; i >= 0; i--) {
|
|
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
|
|
index 237f4ec2cffd7..6c33e898b3000 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/htc.h
|
|
+++ b/drivers/net/wireless/ath/ath9k/htc.h
|
|
@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
|
|
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
|
|
struct timer_list cleanup_timer;
|
|
spinlock_t tx_lock;
|
|
- bool initialized;
|
|
};
|
|
|
|
struct ath9k_htc_tx_ctl {
|
|
@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
|
|
unsigned long ps_usecount;
|
|
bool ps_enabled;
|
|
bool ps_idle;
|
|
+ bool initialized;
|
|
|
|
#ifdef CONFIG_MAC80211_LEDS
|
|
enum led_brightness brightness;
|
|
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
|
|
index 96a3185a96d75..b014185373f34 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
|
|
@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
|
|
|
|
htc_handle->drv_priv = priv;
|
|
|
|
+ /* Allow ath9k_wmi_event_tasklet() to operate. */
|
|
+ smp_wmb();
|
|
+ priv->initialized = true;
|
|
+
|
|
return 0;
|
|
|
|
err_init:
|
|
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
|
|
index d6a3f001dacb9..2fdd27885f543 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
|
|
@@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
|
|
skb_queue_head_init(&priv->tx.data_vo_queue);
|
|
skb_queue_head_init(&priv->tx.tx_failed);
|
|
|
|
- /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
|
|
- smp_wmb();
|
|
- priv->tx.initialized = true;
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
|
|
index 1476b42b52a91..805ad31edba2b 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/wmi.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
|
|
@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
|
|
}
|
|
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
|
|
|
|
+ /* Check if ath9k_htc_probe_device() completed. */
|
|
+ if (!data_race(priv->initialized)) {
|
|
+ kfree_skb(skb);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
hdr = (struct wmi_cmd_hdr *) skb->data;
|
|
cmd_id = be16_to_cpu(hdr->command_id);
|
|
wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
|
|
@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
|
|
&wmi->drv_priv->fatal_work);
|
|
break;
|
|
case WMI_TXSTATUS_EVENTID:
|
|
- /* Check if ath9k_tx_init() completed. */
|
|
- if (!data_race(priv->tx.initialized))
|
|
- break;
|
|
-
|
|
spin_lock_bh(&priv->tx.tx_lock);
|
|
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
|
|
spin_unlock_bh(&priv->tx.tx_lock);
|
|
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
|
|
index 67b4bac048e58..c0d8fc0b22fb2 100644
|
|
--- a/drivers/net/wireless/broadcom/b43/b43.h
|
|
+++ b/drivers/net/wireless/broadcom/b43/b43.h
|
|
@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
|
|
return dev->__using_pio_transfers;
|
|
}
|
|
|
|
+static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
|
|
+{
|
|
+ if (dev->qos_enabled)
|
|
+ ieee80211_wake_queue(dev->wl->hw, queue_prio);
|
|
+ else
|
|
+ ieee80211_wake_queue(dev->wl->hw, 0);
|
|
+}
|
|
+
|
|
+static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
|
|
+{
|
|
+ if (dev->qos_enabled)
|
|
+ ieee80211_stop_queue(dev->wl->hw, queue_prio);
|
|
+ else
|
|
+ ieee80211_stop_queue(dev->wl->hw, 0);
|
|
+}
|
|
+
|
|
/* Message printing */
|
|
__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
|
|
__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
|
|
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
|
|
index 9a7c62bd5e431..cfaf2f9d67b22 100644
|
|
--- a/drivers/net/wireless/broadcom/b43/dma.c
|
|
+++ b/drivers/net/wireless/broadcom/b43/dma.c
|
|
@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
|
|
should_inject_overflow(ring)) {
|
|
/* This TX ring is full. */
|
|
unsigned int skb_mapping = skb_get_queue_mapping(skb);
|
|
- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
|
|
+ b43_stop_queue(dev, skb_mapping);
|
|
dev->wl->tx_queue_stopped[skb_mapping] = true;
|
|
ring->stopped = true;
|
|
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
|
|
@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
|
|
} else {
|
|
/* If the driver queue is running wake the corresponding
|
|
* mac80211 queue. */
|
|
- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
|
|
+ b43_wake_queue(dev, ring->queue_prio);
|
|
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
|
|
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
|
|
}
|
|
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
|
|
index b2539a916fd04..bdfa68cc7ee2a 100644
|
|
--- a/drivers/net/wireless/broadcom/b43/main.c
|
|
+++ b/drivers/net/wireless/broadcom/b43/main.c
|
|
@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
|
|
|
|
start_ieee80211:
|
|
wl->hw->queues = B43_QOS_QUEUE_NUM;
|
|
- if (!modparam_qos || dev->fw.opensource)
|
|
+ if (!modparam_qos || dev->fw.opensource ||
|
|
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
|
|
wl->hw->queues = 1;
|
|
|
|
err = ieee80211_register_hw(wl->hw);
|
|
@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
|
|
err = b43_dma_tx(dev, skb);
|
|
if (err == -ENOSPC) {
|
|
wl->tx_queue_stopped[queue_num] = true;
|
|
- ieee80211_stop_queue(wl->hw, queue_num);
|
|
+ b43_stop_queue(dev, queue_num);
|
|
skb_queue_head(&wl->tx_queue[queue_num], skb);
|
|
break;
|
|
}
|
|
@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
|
|
struct sk_buff *skb)
|
|
{
|
|
struct b43_wl *wl = hw_to_b43_wl(hw);
|
|
+ u16 skb_queue_mapping;
|
|
|
|
if (unlikely(skb->len < 2 + 2 + 6)) {
|
|
/* Too short, this can't be a valid frame. */
|
|
@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
|
|
}
|
|
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
|
|
|
|
- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
|
|
- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
|
|
+ skb_queue_mapping = skb_get_queue_mapping(skb);
|
|
+ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
|
|
+ if (!wl->tx_queue_stopped[skb_queue_mapping])
|
|
ieee80211_queue_work(wl->hw, &wl->tx_work);
|
|
- } else {
|
|
- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
|
|
- }
|
|
+ else
|
|
+ b43_stop_queue(wl->current_dev, skb_queue_mapping);
|
|
}
|
|
|
|
static void b43_qos_params_upload(struct b43_wldev *dev,
|
|
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
|
|
index 8c28a9250cd19..cc19b589fa70d 100644
|
|
--- a/drivers/net/wireless/broadcom/b43/pio.c
|
|
+++ b/drivers/net/wireless/broadcom/b43/pio.c
|
|
@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
|
|
if (total_len > (q->buffer_size - q->buffer_used)) {
|
|
/* Not enough memory on the queue. */
|
|
err = -EBUSY;
|
|
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
|
|
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
|
|
q->stopped = true;
|
|
goto out;
|
|
}
|
|
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
|
|
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
|
|
(q->free_packet_slots == 0)) {
|
|
/* The queue is full. */
|
|
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
|
|
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
|
|
q->stopped = true;
|
|
}
|
|
|
|
@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
|
|
list_add(&pack->list, &q->packets_list);
|
|
|
|
if (q->stopped) {
|
|
- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
|
|
+ b43_wake_queue(dev, q->queue_prio);
|
|
q->stopped = false;
|
|
}
|
|
}
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
|
|
index ccc621b8ed9f2..4a1fe982a948e 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
|
|
@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
|
|
return sh;
|
|
}
|
|
|
|
-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
|
|
+static void wlc_phy_timercb_phycal(void *ptr)
|
|
{
|
|
+ struct brcms_phy *pi = ptr;
|
|
uint delay = 5;
|
|
|
|
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
|
|
index a0de5db0cd646..b723817915365 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
|
|
@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
|
|
}
|
|
|
|
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
|
|
- void (*fn)(struct brcms_phy *pi),
|
|
+ void (*fn)(void *pi),
|
|
void *arg, const char *name)
|
|
{
|
|
return (struct wlapi_timer *)
|
|
- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
|
|
- arg, name);
|
|
+ brcms_init_timer(physhim->wl, fn, arg, name);
|
|
}
|
|
|
|
void wlapi_free_timer(struct wlapi_timer *t)
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
|
|
index dd8774717adee..27d0934e600ed 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
|
|
@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
|
|
|
|
/* PHY to WL utility functions */
|
|
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
|
|
- void (*fn)(struct brcms_phy *pi),
|
|
+ void (*fn)(void *pi),
|
|
void *arg, const char *name);
|
|
void wlapi_free_timer(struct wlapi_timer *t);
|
|
void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
|
|
index f5fcc547de391..235963e1d7a9a 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
|
|
@@ -725,7 +725,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
|
|
* from index 1, so the maximum value allowed here is
|
|
* ACPI_SAR_PROFILES_NUM - 1.
|
|
*/
|
|
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
|
|
+ if (n_profiles >= ACPI_SAR_PROFILE_NUM) {
|
|
ret = -EINVAL;
|
|
goto out_free;
|
|
}
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
index 5979d904bbbd2..677c9e0b46f10 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
@@ -103,6 +103,12 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
|
|
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
|
|
return -EINVAL;
|
|
|
|
+ /* we use this as a string, ensure input was NUL terminated */
|
|
+ if (strnlen(debug_info->debug_cfg_name,
|
|
+ sizeof(debug_info->debug_cfg_name)) ==
|
|
+ sizeof(debug_info->debug_cfg_name))
|
|
+ return -EINVAL;
|
|
+
|
|
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
|
|
debug_info->debug_cfg_name);
|
|
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
|
|
index 2748459d12279..88f4f429d875c 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
|
|
@@ -461,12 +461,10 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
|
|
struct wowlan_key_rsc_v5_data data = {};
|
|
int i;
|
|
|
|
- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
|
|
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
|
|
if (!data.rsc)
|
|
return -ENOMEM;
|
|
|
|
- memset(data.rsc, 0xff, sizeof(*data.rsc));
|
|
-
|
|
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
|
|
data.rsc->mcast_key_id_map[i] =
|
|
IWL_MCAST_KEY_MAP_INVALID;
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
|
|
index f268a31ce26d9..105f283b777d2 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
|
|
@@ -299,6 +299,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
|
u32 status,
|
|
struct ieee80211_rx_status *stats)
|
|
{
|
|
+ struct wireless_dev *wdev;
|
|
struct iwl_mvm_sta *mvmsta;
|
|
struct iwl_mvm_vif *mvmvif;
|
|
u8 keyid;
|
|
@@ -320,9 +321,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
|
if (!ieee80211_is_beacon(hdr->frame_control))
|
|
return 0;
|
|
|
|
+ if (!sta)
|
|
+ return -1;
|
|
+
|
|
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
|
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
|
|
+
|
|
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
|
|
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
|
|
- return -1;
|
|
+ goto report;
|
|
|
|
/* good cases */
|
|
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
|
|
@@ -331,13 +338,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
|
return 0;
|
|
}
|
|
|
|
- if (!sta)
|
|
- return -1;
|
|
-
|
|
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
|
-
|
|
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
|
|
-
|
|
/*
|
|
* both keys will have the same cipher and MIC length, use
|
|
* whichever one is available
|
|
@@ -346,11 +346,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
|
if (!key) {
|
|
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
|
|
if (!key)
|
|
- return -1;
|
|
+ goto report;
|
|
}
|
|
|
|
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
|
|
- return -1;
|
|
+ goto report;
|
|
|
|
/* get the real key ID */
|
|
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
|
|
@@ -364,7 +364,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
|
return -1;
|
|
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
|
|
if (!key)
|
|
- return -1;
|
|
+ goto report;
|
|
}
|
|
|
|
/* Report status to mac80211 */
|
|
@@ -372,6 +372,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
|
ieee80211_key_mic_failure(key);
|
|
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
|
|
ieee80211_key_replay(key);
|
|
+report:
|
|
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
|
|
+ if (wdev->netdev)
|
|
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
|
|
|
|
return -1;
|
|
}
|
|
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
|
|
index 104d2b6dc9af6..5a525da434c28 100644
|
|
--- a/drivers/net/wireless/marvell/libertas/cmd.c
|
|
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
|
|
@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
|
|
if (!cmdarray[i].cmdbuf) {
|
|
lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
|
|
ret = -1;
|
|
- goto done;
|
|
+ goto free_cmd_array;
|
|
}
|
|
}
|
|
|
|
@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
|
|
init_waitqueue_head(&cmdarray[i].cmdwait_q);
|
|
lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
|
|
}
|
|
- ret = 0;
|
|
+ return 0;
|
|
|
|
+free_cmd_array:
|
|
+ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
|
|
+ if (cmdarray[i].cmdbuf) {
|
|
+ kfree(cmdarray[i].cmdbuf);
|
|
+ cmdarray[i].cmdbuf = NULL;
|
|
+ }
|
|
+ }
|
|
+ kfree(priv->cmd_array);
|
|
+ priv->cmd_array = NULL;
|
|
done:
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
|
|
index 63f232c723374..55ca5b287fe7f 100644
|
|
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
|
|
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
|
|
@@ -964,9 +964,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
|
|
priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
|
|
mwifiex_dfs_dir);
|
|
|
|
- if (!priv->dfs_dev_dir)
|
|
- return;
|
|
-
|
|
MWIFIEX_DFS_ADD_FILE(info);
|
|
MWIFIEX_DFS_ADD_FILE(debug);
|
|
MWIFIEX_DFS_ADD_FILE(getlog);
|
|
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
|
|
index b545d93c6e374..6f3245a43aef1 100644
|
|
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
|
|
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
|
|
@@ -1615,7 +1615,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
|
|
cfg80211_unregister_netdevice(vif->ndev);
|
|
vif->monitor_flag = 0;
|
|
|
|
- wilc_set_operation_mode(vif, 0, 0, 0);
|
|
mutex_lock(&wl->vif_mutex);
|
|
list_del_rcu(&vif->list);
|
|
wl->vif_num--;
|
|
@@ -1810,15 +1809,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
|
|
INIT_LIST_HEAD(&wl->rxq_head.list);
|
|
INIT_LIST_HEAD(&wl->vif_list);
|
|
|
|
+ wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
|
|
+ wiphy_name(wl->wiphy));
|
|
+ if (!wl->hif_workqueue) {
|
|
+ ret = -ENOMEM;
|
|
+ goto free_cfg;
|
|
+ }
|
|
vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
|
|
NL80211_IFTYPE_STATION, false);
|
|
if (IS_ERR(vif)) {
|
|
ret = PTR_ERR(vif);
|
|
- goto free_cfg;
|
|
+ goto free_hq;
|
|
}
|
|
|
|
return 0;
|
|
|
|
+free_hq:
|
|
+ destroy_workqueue(wl->hif_workqueue);
|
|
+
|
|
free_cfg:
|
|
wilc_wlan_cfg_deinit(wl);
|
|
|
|
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
|
|
index a1b75feec6edf..5eb02902e875a 100644
|
|
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
|
|
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
|
|
@@ -374,38 +374,49 @@ static void handle_connect_timeout(struct work_struct *work)
|
|
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
|
|
struct cfg80211_crypto_settings *crypto)
|
|
{
|
|
- struct wilc_join_bss_param *param;
|
|
- struct ieee80211_p2p_noa_attr noa_attr;
|
|
- u8 rates_len = 0;
|
|
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
|
|
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
|
|
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
|
|
+ struct ieee80211_p2p_noa_attr noa_attr;
|
|
+ const struct cfg80211_bss_ies *ies;
|
|
+ struct wilc_join_bss_param *param;
|
|
+ u8 rates_len = 0, ies_len;
|
|
int ret;
|
|
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
|
|
|
|
param = kzalloc(sizeof(*param), GFP_KERNEL);
|
|
if (!param)
|
|
return NULL;
|
|
|
|
+ rcu_read_lock();
|
|
+ ies = rcu_dereference(bss->ies);
|
|
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
|
|
+ if (!ies_data) {
|
|
+ rcu_read_unlock();
|
|
+ kfree(param);
|
|
+ return NULL;
|
|
+ }
|
|
+ ies_len = ies->len;
|
|
+ rcu_read_unlock();
|
|
+
|
|
param->beacon_period = cpu_to_le16(bss->beacon_interval);
|
|
param->cap_info = cpu_to_le16(bss->capability);
|
|
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
|
|
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
|
|
ether_addr_copy(param->bssid, bss->bssid);
|
|
|
|
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
|
|
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
|
|
if (ssid_elm) {
|
|
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
|
|
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
|
|
}
|
|
|
|
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
|
|
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
|
|
if (tim_elm && tim_elm[1] >= 2)
|
|
param->dtim_period = tim_elm[3];
|
|
|
|
memset(param->p_suites, 0xFF, 3);
|
|
memset(param->akm_suites, 0xFF, 3);
|
|
|
|
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
|
|
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
|
|
if (rates_ie) {
|
|
rates_len = rates_ie[1];
|
|
if (rates_len > WILC_MAX_RATES_SUPPORTED)
|
|
@@ -416,7 +427,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
|
|
|
|
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
|
|
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
|
|
- ies->data, ies->len);
|
|
+ ies_data, ies_len);
|
|
if (supp_rates_ie) {
|
|
u8 ext_rates = supp_rates_ie[1];
|
|
|
|
@@ -431,11 +442,11 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
|
|
}
|
|
}
|
|
|
|
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
|
|
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
|
|
if (ht_ie)
|
|
param->ht_capable = true;
|
|
|
|
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
|
|
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
|
|
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
|
|
(u8 *)&noa_attr, sizeof(noa_attr));
|
|
if (ret > 0) {
|
|
@@ -459,7 +470,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
|
|
}
|
|
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
|
|
WLAN_OUI_TYPE_MICROSOFT_WMM,
|
|
- ies->data, ies->len);
|
|
+ ies_data, ies_len);
|
|
if (wmm_ie) {
|
|
struct ieee80211_wmm_param_ie *ie;
|
|
|
|
@@ -474,13 +485,13 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
|
|
|
|
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
|
|
WLAN_OUI_TYPE_MICROSOFT_WPA,
|
|
- ies->data, ies->len);
|
|
+ ies_data, ies_len);
|
|
if (wpa_ie) {
|
|
param->mode_802_11i = 1;
|
|
param->rsn_found = true;
|
|
}
|
|
|
|
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
|
|
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
|
|
if (rsn_ie) {
|
|
int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
|
|
int offset = 8;
|
|
@@ -514,6 +525,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
|
|
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
|
|
}
|
|
|
|
+ kfree(ies_data);
|
|
return (void *)param;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
|
|
index e9f59de31b0b9..b714da48eaa17 100644
|
|
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
|
|
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
|
|
@@ -878,8 +878,7 @@ static const struct net_device_ops wilc_netdev_ops = {
|
|
|
|
void wilc_netdev_cleanup(struct wilc *wilc)
|
|
{
|
|
- struct wilc_vif *vif;
|
|
- int srcu_idx, ifc_cnt = 0;
|
|
+ struct wilc_vif *vif, *vif_tmp;
|
|
|
|
if (!wilc)
|
|
return;
|
|
@@ -889,32 +888,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
|
|
wilc->firmware = NULL;
|
|
}
|
|
|
|
- srcu_idx = srcu_read_lock(&wilc->srcu);
|
|
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
|
|
+ list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
|
|
+ mutex_lock(&wilc->vif_mutex);
|
|
+ list_del_rcu(&vif->list);
|
|
+ wilc->vif_num--;
|
|
+ mutex_unlock(&wilc->vif_mutex);
|
|
+ synchronize_srcu(&wilc->srcu);
|
|
if (vif->ndev)
|
|
unregister_netdev(vif->ndev);
|
|
}
|
|
- srcu_read_unlock(&wilc->srcu, srcu_idx);
|
|
|
|
wilc_wfi_deinit_mon_interface(wilc, false);
|
|
destroy_workqueue(wilc->hif_workqueue);
|
|
|
|
- while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
|
|
- mutex_lock(&wilc->vif_mutex);
|
|
- if (wilc->vif_num <= 0) {
|
|
- mutex_unlock(&wilc->vif_mutex);
|
|
- break;
|
|
- }
|
|
- vif = wilc_get_wl_to_vif(wilc);
|
|
- if (!IS_ERR(vif))
|
|
- list_del_rcu(&vif->list);
|
|
-
|
|
- wilc->vif_num--;
|
|
- mutex_unlock(&wilc->vif_mutex);
|
|
- synchronize_srcu(&wilc->srcu);
|
|
- ifc_cnt++;
|
|
- }
|
|
-
|
|
wilc_wlan_cfg_deinit(wilc);
|
|
wlan_deinit_locks(wilc);
|
|
wiphy_unregister(wilc->wiphy);
|
|
@@ -977,13 +963,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
|
|
goto error;
|
|
}
|
|
|
|
- wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
|
|
- ndev->name);
|
|
- if (!wl->hif_workqueue) {
|
|
- ret = -ENOMEM;
|
|
- goto unregister_netdev;
|
|
- }
|
|
-
|
|
ndev->needs_free_netdev = true;
|
|
vif->iftype = vif_type;
|
|
vif->idx = wilc_get_available_idx(wl);
|
|
@@ -996,12 +975,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
|
|
|
|
return vif;
|
|
|
|
-unregister_netdev:
|
|
+error:
|
|
if (rtnl_locked)
|
|
cfg80211_unregister_netdevice(ndev);
|
|
else
|
|
unregister_netdev(ndev);
|
|
- error:
|
|
free_netdev(ndev);
|
|
return ERR_PTR(ret);
|
|
}
|
|
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index b0fc5e68feeca..5877e2c1fa0fc 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -191,11 +191,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
 		/* assert ENABLE: */
 		gpiod_set_value(gpios->enable, 1);
 		mdelay(5);
-		/* assert RESET: */
-		gpiod_set_value(gpios->reset, 1);
-	} else {
 		/* deassert RESET: */
 		gpiod_set_value(gpios->reset, 0);
+	} else {
+		/* assert RESET: */
+		gpiod_set_value(gpios->reset, 1);
 		/* deassert ENABLE: */
 		gpiod_set_value(gpios->enable, 0);
 	}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 6dd5ec1e4d8c3..ccac47dd781d6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -6542,6 +6542,7 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
 	if (priv->usb_interrupts)
 		rtl8xxxu_write32(priv, REG_USB_HIMR, 0);

+	cancel_work_sync(&priv->c2hcmd_work);
 	cancel_delayed_work_sync(&priv->ra_watchdog);

 	rtl8xxxu_free_rx_resources(priv);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
|
|
index 4c8164db4a9e4..81f3112923f1c 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/main.c
|
|
+++ b/drivers/net/wireless/realtek/rtw88/main.c
|
|
@@ -1989,8 +1989,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
|
|
rtw_phy_setup_phy_cond(rtwdev, 0);
|
|
|
|
rtw_phy_init_tx_power(rtwdev);
|
|
- if (rfe_def->agc_btg_tbl)
|
|
- rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
|
|
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
|
|
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
|
|
rtw_phy_tx_power_by_rate_config(hal);
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
|
|
index bd7d05e080848..fde7b532bc07e 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/phy.c
|
|
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
|
|
@@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
|
|
|
|
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
|
|
{
|
|
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
|
|
const struct rtw_chip_info *chip = rtwdev->chip;
|
|
u8 rf_path;
|
|
|
|
rtw_load_table(rtwdev, chip->mac_tbl);
|
|
rtw_load_table(rtwdev, chip->bb_tbl);
|
|
rtw_load_table(rtwdev, chip->agc_tbl);
|
|
+ if (rfe_def->agc_btg_tbl)
|
|
+ rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
|
|
rtw_load_rfk_table(rtwdev);
|
|
|
|
for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 609a2b86330d8..50e3e46f7d8aa 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -674,9 +674,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)

 	dm_info->cck_fa_cnt = cck_fa_cnt;
 	dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+	dm_info->total_fa_cnt = ofdm_fa_cnt;
 	if (cck_enable)
 		dm_info->total_fa_cnt += cck_fa_cnt;
-	dm_info->total_fa_cnt = ofdm_fa_cnt;

 	crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
 	dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
|
|
index 073e870b26415..871667650dbef 100644
|
|
--- a/drivers/net/wireless/silabs/wfx/sta.c
|
|
+++ b/drivers/net/wireless/silabs/wfx/sta.c
|
|
@@ -362,6 +362,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
|
|
const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
|
|
const int pairwise_cipher_suite_size = 4 / sizeof(u16);
|
|
const int akm_suite_size = 4 / sizeof(u16);
|
|
+ int ret = -EINVAL;
|
|
const u16 *ptr;
|
|
|
|
if (unlikely(!skb))
|
|
@@ -370,22 +371,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
|
|
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
|
|
skb->len - ieoffset);
|
|
if (unlikely(!ptr))
|
|
- return -EINVAL;
|
|
+ goto free_skb;
|
|
|
|
ptr += pairwise_cipher_suite_count_offset;
|
|
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
|
|
- return -EINVAL;
|
|
+ goto free_skb;
|
|
|
|
ptr += 1 + pairwise_cipher_suite_size * *ptr;
|
|
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
|
|
- return -EINVAL;
|
|
+ goto free_skb;
|
|
|
|
ptr += 1 + akm_suite_size * *ptr;
|
|
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
|
|
- return -EINVAL;
|
|
+ goto free_skb;
|
|
|
|
wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
|
|
- return 0;
|
|
+ ret = 0;
|
|
+
|
|
+free_skb:
|
|
+ dev_kfree_skb(skb);
|
|
+ return ret;
|
|
}
|
|
|
|
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
|
|
index 27dd93deff6e5..d702bee780826 100644
|
|
--- a/drivers/ntb/core.c
|
|
+++ b/drivers/ntb/core.c
|
|
@@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client);
|
|
|
|
int ntb_register_device(struct ntb_dev *ntb)
|
|
{
|
|
+ int ret;
|
|
+
|
|
if (!ntb)
|
|
return -EINVAL;
|
|
if (!ntb->pdev)
|
|
@@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb)
|
|
ntb->ctx_ops = NULL;
|
|
spin_lock_init(&ntb->ctx_lock);
|
|
|
|
- return device_register(&ntb->dev);
|
|
+ ret = device_register(&ntb->dev);
|
|
+ if (ret)
|
|
+ put_device(&ntb->dev);
|
|
+
|
|
+ return ret;
|
|
}
|
|
EXPORT_SYMBOL(ntb_register_device);
|
|
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index 0c088db944706..d7516e99275b6 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -4971,7 +4971,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
|
|
set->ops = ops;
|
|
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
|
|
if (ctrl->ops->flags & NVME_F_FABRICS)
|
|
- set->reserved_tags = NVMF_RESERVED_TAGS;
|
|
+ /* Reserved for fabric connect and keep alive */
|
|
+ set->reserved_tags = 2;
|
|
set->numa_node = ctrl->numa_node;
|
|
set->flags = BLK_MQ_F_NO_SCHED;
|
|
if (ctrl->ops->flags & NVME_F_BLOCKING)
|
|
@@ -5029,7 +5030,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
|
|
memset(set, 0, sizeof(*set));
|
|
set->ops = ops;
|
|
set->queue_depth = ctrl->sqsize + 1;
|
|
- set->reserved_tags = NVMF_RESERVED_TAGS;
|
|
+ /*
|
|
+ * Some Apple controllers requires tags to be unique across admin and
|
|
+ * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
|
|
+ */
|
|
+ if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
|
|
+ set->reserved_tags = NVME_AQ_DEPTH;
|
|
+ else if (ctrl->ops->flags & NVME_F_FABRICS)
|
|
+ /* Reserved for fabric connect */
|
|
+ set->reserved_tags = 1;
|
|
set->numa_node = ctrl->numa_node;
|
|
set->flags = BLK_MQ_F_SHOULD_MERGE;
|
|
if (ctrl->ops->flags & NVME_F_BLOCKING)
|
|
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
|
|
index dcac3df8a5f76..60c238caf7a97 100644
|
|
--- a/drivers/nvme/host/fabrics.h
|
|
+++ b/drivers/nvme/host/fabrics.h
|
|
@@ -18,13 +18,6 @@
|
|
/* default is -1: the fail fast mechanism is disabled */
|
|
#define NVMF_DEF_FAIL_FAST_TMO -1
|
|
|
|
-/*
|
|
- * Reserved one command for internal usage. This command is used for sending
|
|
- * the connect command, as well as for the keep alive command on the admin
|
|
- * queue once live.
|
|
- */
|
|
-#define NVMF_RESERVED_TAGS 1
|
|
-
|
|
/*
|
|
* Define a host as seen by the target. We allocate one at boot, but also
|
|
* allow the override it when creating controllers. This is both to provide
|
|
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 2c7fb683441ef..de81bbf4be100 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
 			    size_t count, loff_t *ppos)
 {
 	struct icc_path *path = fp->private_data;
+	const char *name = icc_get_name(path);
 	char buf[64];
-	int i;
+	int i = 0;

-	i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
+	if (name)
+		i = scnprintf(buf, sizeof(buf), "%.62s\n", name);

 	return simple_read_from_buffer(userbuf, count, ppos, buf, i);
 }
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
|
|
index 8c6931210ac4d..b4c1a4f6029d4 100644
|
|
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
|
|
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
|
|
@@ -1281,14 +1281,11 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
ret = ntb_register_device(&ndev->ntb);
|
|
if (ret) {
|
|
dev_err(dev, "Failed to register NTB device\n");
|
|
- goto err_register_dev;
|
|
+ return ret;
|
|
}
|
|
|
|
dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
|
|
return 0;
|
|
-
|
|
-err_register_dev:
|
|
- return -EINVAL;
|
|
}
|
|
|
|
static struct pci_device_id pci_vntb_table[] = {
|
|
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
|
|
index e1d02b7c60294..9950deeb047a7 100644
|
|
--- a/drivers/pci/pci.h
|
|
+++ b/drivers/pci/pci.h
|
|
@@ -357,11 +357,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
|
|
return 0;
|
|
}
|
|
|
|
-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
|
|
-{
|
|
- return dev->error_state == pci_channel_io_perm_failure;
|
|
-}
|
|
-
|
|
/* pci_dev priv_flags */
|
|
#define PCI_DEV_ADDED 0
|
|
#define PCI_DPC_RECOVERED 1
|
|
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index a5d7c69b764e0..08800282825e1 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -231,7 +231,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)

 	for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
 		pci_read_config_dword(pdev,
-			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
+			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
 		pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
 	}
 clear_status:
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 51d634fbdfb8e..c175b70a984c6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5415,6 +5415,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)

 	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
 }
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
|
|
index 3f3320d0a4f8f..d05a482639e3c 100644
|
|
--- a/drivers/pci/switch/switchtec.c
|
|
+++ b/drivers/pci/switch/switchtec.c
|
|
@@ -1674,7 +1674,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
|
|
rc = switchtec_init_isr(stdev);
|
|
if (rc) {
|
|
dev_err(&stdev->dev, "failed to init isr.\n");
|
|
- goto err_put;
|
|
+ goto err_exit_pci;
|
|
}
|
|
|
|
iowrite32(SWITCHTEC_EVENT_CLEAR |
|
|
@@ -1695,6 +1695,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
|
|
|
|
err_devadd:
|
|
stdev_kill(stdev);
|
|
+err_exit_pci:
|
|
+ switchtec_exit_pci(stdev);
|
|
err_put:
|
|
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
|
|
put_device(&stdev->dev);
|
|
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
|
|
index 47e7c3206939f..899e4ed49905c 100644
|
|
--- a/drivers/perf/arm-cmn.c
|
|
+++ b/drivers/perf/arm-cmn.c
|
|
@@ -2178,6 +2178,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
|
|
dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
|
|
continue;
|
|
}
|
|
+ /*
|
|
+ * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
|
|
+ * child count larger than the number of valid child pointers.
|
|
+ * A child offset of 0 can only occur on CMN-600; otherwise it
|
|
+ * would imply the root node being its own grandchild, which
|
|
+ * we can safely dismiss in general.
|
|
+ */
|
|
+ if (reg == 0 && cmn->part != PART_CMN600) {
|
|
+ dev_dbg(cmn->dev, "bogus child pointer?\n");
|
|
+ continue;
|
|
+ }
|
|
|
|
arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
|
|
|
|
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
|
|
index a02f7c3269707..09edcf47effec 100644
|
|
--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
|
|
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
|
|
@@ -1198,7 +1198,6 @@ static const struct mtk_pin_reg_calc mt8186_reg_cals[PINCTRL_PIN_REG_MAX] = {
|
|
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8186_pin_dir_range),
|
|
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8186_pin_di_range),
|
|
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8186_pin_do_range),
|
|
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8186_pin_dir_range),
|
|
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8186_pin_smt_range),
|
|
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8186_pin_ies_range),
|
|
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8186_pin_pu_range),
|
|
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
|
|
index 9695f4ec6aba9..f120268c00f56 100644
|
|
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
|
|
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
|
|
@@ -1379,7 +1379,6 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
|
|
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range),
|
|
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range),
|
|
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range),
|
|
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range),
|
|
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range),
|
|
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range),
|
|
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range),
|
|
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
|
|
index f7d02513d8cc1..e79037dc85796 100644
|
|
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
|
|
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
|
|
@@ -1571,8 +1571,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
|
|
* Then mask the pins that need to be sleeping now when we're
|
|
* switching to the ALT C function.
|
|
*/
|
|
- for (i = 0; i < g->grp.npins; i++)
|
|
- slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
|
|
+ for (i = 0; i < g->grp.npins; i++) {
|
|
+ unsigned int bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
|
|
+ slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(bit);
|
|
+ }
|
|
nmk_gpio_glitch_slpm_init(slpm);
|
|
}
|
|
|
|
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
|
|
index 43a63a21a6fb5..acf7664ea835b 100644
|
|
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
|
|
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
|
|
@@ -2360,6 +2360,30 @@ static const unsigned int scif_clk_mux[] = {
|
|
SCIF_CLK_MARK,
|
|
};
|
|
|
|
+static const unsigned int scif_clk2_pins[] = {
|
|
+ /* SCIF_CLK2 */
|
|
+ RCAR_GP_PIN(8, 11),
|
|
+};
|
|
+static const unsigned int scif_clk2_mux[] = {
|
|
+ SCIF_CLK2_MARK,
|
|
+};
|
|
+
|
|
+/* - SSI ------------------------------------------------- */
|
|
+static const unsigned int ssi_data_pins[] = {
|
|
+ /* SSI_SD */
|
|
+ RCAR_GP_PIN(1, 20),
|
|
+};
|
|
+static const unsigned int ssi_data_mux[] = {
|
|
+ SSI_SD_MARK,
|
|
+};
|
|
+static const unsigned int ssi_ctrl_pins[] = {
|
|
+ /* SSI_SCK, SSI_WS */
|
|
+ RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 19),
|
|
+};
|
|
+static const unsigned int ssi_ctrl_mux[] = {
|
|
+ SSI_SCK_MARK, SSI_WS_MARK,
|
|
+};
|
|
+
|
|
/* - TPU ------------------------------------------------------------------- */
|
|
static const unsigned int tpu_to0_pins[] = {
|
|
/* TPU0TO0 */
|
|
@@ -2651,6 +2675,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
|
|
SH_PFC_PIN_GROUP(scif4_clk),
|
|
SH_PFC_PIN_GROUP(scif4_ctrl),
|
|
SH_PFC_PIN_GROUP(scif_clk),
|
|
+ SH_PFC_PIN_GROUP(scif_clk2),
|
|
+
|
|
+ SH_PFC_PIN_GROUP(ssi_data),
|
|
+ SH_PFC_PIN_GROUP(ssi_ctrl),
|
|
|
|
SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
|
|
SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
|
|
@@ -2964,6 +2992,15 @@ static const char * const scif_clk_groups[] = {
|
|
"scif_clk",
|
|
};
|
|
|
|
+static const char * const scif_clk2_groups[] = {
|
|
+ "scif_clk2",
|
|
+};
|
|
+
|
|
+static const char * const ssi_groups[] = {
|
|
+ "ssi_data",
|
|
+ "ssi_ctrl",
|
|
+};
|
|
+
|
|
static const char * const tpu_groups[] = {
|
|
/* suffix might be updated */
|
|
"tpu_to0",
|
|
@@ -3044,6 +3081,9 @@ static const struct sh_pfc_function pinmux_functions[] = {
|
|
SH_PFC_FUNCTION(scif3),
|
|
SH_PFC_FUNCTION(scif4),
|
|
SH_PFC_FUNCTION(scif_clk),
|
|
+ SH_PFC_FUNCTION(scif_clk2),
|
|
+
|
|
+ SH_PFC_FUNCTION(ssi),
|
|
|
|
SH_PFC_FUNCTION(tpu),
|
|
|
|
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 9193c3b8edebe..ae7ee611978ba 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -219,7 +219,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
 	ret = freq_qos_add_request(&policy->constraints,
 				   &dtpm_cpu->qos_req, FREQ_QOS_MAX,
 				   pd->table[pd->nr_perf_states - 1].frequency);
-	if (ret)
+	if (ret < 0)
 		goto out_dtpm_unregister;

 	cpufreq_cpu_put(policy);
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
|
|
index a43b2babc8093..3e9c94a8d7f72 100644
|
|
--- a/drivers/pwm/pwm-atmel-hlcdc.c
|
|
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
|
|
@@ -38,11 +38,11 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
|
|
return container_of(chip, struct atmel_hlcdc_pwm, chip);
|
|
}
|
|
|
|
-static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
|
|
+static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
|
|
const struct pwm_state *state)
|
|
{
|
|
- struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
|
|
- struct atmel_hlcdc *hlcdc = chip->hlcdc;
|
|
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
|
|
+ struct atmel_hlcdc *hlcdc = atmel->hlcdc;
|
|
unsigned int status;
|
|
int ret;
|
|
|
|
@@ -54,7 +54,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
|
|
u32 pwmcfg;
|
|
int pres;
|
|
|
|
- if (!chip->errata || !chip->errata->slow_clk_erratum) {
|
|
+ if (!atmel->errata || !atmel->errata->slow_clk_erratum) {
|
|
clk_freq = clk_get_rate(new_clk);
|
|
if (!clk_freq)
|
|
return -EINVAL;
|
|
@@ -64,7 +64,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
|
|
}
|
|
|
|
/* Errata: cannot use slow clk on some IP revisions */
|
|
- if ((chip->errata && chip->errata->slow_clk_erratum) ||
|
|
+ if ((atmel->errata && atmel->errata->slow_clk_erratum) ||
|
|
clk_period_ns > state->period) {
|
|
new_clk = hlcdc->sys_clk;
|
|
clk_freq = clk_get_rate(new_clk);
|
|
@@ -77,8 +77,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
|
|
|
|
for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
|
|
/* Errata: cannot divide by 1 on some IP revisions */
|
|
- if (!pres && chip->errata &&
|
|
- chip->errata->div1_clk_erratum)
|
|
+ if (!pres && atmel->errata &&
|
|
+ atmel->errata->div1_clk_erratum)
|
|
continue;
|
|
|
|
if ((clk_period_ns << pres) >= state->period)
|
|
@@ -90,7 +90,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
|
|
|
|
pwmcfg = ATMEL_HLCDC_PWMPS(pres);
|
|
|
|
- if (new_clk != chip->cur_clk) {
|
|
+ if (new_clk != atmel->cur_clk) {
|
|
u32 gencfg = 0;
|
|
int ret;
|
|
|
|
@@ -98,8 +98,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
|
|
if (ret)
|
|
return ret;
|
|
|
|
- clk_disable_unprepare(chip->cur_clk);
|
|
- chip->cur_clk = new_clk;
|
|
+ clk_disable_unprepare(atmel->cur_clk);
|
|
+ atmel->cur_clk = new_clk;
|
|
|
|
if (new_clk == hlcdc->sys_clk)
|
|
gencfg = ATMEL_HLCDC_CLKPWMSEL;
|
|
@@ -160,8 +160,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
|
|
if (ret)
|
|
return ret;
|
|
|
|
- clk_disable_unprepare(chip->cur_clk);
|
|
- chip->cur_clk = NULL;
|
|
+ clk_disable_unprepare(atmel->cur_clk);
|
|
+ atmel->cur_clk = NULL;
|
|
}
|
|
|
|
return 0;
|
|
@@ -183,31 +183,32 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
|
|
#ifdef CONFIG_PM_SLEEP
|
|
static int atmel_hlcdc_pwm_suspend(struct device *dev)
|
|
{
|
|
- struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
|
|
+ struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
|
|
|
|
/* Keep the periph clock enabled if the PWM is still running. */
|
|
- if (pwm_is_enabled(&chip->chip.pwms[0]))
|
|
- clk_disable_unprepare(chip->hlcdc->periph_clk);
|
|
+ if (!pwm_is_enabled(&atmel->chip.pwms[0]))
|
|
+ clk_disable_unprepare(atmel->hlcdc->periph_clk);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int atmel_hlcdc_pwm_resume(struct device *dev)
|
|
{
|
|
- struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
|
|
+ struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
|
|
struct pwm_state state;
|
|
int ret;
|
|
|
|
- pwm_get_state(&chip->chip.pwms[0], &state);
|
|
+ pwm_get_state(&atmel->chip.pwms[0], &state);
|
|
|
|
/* Re-enable the periph clock it was stopped during suspend. */
|
|
if (!state.enabled) {
|
|
- ret = clk_prepare_enable(chip->hlcdc->periph_clk);
|
|
+ ret = clk_prepare_enable(atmel->hlcdc->periph_clk);
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
|
|
- return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state);
|
|
+ return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
|
|
+ &state);
|
|
}
|
|
#endif
|
|
|
|
@@ -244,14 +245,14 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
|
|
{
|
|
const struct of_device_id *match;
|
|
struct device *dev = &pdev->dev;
|
|
- struct atmel_hlcdc_pwm *chip;
|
|
+ struct atmel_hlcdc_pwm *atmel;
|
|
struct atmel_hlcdc *hlcdc;
|
|
int ret;
|
|
|
|
hlcdc = dev_get_drvdata(dev->parent);
|
|
|
|
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
|
|
- if (!chip)
|
|
+ atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL);
|
|
+ if (!atmel)
|
|
return -ENOMEM;
|
|
|
|
ret = clk_prepare_enable(hlcdc->periph_clk);
|
|
@@ -260,33 +261,31 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
|
|
|
|
match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node);
|
|
if (match)
|
|
- chip->errata = match->data;
|
|
+ atmel->errata = match->data;
|
|
|
|
- chip->hlcdc = hlcdc;
|
|
- chip->chip.ops = &atmel_hlcdc_pwm_ops;
|
|
- chip->chip.dev = dev;
|
|
- chip->chip.npwm = 1;
|
|
+ atmel->hlcdc = hlcdc;
|
|
+ atmel->chip.ops = &atmel_hlcdc_pwm_ops;
|
|
+ atmel->chip.dev = dev;
|
|
+ atmel->chip.npwm = 1;
|
|
|
|
- ret = pwmchip_add(&chip->chip);
|
|
+ ret = pwmchip_add(&atmel->chip);
|
|
if (ret) {
|
|
clk_disable_unprepare(hlcdc->periph_clk);
|
|
return ret;
|
|
}
|
|
|
|
- platform_set_drvdata(pdev, chip);
|
|
+ platform_set_drvdata(pdev, atmel);
|
|
|
|
return 0;
|
|
}
|
|
|
|
-static int atmel_hlcdc_pwm_remove(struct platform_device *pdev)
|
|
+static void atmel_hlcdc_pwm_remove(struct platform_device *pdev)
|
|
{
|
|
- struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
|
|
+ struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev);
|
|
|
|
- pwmchip_remove(&chip->chip);
|
|
+ pwmchip_remove(&atmel->chip);
|
|
|
|
- clk_disable_unprepare(chip->hlcdc->periph_clk);
|
|
-
|
|
- return 0;
|
|
+ clk_disable_unprepare(atmel->hlcdc->periph_clk);
|
|
}
|
|
|
|
static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = {
|
|
@@ -301,7 +300,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = {
|
|
.pm = &atmel_hlcdc_pwm_pm_ops,
|
|
},
|
|
.probe = atmel_hlcdc_pwm_probe,
|
|
- .remove = atmel_hlcdc_pwm_remove,
|
|
+ .remove_new = atmel_hlcdc_pwm_remove,
|
|
};
|
|
module_platform_driver(atmel_hlcdc_pwm_driver);
|
|
|
|
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
|
|
index 652fdb8dc7bfa..0a7920cbd4949 100644
|
|
--- a/drivers/pwm/pwm-sti.c
|
|
+++ b/drivers/pwm/pwm-sti.c
|
|
@@ -395,8 +395,17 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
|
|
static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
|
|
const struct pwm_state *state)
|
|
{
|
|
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
|
|
+ struct sti_pwm_compat_data *cdata = pc->cdata;
|
|
+ struct device *dev = pc->dev;
|
|
int err;
|
|
|
|
+ if (pwm->hwpwm >= cdata->pwm_num_devs) {
|
|
+ dev_err(dev, "device %u is not valid for pwm mode\n",
|
|
+ pwm->hwpwm);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
if (state->polarity != PWM_POLARITY_NORMAL)
|
|
return -EINVAL;
|
|
|
|
@@ -647,7 +656,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
|
|
|
|
pc->chip.dev = dev;
|
|
pc->chip.ops = &sti_pwm_ops;
|
|
- pc->chip.npwm = pc->cdata->pwm_num_devs;
|
|
+ pc->chip.npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs);
|
|
|
|
for (i = 0; i < cdata->cpt_num_devs; i++) {
|
|
struct sti_cpt_ddata *ddata = &cdata->ddata[i];
|
|
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 1660197866531..d93113b6ffaa1 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -313,7 +313,7 @@ config ST_SLIM_REMOTEPROC

 config STM32_RPROC
 	tristate "STM32 remoteproc support"
-	depends on ARCH_STM32
+	depends on ARCH_STM32 || COMPILE_TEST
 	depends on REMOTEPROC
 	select MAILBOX
 	help
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
|
|
index 8746cbb1f168d..74da0393172c5 100644
|
|
--- a/drivers/remoteproc/stm32_rproc.c
|
|
+++ b/drivers/remoteproc/stm32_rproc.c
|
|
@@ -118,10 +118,10 @@ static int stm32_rproc_mem_alloc(struct rproc *rproc,
|
|
struct device *dev = rproc->dev.parent;
|
|
void *va;
|
|
|
|
- dev_dbg(dev, "map memory: %pa+%x\n", &mem->dma, mem->len);
|
|
- va = ioremap_wc(mem->dma, mem->len);
|
|
+ dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len);
|
|
+ va = (__force void *)ioremap_wc(mem->dma, mem->len);
|
|
if (IS_ERR_OR_NULL(va)) {
|
|
- dev_err(dev, "Unable to map memory region: %pa+%x\n",
|
|
+ dev_err(dev, "Unable to map memory region: %pad+0x%zx\n",
|
|
&mem->dma, mem->len);
|
|
return -ENOMEM;
|
|
}
|
|
@@ -136,7 +136,7 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
|
|
struct rproc_mem_entry *mem)
|
|
{
|
|
dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
|
|
- iounmap(mem->va);
|
|
+ iounmap((__force __iomem void *)mem->va);
|
|
|
|
return 0;
|
|
}
|
|
@@ -627,7 +627,7 @@ stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
|
|
|
|
ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
|
|
if (IS_ERR_OR_NULL(ddata->rsc_va)) {
|
|
- dev_err(dev, "Unable to map memory region: %pa+%zx\n",
|
|
+ dev_err(dev, "Unable to map memory region: %pa+%x\n",
|
|
&rsc_pa, RSC_TBL_SIZE);
|
|
ddata->rsc_va = NULL;
|
|
return ERR_PTR(-ENOMEM);
|
|
@@ -641,7 +641,7 @@ stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
|
|
* entire area by overwriting it with the initial values stored in rproc->clean_table.
|
|
*/
|
|
*table_sz = RSC_TBL_SIZE;
|
|
- return (struct resource_table *)ddata->rsc_va;
|
|
+ return (__force struct resource_table *)ddata->rsc_va;
|
|
}
|
|
|
|
static const struct rproc_ops st_rproc_ops = {
|
|
@@ -889,7 +889,7 @@ static int stm32_rproc_remove(struct platform_device *pdev)
|
|
return 0;
|
|
}
|
|
|
|
-static int __maybe_unused stm32_rproc_suspend(struct device *dev)
|
|
+static int stm32_rproc_suspend(struct device *dev)
|
|
{
|
|
struct rproc *rproc = dev_get_drvdata(dev);
|
|
struct stm32_rproc *ddata = rproc->priv;
|
|
@@ -900,7 +900,7 @@ static int __maybe_unused stm32_rproc_suspend(struct device *dev)
|
|
return 0;
|
|
}
|
|
|
|
-static int __maybe_unused stm32_rproc_resume(struct device *dev)
|
|
+static int stm32_rproc_resume(struct device *dev)
|
|
{
|
|
struct rproc *rproc = dev_get_drvdata(dev);
|
|
struct stm32_rproc *ddata = rproc->priv;
|
|
@@ -911,16 +911,16 @@ static int __maybe_unused stm32_rproc_resume(struct device *dev)
|
|
return 0;
|
|
}
|
|
|
|
-static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
|
|
- stm32_rproc_suspend, stm32_rproc_resume);
|
|
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
|
|
+ stm32_rproc_suspend, stm32_rproc_resume);
|
|
|
|
static struct platform_driver stm32_rproc_driver = {
|
|
.probe = stm32_rproc_probe,
|
|
.remove = stm32_rproc_remove,
|
|
.driver = {
|
|
.name = "stm32-rproc",
|
|
- .pm = &stm32_rproc_pm_ops,
|
|
- .of_match_table = of_match_ptr(stm32_rproc_match),
|
|
+ .pm = pm_ptr(&stm32_rproc_pm_ops),
|
|
+ .of_match_table = stm32_rproc_match,
|
|
},
|
|
};
|
|
module_platform_driver(stm32_rproc_driver);
|
|
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index bb63edb507da4..87dc050ca004c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1843,7 +1843,8 @@ config RTC_DRV_MT2712

 config RTC_DRV_MT6397
 	tristate "MediaTek PMIC based RTC"
-	depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
+	depends on MFD_MT6397 || COMPILE_TEST
+	select IRQ_DOMAIN
 	help
 	  This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
 	  MT6397 PMIC. You should enable MT6397 PMIC MFD before select
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
index d5caf36c56cdc..225c859d6da55 100644
--- a/drivers/rtc/lib_test.c
+++ b/drivers/rtc/lib_test.c
@@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test)

 		days = div_s64(secs, 86400);

-		#define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \
+		#define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
 			year, month, mday, yday, days

 		KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
|
|
index f207de4a87a0f..341d65acd715d 100644
|
|
--- a/drivers/s390/block/dasd.c
|
|
+++ b/drivers/s390/block/dasd.c
|
|
@@ -8,9 +8,6 @@
|
|
* Copyright IBM Corp. 1999, 2009
|
|
*/
|
|
|
|
-#define KMSG_COMPONENT "dasd"
|
|
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
|
-
|
|
#include <linux/kmod.h>
|
|
#include <linux/init.h>
|
|
#include <linux/interrupt.h>
|
|
@@ -73,7 +70,8 @@ static void dasd_profile_init(struct dasd_profile *, struct dentry *);
|
|
static void dasd_profile_exit(struct dasd_profile *);
|
|
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
|
|
static void dasd_hosts_exit(struct dasd_device *);
|
|
-
|
|
+static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
|
|
+ unsigned int);
|
|
/*
|
|
* SECTION: Operations on the device structure.
|
|
*/
|
|
@@ -2327,7 +2325,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
|
|
/* Non-temporary stop condition will trigger fail fast */
|
|
if (device->stopped & ~DASD_STOPPED_PENDING &&
|
|
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
|
|
- (!dasd_eer_enabled(device))) {
|
|
+ !dasd_eer_enabled(device) && device->aq_mask == 0) {
|
|
cqr->status = DASD_CQR_FAILED;
|
|
cqr->intrc = -ENOLINK;
|
|
continue;
|
|
@@ -2803,20 +2801,18 @@ static void __dasd_process_block_ccw_queue(struct dasd_block *block,
|
|
dasd_log_sense(cqr, &cqr->irb);
|
|
}
|
|
|
|
- /* First of all call extended error reporting. */
|
|
- if (dasd_eer_enabled(base) &&
|
|
- cqr->status == DASD_CQR_FAILED) {
|
|
- dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
|
|
-
|
|
- /* restart request */
|
|
+ /*
|
|
+ * First call extended error reporting and check for autoquiesce
|
|
+ */
|
|
+ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
|
|
+ if (cqr->status == DASD_CQR_FAILED &&
|
|
+ dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
|
|
cqr->status = DASD_CQR_FILLED;
|
|
cqr->retries = 255;
|
|
- spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
|
|
- dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
|
|
- spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
|
|
- flags);
|
|
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
|
|
goto restart;
|
|
}
|
|
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
|
|
|
|
/* Process finished ERP request. */
|
|
if (cqr->refers) {
|
|
@@ -2858,7 +2854,7 @@ static void __dasd_block_start_head(struct dasd_block *block)
|
|
/* Non-temporary stop condition will trigger fail fast */
|
|
if (block->base->stopped & ~DASD_STOPPED_PENDING &&
|
|
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
|
|
- (!dasd_eer_enabled(block->base))) {
|
|
+ !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) {
|
|
cqr->status = DASD_CQR_FAILED;
|
|
cqr->intrc = -ENOLINK;
|
|
dasd_schedule_block_bh(block);
|
|
@@ -3391,8 +3387,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
|
|
|
|
ret = ccw_device_set_online(cdev);
|
|
if (ret)
|
|
- pr_warn("%s: Setting the DASD online failed with rc=%d\n",
|
|
- dev_name(&cdev->dev), ret);
|
|
+ dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
|
|
}
|
|
|
|
/*
|
|
@@ -3479,8 +3474,11 @@ int dasd_generic_set_online(struct ccw_device *cdev,
|
|
{
|
|
struct dasd_discipline *discipline;
|
|
struct dasd_device *device;
|
|
+ struct device *dev;
|
|
int rc;
|
|
|
|
+ dev = &cdev->dev;
|
|
+
|
|
/* first online clears initial online feature flag */
|
|
dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
|
|
device = dasd_create_device(cdev);
|
|
@@ -3493,11 +3491,10 @@ int dasd_generic_set_online(struct ccw_device *cdev,
|
|
/* Try to load the required module. */
|
|
rc = request_module(DASD_DIAG_MOD);
|
|
if (rc) {
|
|
- pr_warn("%s Setting the DASD online failed "
|
|
- "because the required module %s "
|
|
- "could not be loaded (rc=%d)\n",
|
|
- dev_name(&cdev->dev), DASD_DIAG_MOD,
|
|
- rc);
|
|
+ dev_warn(dev, "Setting the DASD online failed "
|
|
+ "because the required module %s "
|
|
+ "could not be loaded (rc=%d)\n",
|
|
+ DASD_DIAG_MOD, rc);
|
|
dasd_delete_device(device);
|
|
return -ENODEV;
|
|
}
|
|
@@ -3505,8 +3502,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
|
|
/* Module init could have failed, so check again here after
|
|
* request_module(). */
|
|
if (!dasd_diag_discipline_pointer) {
|
|
- pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
|
|
- dev_name(&cdev->dev));
|
|
+ dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
|
|
dasd_delete_device(device);
|
|
return -ENODEV;
|
|
}
|
|
@@ -3516,37 +3512,33 @@ int dasd_generic_set_online(struct ccw_device *cdev,
|
|
dasd_delete_device(device);
|
|
return -EINVAL;
|
|
}
|
|
+ device->base_discipline = base_discipline;
|
|
if (!try_module_get(discipline->owner)) {
|
|
- module_put(base_discipline->owner);
|
|
dasd_delete_device(device);
|
|
return -EINVAL;
|
|
}
|
|
- device->base_discipline = base_discipline;
|
|
device->discipline = discipline;
|
|
|
|
/* check_device will allocate block device if necessary */
|
|
rc = discipline->check_device(device);
|
|
if (rc) {
|
|
- pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
|
|
- dev_name(&cdev->dev), discipline->name, rc);
|
|
- module_put(discipline->owner);
|
|
- module_put(base_discipline->owner);
|
|
+ dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
|
|
+ discipline->name, rc);
|
|
dasd_delete_device(device);
|
|
return rc;
|
|
}
|
|
|
|
dasd_set_target_state(device, DASD_STATE_ONLINE);
|
|
if (device->state <= DASD_STATE_KNOWN) {
|
|
- pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
|
|
- dev_name(&cdev->dev));
|
|
+ dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
|
|
rc = -ENODEV;
|
|
dasd_set_target_state(device, DASD_STATE_NEW);
|
|
if (device->block)
|
|
dasd_free_block(device->block);
|
|
dasd_delete_device(device);
|
|
- } else
|
|
- pr_debug("dasd_generic device %s found\n",
|
|
- dev_name(&cdev->dev));
|
|
+ } else {
|
|
+ dev_dbg(dev, "dasd_generic device found\n");
|
|
+ }
|
|
|
|
wait_event(dasd_init_waitq, _wait_for_device(device));
|
|
|
|
@@ -3557,10 +3549,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
|
|
|
|
int dasd_generic_set_offline(struct ccw_device *cdev)
|
|
{
|
|
+ int max_count, open_count, rc;
|
|
struct dasd_device *device;
|
|
struct dasd_block *block;
|
|
- int max_count, open_count, rc;
|
|
unsigned long flags;
|
|
+ struct device *dev;
|
|
+
|
|
+ dev = &cdev->dev;
|
|
|
|
rc = 0;
|
|
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
|
|
@@ -3581,11 +3576,10 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
|
|
open_count = atomic_read(&device->block->open_count);
|
|
if (open_count > max_count) {
|
|
if (open_count > 0)
|
|
- pr_warn("%s: The DASD cannot be set offline with open count %i\n",
|
|
- dev_name(&cdev->dev), open_count);
|
|
+ dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
|
|
+ open_count);
|
|
else
|
|
- pr_warn("%s: The DASD cannot be set offline while it is in use\n",
|
|
- dev_name(&cdev->dev));
|
|
+ dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
|
|
rc = -EBUSY;
|
|
goto out_err;
|
|
}
|
|
@@ -3682,8 +3676,8 @@ int dasd_generic_last_path_gone(struct dasd_device *device)
|
|
dev_warn(&device->cdev->dev, "No operational channel path is left "
|
|
"for the device\n");
|
|
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
|
|
- /* First of all call extended error reporting. */
|
|
- dasd_eer_write(device, NULL, DASD_EER_NOPATH);
|
|
+ /* First call extended error reporting and check for autoquiesce. */
|
|
+ dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
|
|
|
|
if (device->state < DASD_STATE_BASIC)
|
|
return 0;
|
|
@@ -3815,7 +3809,8 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
|
|
"No verified channel paths remain for the device\n");
|
|
DBF_DEV_EVENT(DBF_WARNING, device,
|
|
"%s", "last verified path gone");
|
|
- dasd_eer_write(device, NULL, DASD_EER_NOPATH);
|
|
+ /* First call extended error reporting and check for autoquiesce. */
|
|
+ dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
|
|
dasd_device_set_stop_bits(device,
|
|
DASD_STOPPED_DC_WAIT);
|
|
}
|
|
@@ -3837,7 +3832,8 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
|
|
void dasd_generic_space_exhaust(struct dasd_device *device,
|
|
struct dasd_ccw_req *cqr)
|
|
{
|
|
- dasd_eer_write(device, NULL, DASD_EER_NOSPC);
|
|
+ /* First call extended error reporting and check for autoquiesce. */
|
|
+ dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);
|
|
|
|
if (device->state < DASD_STATE_BASIC)
|
|
return;
|
|
@@ -3931,6 +3927,31 @@ void dasd_schedule_requeue(struct dasd_device *device)
|
|
}
|
|
EXPORT_SYMBOL(dasd_schedule_requeue);
|
|
|
|
+static int dasd_handle_autoquiesce(struct dasd_device *device,
|
|
+ struct dasd_ccw_req *cqr,
|
|
+ unsigned int reason)
|
|
+{
|
|
+ /* in any case write eer message with reason */
|
|
+ if (dasd_eer_enabled(device))
|
|
+ dasd_eer_write(device, cqr, reason);
|
|
+
|
|
+ if (!test_bit(reason, &device->aq_mask))
|
|
+ return 0;
|
|
+
|
|
+ /* notify eer about autoquiesce */
|
|
+ if (dasd_eer_enabled(device))
|
|
+ dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
|
|
+
|
|
+ dev_info(&device->cdev->dev,
|
|
+ "The DASD has been put in the quiesce state\n");
|
|
+ dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
|
|
+
|
|
+ if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
|
|
+ dasd_schedule_requeue(device);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
|
|
int rdc_buffer_size,
|
|
int magic)
|
|
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
|
|
index d4d31cd11d261..d16c699b9ac6d 100644
|
|
--- a/drivers/s390/block/dasd_eer.c
|
|
+++ b/drivers/s390/block/dasd_eer.c
|
|
@@ -387,6 +387,7 @@ void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
|
|
break;
|
|
case DASD_EER_NOPATH:
|
|
case DASD_EER_NOSPC:
|
|
+ case DASD_EER_AUTOQUIESCE:
|
|
dasd_eer_write_standard_trigger(device, NULL, id);
|
|
break;
|
|
case DASD_EER_STATECHANGE:
|
|
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
|
|
index f50932518f83a..00bcd177264ac 100644
|
|
--- a/drivers/s390/block/dasd_int.h
|
|
+++ b/drivers/s390/block/dasd_int.h
|
|
@@ -464,6 +464,7 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
|
|
#define DASD_EER_STATECHANGE 3
|
|
#define DASD_EER_PPRCSUSPEND 4
|
|
#define DASD_EER_NOSPC 5
|
|
+#define DASD_EER_AUTOQUIESCE 31
|
|
|
|
/* DASD path handling */
|
|
|
|
@@ -641,6 +642,7 @@ struct dasd_device {
|
|
struct dasd_format_entry format_entry;
|
|
struct kset *paths_info;
|
|
struct dasd_copy_relation *copy;
|
|
+ unsigned long aq_mask;
|
|
};
|
|
|
|
struct dasd_block {
|
|
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
|
|
index 7bd2ba1ad4d11..f30fe324e6ecc 100644
|
|
--- a/drivers/scsi/bfa/bfa.h
|
|
+++ b/drivers/scsi/bfa/bfa.h
|
|
@@ -20,7 +20,6 @@
|
|
struct bfa_s;
|
|
|
|
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
|
|
-typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
|
|
|
|
/*
|
|
* Interrupt message handlers
|
|
@@ -437,4 +436,12 @@ struct bfa_cb_pending_q_s {
|
|
(__qe)->data = (__data); \
|
|
} while (0)
|
|
|
|
+#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do { \
|
|
+ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
|
|
+ (__qe)->hcb_qe.cbfn_status = (__cbfn); \
|
|
+ (__qe)->hcb_qe.cbarg = (__cbarg); \
|
|
+ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
|
|
+ (__qe)->data = (__data); \
|
|
+} while (0)
|
|
+
|
|
#endif /* __BFA_H__ */
|
|
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
|
|
index 6846ca8f7313c..3438d0b8ba062 100644
|
|
--- a/drivers/scsi/bfa/bfa_core.c
|
|
+++ b/drivers/scsi/bfa/bfa_core.c
|
|
@@ -1907,15 +1907,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
|
|
struct list_head *qe;
|
|
struct list_head *qen;
|
|
struct bfa_cb_qe_s *hcb_qe;
|
|
- bfa_cb_cbfn_status_t cbfn;
|
|
|
|
list_for_each_safe(qe, qen, comp_q) {
|
|
hcb_qe = (struct bfa_cb_qe_s *) qe;
|
|
if (hcb_qe->pre_rmv) {
|
|
/* qe is invalid after return, dequeue before cbfn() */
|
|
list_del(qe);
|
|
- cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
|
|
- cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
|
|
+ hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status);
|
|
} else
|
|
hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
|
|
}
|
|
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
|
|
index 933a1c3890ff5..5e568d6d7b261 100644
|
|
--- a/drivers/scsi/bfa/bfa_ioc.h
|
|
+++ b/drivers/scsi/bfa/bfa_ioc.h
|
|
@@ -361,14 +361,18 @@ struct bfa_reqq_wait_s {
|
|
void *cbarg;
|
|
};
|
|
|
|
-typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
|
|
+typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
|
|
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
|
|
|
|
/*
|
|
* Generic BFA callback element.
|
|
*/
|
|
struct bfa_cb_qe_s {
|
|
struct list_head qe;
|
|
- bfa_cb_cbfn_t cbfn;
|
|
+ union {
|
|
+ bfa_cb_cbfn_status_t cbfn_status;
|
|
+ bfa_cb_cbfn_t cbfn;
|
|
+ };
|
|
bfa_boolean_t once;
|
|
bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
|
|
bfa_status_t fw_status; /* to access fw status in comp proc */
|
|
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
|
|
index be8dfbe13e904..524e4e6979c9f 100644
|
|
--- a/drivers/scsi/bfa/bfad_bsg.c
|
|
+++ b/drivers/scsi/bfa/bfad_bsg.c
|
|
@@ -2135,8 +2135,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
|
|
struct bfa_cb_pending_q_s cb_qe;
|
|
|
|
init_completion(&fcomp.comp);
|
|
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
|
|
- &fcomp, &iocmd->stats);
|
|
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
|
|
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
|
iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
|
|
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
|
@@ -2159,7 +2158,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
|
|
struct bfa_cb_pending_q_s cb_qe;
|
|
|
|
init_completion(&fcomp.comp);
|
|
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
|
|
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
|
|
|
|
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
|
iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
|
|
@@ -2443,8 +2442,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
|
|
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
|
|
|
|
init_completion(&fcomp.comp);
|
|
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
|
|
- &fcomp, &iocmd->stats);
|
|
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
|
|
|
|
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
|
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
|
|
@@ -2474,8 +2472,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
|
|
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
|
|
|
|
init_completion(&fcomp.comp);
|
|
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
|
|
- &fcomp, NULL);
|
|
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
|
|
|
|
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
|
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
|
|
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
|
|
index c38017b4af982..e50e93e7fe5a1 100644
|
|
--- a/drivers/scsi/csiostor/csio_defs.h
|
|
+++ b/drivers/scsi/csiostor/csio_defs.h
|
|
@@ -73,7 +73,21 @@ csio_list_deleted(struct list_head *list)
|
|
#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
|
|
|
|
/* State machine */
|
|
-typedef void (*csio_sm_state_t)(void *, uint32_t);
|
|
+struct csio_lnode;
|
|
+
|
|
+/* State machine events */
|
|
+enum csio_ln_ev {
|
|
+ CSIO_LNE_NONE = (uint32_t)0,
|
|
+ CSIO_LNE_LINKUP,
|
|
+ CSIO_LNE_FAB_INIT_DONE,
|
|
+ CSIO_LNE_LINK_DOWN,
|
|
+ CSIO_LNE_DOWN_LINK,
|
|
+ CSIO_LNE_LOGO,
|
|
+ CSIO_LNE_CLOSE,
|
|
+ CSIO_LNE_MAX_EVENT,
|
|
+};
|
|
+
|
|
+typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt);
|
|
|
|
struct csio_sm {
|
|
struct list_head sm_list;
|
|
@@ -83,7 +97,7 @@ struct csio_sm {
|
|
static inline void
|
|
csio_set_state(void *smp, void *state)
|
|
{
|
|
- ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
|
|
+ ((struct csio_sm *)smp)->sm_state = state;
|
|
}
|
|
|
|
static inline void
|
|
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
|
|
index d5ac938970232..5b3ffefae476d 100644
|
|
--- a/drivers/scsi/csiostor/csio_lnode.c
|
|
+++ b/drivers/scsi/csiostor/csio_lnode.c
|
|
@@ -1095,7 +1095,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
|
|
int
|
|
csio_is_lnode_ready(struct csio_lnode *ln)
|
|
{
|
|
- return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
|
|
+ return (csio_get_state(ln) == csio_lns_ready);
|
|
}
|
|
|
|
/*****************************************************************************/
|
|
@@ -1366,15 +1366,15 @@ csio_free_fcfinfo(struct kref *kref)
|
|
void
|
|
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
|
|
{
|
|
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
|
|
+ if (csio_get_state(ln) == csio_lns_uninit) {
|
|
strcpy(str, "UNINIT");
|
|
return;
|
|
}
|
|
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
|
|
+ if (csio_get_state(ln) == csio_lns_ready) {
|
|
strcpy(str, "READY");
|
|
return;
|
|
}
|
|
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
|
|
+ if (csio_get_state(ln) == csio_lns_offline) {
|
|
strcpy(str, "OFFLINE");
|
|
return;
|
|
}
|
|
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
|
|
index 372a67d122d38..607698a0f0631 100644
|
|
--- a/drivers/scsi/csiostor/csio_lnode.h
|
|
+++ b/drivers/scsi/csiostor/csio_lnode.h
|
|
@@ -53,19 +53,6 @@
|
|
extern int csio_fcoe_rnodes;
|
|
extern int csio_fdmi_enable;
|
|
|
|
-/* State machine evets */
|
|
-enum csio_ln_ev {
|
|
- CSIO_LNE_NONE = (uint32_t)0,
|
|
- CSIO_LNE_LINKUP,
|
|
- CSIO_LNE_FAB_INIT_DONE,
|
|
- CSIO_LNE_LINK_DOWN,
|
|
- CSIO_LNE_DOWN_LINK,
|
|
- CSIO_LNE_LOGO,
|
|
- CSIO_LNE_CLOSE,
|
|
- CSIO_LNE_MAX_EVENT,
|
|
-};
|
|
-
|
|
-
|
|
struct csio_fcf_info {
|
|
struct list_head list;
|
|
uint8_t priority;
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
index 809be43f440dc..8e6ac08e553bb 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
@@ -7398,7 +7398,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
|
|
return -EFAULT;
|
|
}
|
|
|
|
- issue_diag_reset:
|
|
+ return 0;
|
|
+
|
|
+issue_diag_reset:
|
|
rc = _base_diag_reset(ioc);
|
|
return rc;
|
|
}
|
|
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 1d2b27e3ea63f..b811446e0fa55 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
 	struct qbman_eq_desc *ed;
 	int i, ret;

-	ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
+	ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
 	if (!ed)
 		return -ENOMEM;

diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index eb656b33156ba..f19e74d342aa2 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,5 +1,5 @@
 config POLARFIRE_SOC_SYS_CTRL
-	tristate "POLARFIRE_SOC_SYS_CTRL"
+	tristate "Microchip PolarFire SoC (MPFS) system controller support"
 	depends on POLARFIRE_SOC_MAILBOX
 	help
 	  This driver adds support for the PolarFire SoC (MPFS) system controller.
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 9a90f241bb97f..6efe36aeb48e9 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -195,7 +195,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = {
 	[SC8280XP_CX] = &cx,
 	[SC8280XP_CX_AO] = &cx_ao,
 	[SC8280XP_EBI] = &ebi,
-	[SC8280XP_GFX] = &gfx,
 	[SC8280XP_LCX] = &lcx,
 	[SC8280XP_LMX] = &lmx,
 	[SC8280XP_MMCX] = &mmcx,
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
|
|
index 6e95efb50acbc..f9ec8742917a6 100644
|
|
--- a/drivers/spi/spi-mt65xx.c
|
|
+++ b/drivers/spi/spi-mt65xx.c
|
|
@@ -787,17 +787,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
|
|
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
|
|
mtk_spi_setup_packet(master);
|
|
|
|
- cnt = mdata->xfer_len / 4;
|
|
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
|
|
- trans->tx_buf + mdata->num_xfered, cnt);
|
|
+ if (trans->tx_buf) {
|
|
+ cnt = mdata->xfer_len / 4;
|
|
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
|
|
+ trans->tx_buf + mdata->num_xfered, cnt);
|
|
|
|
- remainder = mdata->xfer_len % 4;
|
|
- if (remainder > 0) {
|
|
- reg_val = 0;
|
|
- memcpy(®_val,
|
|
- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
|
|
- remainder);
|
|
- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
|
|
+ remainder = mdata->xfer_len % 4;
|
|
+ if (remainder > 0) {
|
|
+ reg_val = 0;
|
|
+ memcpy(®_val,
|
|
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
|
|
+ remainder);
|
|
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
|
|
+ }
|
|
}
|
|
|
|
mtk_spi_enable_transfer(master);
|
|
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
|
|
index 87d36948c6106..c6bd86a5335ab 100644
|
|
--- a/drivers/staging/greybus/light.c
|
|
+++ b/drivers/staging/greybus/light.c
|
|
@@ -100,15 +100,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
|
|
static struct gb_channel *get_channel_from_mode(struct gb_light *light,
|
|
u32 mode)
|
|
{
|
|
- struct gb_channel *channel = NULL;
|
|
+ struct gb_channel *channel;
|
|
int i;
|
|
|
|
for (i = 0; i < light->channels_count; i++) {
|
|
channel = &light->channels[i];
|
|
- if (channel && channel->mode == mode)
|
|
- break;
|
|
+ if (channel->mode == mode)
|
|
+ return channel;
|
|
}
|
|
- return channel;
|
|
+ return NULL;
|
|
}
|
|
|
|
static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
|
|
diff --git a/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
|
|
index 0c61a2dec2211..81fc4835679f3 100644
|
|
--- a/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
|
|
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
|
|
@@ -1462,7 +1462,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
|
|
budget_av->has_saa7113 = 1;
|
|
err = saa7146_vv_init(dev, &vv_data);
|
|
if (err != 0) {
|
|
- /* fixme: proper cleanup here */
|
|
+ ttpci_budget_deinit(&budget_av->budget);
|
|
+ kfree(budget_av);
|
|
ERR("cannot init vv subsystem\n");
|
|
return err;
|
|
}
|
|
@@ -1471,9 +1472,10 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
|
|
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
|
|
|
|
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
|
|
- /* fixme: proper cleanup here */
|
|
- ERR("cannot register capture v4l2 device\n");
|
|
saa7146_vv_release(dev);
|
|
+ ttpci_budget_deinit(&budget_av->budget);
|
|
+ kfree(budget_av);
|
|
+ ERR("cannot register capture v4l2 device\n");
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index 1fd39a2fca98a..95cca281e8a37 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -803,6 +803,7 @@ static int ipu_csc_scaler_release(struct file *file)

 	dev_dbg(priv->dev, "Releasing instance %p\n", ctx);

+	v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
 	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
 	v4l2_fh_del(&ctx->fh);
 	v4l2_fh_exit(&ctx->fh);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 93a2196006f73..cb99610f3e128 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -109,6 +109,11 @@ struct cedrus_buffer {
unsigned int position;
enum cedrus_h264_pic_type pic_type;
} h264;
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_dma;
+ ssize_t mv_col_buf_size;
+ } h265;
} codec;
};

@@ -142,10 +147,6 @@ struct cedrus_ctx {
ssize_t intra_pred_buf_size;
} h264;
struct {
- void *mv_col_buf;
- dma_addr_t mv_col_buf_addr;
- ssize_t mv_col_buf_size;
- ssize_t mv_col_buf_unit_size;
void *neighbor_info_buf;
dma_addr_t neighbor_info_buf_addr;
void *entry_points_buf;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
|
|
index 625f77a8c5bde..9f13c942a806b 100644
|
|
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
|
|
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
|
|
@@ -90,12 +90,13 @@ static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
|
|
}
|
|
|
|
static inline dma_addr_t
|
|
-cedrus_h265_frame_info_mv_col_buf_addr(struct cedrus_ctx *ctx,
|
|
- unsigned int index, unsigned int field)
|
|
+cedrus_h265_frame_info_mv_col_buf_addr(struct vb2_buffer *buf,
|
|
+ unsigned int field)
|
|
{
|
|
- return ctx->codec.h265.mv_col_buf_addr + index *
|
|
- ctx->codec.h265.mv_col_buf_unit_size +
|
|
- field * ctx->codec.h265.mv_col_buf_unit_size / 2;
|
|
+ struct cedrus_buffer *cedrus_buf = vb2_to_cedrus_buffer(buf);
|
|
+
|
|
+ return cedrus_buf->codec.h265.mv_col_buf_dma +
|
|
+ field * cedrus_buf->codec.h265.mv_col_buf_size / 2;
|
|
}
|
|
|
|
static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
|
|
@@ -108,9 +109,8 @@ static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
|
|
dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buf, 0);
|
|
dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buf, 1);
|
|
dma_addr_t mv_col_buf_addr[2] = {
|
|
- cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index, 0),
|
|
- cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index,
|
|
- field_pic ? 1 : 0)
|
|
+ cedrus_h265_frame_info_mv_col_buf_addr(buf, 0),
|
|
+ cedrus_h265_frame_info_mv_col_buf_addr(buf, field_pic ? 1 : 0)
|
|
};
|
|
u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
|
|
VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
|
|
@@ -412,12 +412,13 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
|
|
unsigned int width_in_ctb_luma, ctb_size_luma;
|
|
unsigned int log2_max_luma_coding_block_size;
|
|
unsigned int ctb_addr_x, ctb_addr_y;
|
|
+ struct cedrus_buffer *cedrus_buf;
|
|
dma_addr_t src_buf_addr;
|
|
- dma_addr_t src_buf_end_addr;
|
|
u32 chroma_log2_weight_denom;
|
|
u32 num_entry_point_offsets;
|
|
u32 output_pic_list_index;
|
|
u32 pic_order_cnt[2];
|
|
+ size_t slice_bytes;
|
|
u8 padding;
|
|
int count;
|
|
u32 reg;
|
|
@@ -428,6 +429,8 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
|
|
decode_params = run->h265.decode_params;
|
|
pred_weight_table = &slice_params->pred_weight_table;
|
|
num_entry_point_offsets = slice_params->num_entry_point_offsets;
|
|
+ cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
|
|
+ slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0);
|
|
|
|
/*
|
|
* If entry points offsets are present, we should get them
|
|
@@ -445,31 +448,25 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
|
|
DIV_ROUND_UP(sps->pic_width_in_luma_samples, ctb_size_luma);
|
|
|
|
/* MV column buffer size and allocation. */
|
|
- if (!ctx->codec.h265.mv_col_buf_size) {
|
|
- unsigned int num_buffers =
|
|
- run->dst->vb2_buf.vb2_queue->num_buffers;
|
|
-
|
|
+ if (!cedrus_buf->codec.h265.mv_col_buf_size) {
|
|
/*
|
|
* Each CTB requires a MV col buffer with a specific unit size.
|
|
* Since the address is given with missing lsb bits, 1 KiB is
|
|
* added to each buffer to ensure proper alignment.
|
|
*/
|
|
- ctx->codec.h265.mv_col_buf_unit_size =
|
|
+ cedrus_buf->codec.h265.mv_col_buf_size =
|
|
DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
|
|
DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
|
|
CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
|
|
|
|
- ctx->codec.h265.mv_col_buf_size = num_buffers *
|
|
- ctx->codec.h265.mv_col_buf_unit_size;
|
|
-
|
|
/* Buffer is never accessed by CPU, so we can skip kernel mapping. */
|
|
- ctx->codec.h265.mv_col_buf =
|
|
+ cedrus_buf->codec.h265.mv_col_buf =
|
|
dma_alloc_attrs(dev->dev,
|
|
- ctx->codec.h265.mv_col_buf_size,
|
|
- &ctx->codec.h265.mv_col_buf_addr,
|
|
+ cedrus_buf->codec.h265.mv_col_buf_size,
|
|
+ &cedrus_buf->codec.h265.mv_col_buf_dma,
|
|
GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
|
|
- if (!ctx->codec.h265.mv_col_buf) {
|
|
- ctx->codec.h265.mv_col_buf_size = 0;
|
|
+ if (!cedrus_buf->codec.h265.mv_col_buf) {
|
|
+ cedrus_buf->codec.h265.mv_col_buf_size = 0;
|
|
return -ENOMEM;
|
|
}
|
|
}
|
|
@@ -481,7 +478,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
|
|
|
|
cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
|
|
|
|
- reg = slice_params->bit_size;
|
|
+ reg = slice_bytes * 8;
|
|
cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
|
|
|
|
/* Source beginning and end addresses. */
|
|
@@ -495,10 +492,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
|
|
|
|
cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
|
|
|
|
- src_buf_end_addr = src_buf_addr +
|
|
- DIV_ROUND_UP(slice_params->bit_size, 8);
|
|
-
|
|
- reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
|
|
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes);
|
|
cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
|
|
|
|
/* Coding tree block address */
|
|
@@ -816,9 +810,6 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx)
|
|
{
|
|
struct cedrus_dev *dev = ctx->dev;
|
|
|
|
- /* The buffer size is calculated at setup time. */
|
|
- ctx->codec.h265.mv_col_buf_size = 0;
|
|
-
|
|
/* Buffer is never accessed by CPU, so we can skip kernel mapping. */
|
|
ctx->codec.h265.neighbor_info_buf =
|
|
dma_alloc_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
|
|
@@ -845,14 +836,24 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx)
|
|
static void cedrus_h265_stop(struct cedrus_ctx *ctx)
|
|
{
|
|
struct cedrus_dev *dev = ctx->dev;
|
|
+ struct cedrus_buffer *buf;
|
|
+ struct vb2_queue *vq;
|
|
+ unsigned int i;
|
|
|
|
- if (ctx->codec.h265.mv_col_buf_size > 0) {
|
|
- dma_free_attrs(dev->dev, ctx->codec.h265.mv_col_buf_size,
|
|
- ctx->codec.h265.mv_col_buf,
|
|
- ctx->codec.h265.mv_col_buf_addr,
|
|
- DMA_ATTR_NO_KERNEL_MAPPING);
|
|
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
|
|
+
|
|
+ for (i = 0; i < vq->num_buffers; i++) {
|
|
+ buf = vb2_to_cedrus_buffer(vb2_get_buffer(vq, i));
|
|
|
|
- ctx->codec.h265.mv_col_buf_size = 0;
|
|
+ if (buf->codec.h265.mv_col_buf_size > 0) {
|
|
+ dma_free_attrs(dev->dev,
|
|
+ buf->codec.h265.mv_col_buf_size,
|
|
+ buf->codec.h265.mv_col_buf,
|
|
+ buf->codec.h265.mv_col_buf_dma,
|
|
+ DMA_ATTR_NO_KERNEL_MAPPING);
|
|
+
|
|
+ buf->codec.h265.mv_col_buf_size = 0;
|
|
+ }
|
|
}
|
|
|
|
dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
|
|
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
|
|
index dca1abe363248..55451ff846520 100644
|
|
--- a/drivers/tty/serial/8250/8250_exar.c
|
|
+++ b/drivers/tty/serial/8250/8250_exar.c
|
|
@@ -714,6 +714,7 @@ static void exar_pci_remove(struct pci_dev *pcidev)
|
|
for (i = 0; i < priv->nr; i++)
|
|
serial8250_unregister_port(priv->line[i]);
|
|
|
|
+ /* Ensure that every init quirk is properly torn down */
|
|
if (priv->board->exit)
|
|
priv->board->exit(pcidev);
|
|
}
|
|
@@ -728,10 +729,6 @@ static int __maybe_unused exar_suspend(struct device *dev)
|
|
if (priv->line[i] >= 0)
|
|
serial8250_suspend_port(priv->line[i]);
|
|
|
|
- /* Ensure that every init quirk is properly torn down */
|
|
- if (priv->board->exit)
|
|
- priv->board->exit(pcidev);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 163a89f84c9c2..444f89eb2d4b7 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1459,7 +1459,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
if (!ret)
return 0;

- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+ dev_err(dev, "Unable to request IRQ %i\n", irq);

out_uart:
for (i = 0; i < devtype->nr; i++) {
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
|
|
index aa2c51b84116f..589daed19e625 100644
|
|
--- a/drivers/tty/serial/samsung_tty.c
|
|
+++ b/drivers/tty/serial/samsung_tty.c
|
|
@@ -996,11 +996,10 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
|
|
if ((ufstat & info->tx_fifomask) != 0 ||
|
|
(ufstat & info->tx_fifofull))
|
|
return 0;
|
|
-
|
|
- return 1;
|
|
+ return TIOCSER_TEMT;
|
|
}
|
|
|
|
- return s3c24xx_serial_txempty_nofifo(port);
|
|
+ return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0;
|
|
}
|
|
|
|
/* no modem control lines */
|
|
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 981d2bfcf9a5b..9e30ef2b6eb8c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2515,7 +2515,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
return;
case EScsiignore:
- if (c >= 20 && c <= 0x3f)
+ if (c >= 0x20 && c <= 0x3f)
return;
vc->vc_state = ESnormal;
return;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
|
|
index 538c1b9a28835..c42d5aa99e81a 100644
|
|
--- a/drivers/usb/gadget/udc/net2272.c
|
|
+++ b/drivers/usb/gadget/udc/net2272.c
|
|
@@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev)
|
|
goto err_req;
|
|
}
|
|
|
|
- ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
|
|
+ ret = net2272_probe_fin(dev, irqflags);
|
|
if (ret)
|
|
goto err_io;
|
|
|
|
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
|
|
index 3dc5c04e7cbf9..953df04b40d40 100644
|
|
--- a/drivers/usb/phy/phy-generic.c
|
|
+++ b/drivers/usb/phy/phy-generic.c
|
|
@@ -265,6 +265,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
|
|
return -EPROBE_DEFER;
|
|
}
|
|
|
|
+ nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
|
|
+ if (PTR_ERR(nop->vbus_draw) == -ENODEV)
|
|
+ nop->vbus_draw = NULL;
|
|
+ if (IS_ERR(nop->vbus_draw))
|
|
+ return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
|
|
+ "could not get vbus regulator\n");
|
|
+
|
|
nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
|
|
if (PTR_ERR(nop->vbus_draw) == -ENODEV)
|
|
nop->vbus_draw = NULL;
|
|
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
|
|
index 2b7e796c48897..74d295312466f 100644
|
|
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
|
|
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
|
|
@@ -185,8 +185,6 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev);
|
|
|
|
static bool mlx5_vdpa_debug;
|
|
|
|
-#define MLX5_CVQ_MAX_ENT 16
|
|
-
|
|
#define MLX5_LOG_VIO_FLAG(_feature) \
|
|
do { \
|
|
if (features & BIT_ULL(_feature)) \
|
|
@@ -1980,9 +1978,16 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
|
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
|
struct mlx5_vdpa_virtqueue *mvq;
|
|
|
|
- if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
|
|
+ if (!is_index_valid(mvdev, idx))
|
|
return;
|
|
|
|
+ if (is_ctrl_vq_idx(mvdev, idx)) {
|
|
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
|
|
+
|
|
+ cvq->vring.vring.num = num;
|
|
+ return;
|
|
+ }
|
|
+
|
|
mvq = &ndev->vqs[idx];
|
|
mvq->num_ent = num;
|
|
}
|
|
@@ -2512,7 +2517,7 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
|
|
u16 idx = cvq->vring.last_avail_idx;
|
|
|
|
err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
|
|
- MLX5_CVQ_MAX_ENT, false,
|
|
+ cvq->vring.vring.num, false,
|
|
(struct vring_desc *)(uintptr_t)cvq->desc_addr,
|
|
(struct vring_avail *)(uintptr_t)cvq->driver_addr,
|
|
(struct vring_used *)(uintptr_t)cvq->device_addr);
|
|
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
|
|
index 61bde476cf9c8..e7fc25bfdd237 100644
|
|
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
|
|
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
|
|
@@ -120,7 +120,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
|
|
for (i = 0; i < vdpasim->dev_attr.nas; i++)
|
|
vhost_iotlb_reset(&vdpasim->iommu[i]);
|
|
|
|
- vdpasim->running = true;
|
|
+ vdpasim->running = false;
|
|
spin_unlock(&vdpasim->iommu_lock);
|
|
|
|
vdpasim->features = 0;
|
|
@@ -513,6 +513,7 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
|
|
|
|
spin_lock(&vdpasim->lock);
|
|
vdpasim->status = status;
|
|
+ vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
|
|
spin_unlock(&vdpasim->lock);
|
|
}
|
|
|
|
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 882359dd288c0..aa00379392a0f 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev)
wleds->led_reg = platform_get_device_id(pdev)->driver_data;
wleds->state = DA9052_WLEDS_OFF;

+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = DA9052_MAX_BRIGHTNESS;

diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
|
|
index 475f35635bf67..0d43f6326750f 100644
|
|
--- a/drivers/video/backlight/lm3630a_bl.c
|
|
+++ b/drivers/video/backlight/lm3630a_bl.c
|
|
@@ -231,7 +231,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
|
|
if (rval < 0)
|
|
goto out_i2c_err;
|
|
brightness |= rval;
|
|
- goto out;
|
|
+ return brightness;
|
|
}
|
|
|
|
/* disable sleep */
|
|
@@ -242,11 +242,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
|
|
rval = lm3630a_read(pchip, REG_BRT_A);
|
|
if (rval < 0)
|
|
goto out_i2c_err;
|
|
- brightness = rval;
|
|
+ return rval;
|
|
|
|
-out:
|
|
- bl->props.brightness = brightness;
|
|
- return bl->props.brightness;
|
|
out_i2c_err:
|
|
dev_err(pchip->dev, "i2c failed to access register\n");
|
|
return 0;
|
|
@@ -306,7 +303,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
|
|
if (rval < 0)
|
|
goto out_i2c_err;
|
|
brightness |= rval;
|
|
- goto out;
|
|
+ return brightness;
|
|
}
|
|
|
|
/* disable sleep */
|
|
@@ -317,11 +314,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
|
|
rval = lm3630a_read(pchip, REG_BRT_B);
|
|
if (rval < 0)
|
|
goto out_i2c_err;
|
|
- brightness = rval;
|
|
+ return rval;
|
|
|
|
-out:
|
|
- bl->props.brightness = brightness;
|
|
- return bl->props.brightness;
|
|
out_i2c_err:
|
|
dev_err(pchip->dev, "i2c failed to access register\n");
|
|
return 0;
|
|
@@ -339,6 +333,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
|
|
struct backlight_properties props;
|
|
const char *label;
|
|
|
|
+ memset(&props, 0, sizeof(struct backlight_properties));
|
|
props.type = BACKLIGHT_RAW;
|
|
if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
|
|
props.brightness = pdata->leda_init_brt;
|
|
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
|
|
index 6580911671a3e..4c9726a7fa720 100644
|
|
--- a/drivers/video/backlight/lm3639_bl.c
|
|
+++ b/drivers/video/backlight/lm3639_bl.c
|
|
@@ -339,6 +339,7 @@ static int lm3639_probe(struct i2c_client *client,
|
|
}
|
|
|
|
/* backlight */
|
|
+ memset(&props, 0, sizeof(struct backlight_properties));
|
|
props.type = BACKLIGHT_RAW;
|
|
props.brightness = pdata->init_brt_led;
|
|
props.max_brightness = pdata->max_brt_led;
|
|
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
|
|
index ba42f3fe0c739..d9b95dbd40d30 100644
|
|
--- a/drivers/video/backlight/lp8788_bl.c
|
|
+++ b/drivers/video/backlight/lp8788_bl.c
|
|
@@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl)
|
|
int init_brt;
|
|
char *name;
|
|
|
|
+ memset(&props, 0, sizeof(struct backlight_properties));
|
|
props.type = BACKLIGHT_PLATFORM;
|
|
props.max_brightness = MAX_BRIGHTNESS;
|
|
|
|
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 570a71509d2a9..78d51deab87aa 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>

+#define DEFAULT_TIMEOUT 10
+
/* IWDG registers */
#define IWDG_KR 0x00 /* Key register */
#define IWDG_PR 0x04 /* Prescaler Register */
@@ -249,6 +251,7 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
wdd->parent = dev;
wdd->info = &stm32_iwdg_info;
wdd->ops = &stm32_iwdg_ops;
+ wdd->timeout = DEFAULT_TIMEOUT;
wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
1000) / wdt->rate;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 6e2c967fae6fc..07dc4ec73520c 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -473,16 +473,6 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
continue;
}

- /* Don't expose silly rename entries to userspace. */
- if (nlen > 6 &&
- dire->u.name[0] == '.' &&
- ctx->actor != afs_lookup_filldir &&
- ctx->actor != afs_lookup_one_filldir &&
- memcmp(dire->u.name, ".__afs", 6) == 0) {
- ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
- continue;
- }
-
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,
ntohl(dire->u.vnode),
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
|
|
index 507b44d18572d..4cbf386166209 100644
|
|
--- a/fs/btrfs/block-rsv.c
|
|
+++ b/fs/btrfs/block-rsv.c
|
|
@@ -512,7 +512,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
|
|
|
|
block_rsv = get_block_rsv(trans, root);
|
|
|
|
- if (unlikely(block_rsv->size == 0))
|
|
+ if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
|
|
goto try_reserve;
|
|
again:
|
|
ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
|
|
diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
|
|
index 578c3497a455c..df87c4949d065 100644
|
|
--- a/fs/btrfs/block-rsv.h
|
|
+++ b/fs/btrfs/block-rsv.h
|
|
@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
|
|
return data_race(rsv->full);
|
|
}
|
|
|
|
+/*
|
|
+ * Get the reserved mount of a block reserve in a context where getting a stale
|
|
+ * value is acceptable, instead of accessing it directly and trigger data race
|
|
+ * warning from KCSAN.
|
|
+ */
|
|
+static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
|
|
+{
|
|
+ u64 ret;
|
|
+
|
|
+ spin_lock(&rsv->lock);
|
|
+ ret = rsv->reserved;
|
|
+ spin_unlock(&rsv->lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Get the size of a block reserve in a context where getting a stale value is
|
|
+ * acceptable, instead of accessing it directly and trigger data race warning
|
|
+ * from KCSAN.
|
|
+ */
|
|
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
|
|
+{
|
|
+ u64 ret;
|
|
+
|
|
+ spin_lock(&rsv->lock);
|
|
+ ret = rsv->size;
|
|
+ spin_unlock(&rsv->lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
#endif /* BTRFS_BLOCK_RSV_H */
|
|
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
|
|
index 2635fb4bffa06..8b75f436a9a3c 100644
|
|
--- a/fs/btrfs/space-info.c
|
|
+++ b/fs/btrfs/space-info.c
|
|
@@ -847,7 +847,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
|
|
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
|
|
struct btrfs_space_info *space_info)
|
|
{
|
|
- u64 global_rsv_size = fs_info->global_block_rsv.reserved;
|
|
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
|
|
u64 ordered, delalloc;
|
|
u64 total = writable_total_bytes(fs_info, space_info);
|
|
u64 thresh;
|
|
@@ -948,8 +948,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
|
|
ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
|
|
delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
|
|
if (ordered >= delalloc)
|
|
- used += fs_info->delayed_refs_rsv.reserved +
|
|
- fs_info->delayed_block_rsv.reserved;
|
|
+ used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
|
|
+ btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
|
|
else
|
|
used += space_info->bytes_may_use - global_rsv_size;
|
|
|
|
@@ -1164,7 +1164,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
|
|
enum btrfs_flush_state flush;
|
|
u64 delalloc_size = 0;
|
|
u64 to_reclaim, block_rsv_size;
|
|
- u64 global_rsv_size = global_rsv->reserved;
|
|
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
|
|
|
|
loops++;
|
|
|
|
@@ -1176,9 +1176,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
|
|
* assume it's tied up in delalloc reservations.
|
|
*/
|
|
block_rsv_size = global_rsv_size +
|
|
- delayed_block_rsv->reserved +
|
|
- delayed_refs_rsv->reserved +
|
|
- trans_rsv->reserved;
|
|
+ btrfs_block_rsv_reserved(delayed_block_rsv) +
|
|
+ btrfs_block_rsv_reserved(delayed_refs_rsv) +
|
|
+ btrfs_block_rsv_reserved(trans_rsv);
|
|
if (block_rsv_size < space_info->bytes_may_use)
|
|
delalloc_size = space_info->bytes_may_use - block_rsv_size;
|
|
|
|
@@ -1198,16 +1198,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
|
|
to_reclaim = delalloc_size;
|
|
flush = FLUSH_DELALLOC;
|
|
} else if (space_info->bytes_pinned >
|
|
- (delayed_block_rsv->reserved +
|
|
- delayed_refs_rsv->reserved)) {
|
|
+ (btrfs_block_rsv_reserved(delayed_block_rsv) +
|
|
+ btrfs_block_rsv_reserved(delayed_refs_rsv))) {
|
|
to_reclaim = space_info->bytes_pinned;
|
|
flush = COMMIT_TRANS;
|
|
- } else if (delayed_block_rsv->reserved >
|
|
- delayed_refs_rsv->reserved) {
|
|
- to_reclaim = delayed_block_rsv->reserved;
|
|
+ } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
|
|
+ btrfs_block_rsv_reserved(delayed_refs_rsv)) {
|
|
+ to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
|
|
flush = FLUSH_DELAYED_ITEMS_NR;
|
|
} else {
|
|
- to_reclaim = delayed_refs_rsv->reserved;
|
|
+ to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
|
|
flush = FLUSH_DELAYED_REFS_NR;
|
|
}
|
|
|
|
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
|
|
index eb4d69f53337f..3ec203bbd5593 100644
|
|
--- a/fs/f2fs/checkpoint.c
|
|
+++ b/fs/f2fs/checkpoint.c
|
|
@@ -70,7 +70,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
|
|
.old_blkaddr = index,
|
|
.new_blkaddr = index,
|
|
.encrypted_page = NULL,
|
|
- .is_por = !is_meta,
|
|
+ .is_por = !is_meta ? 1 : 0,
|
|
};
|
|
int err;
|
|
|
|
@@ -234,8 +234,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
|
|
.op = REQ_OP_READ,
|
|
.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
|
|
.encrypted_page = NULL,
|
|
- .in_list = false,
|
|
- .is_por = (type == META_POR),
|
|
+ .in_list = 0,
|
|
+ .is_por = (type == META_POR) ? 1 : 0,
|
|
};
|
|
struct blk_plug plug;
|
|
int err;
|
|
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
|
|
index 967262c37da52..df6dfd7de6d0d 100644
|
|
--- a/fs/f2fs/compress.c
|
|
+++ b/fs/f2fs/compress.c
|
|
@@ -1249,10 +1249,11 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
|
|
.page = NULL,
|
|
.encrypted_page = NULL,
|
|
.compressed_page = NULL,
|
|
- .submitted = false,
|
|
+ .submitted = 0,
|
|
.io_type = io_type,
|
|
.io_wbc = wbc,
|
|
- .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
|
|
+ .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
|
|
+ 1 : 0,
|
|
};
|
|
struct dnode_of_data dn;
|
|
struct node_info ni;
|
|
@@ -1387,8 +1388,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
|
|
add_compr_block_stat(inode, cc->valid_nr_cpages);
|
|
|
|
set_inode_flag(cc->inode, FI_APPEND_WRITE);
|
|
- if (cc->cluster_idx == 0)
|
|
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
|
|
|
|
f2fs_put_dnode(&dn);
|
|
if (quota_inode)
|
|
@@ -1436,6 +1435,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
|
|
struct f2fs_sb_info *sbi = bio->bi_private;
|
|
struct compress_io_ctx *cic =
|
|
(struct compress_io_ctx *)page_private(page);
|
|
+ enum count_type type = WB_DATA_TYPE(page,
|
|
+ f2fs_is_compressed_page(page));
|
|
int i;
|
|
|
|
if (unlikely(bio->bi_status))
|
|
@@ -1443,7 +1444,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
|
|
|
|
f2fs_compress_free_page(page);
|
|
|
|
- dec_page_count(sbi, F2FS_WB_DATA);
|
|
+ dec_page_count(sbi, type);
|
|
|
|
if (atomic_dec_return(&cic->pending_pages))
|
|
return;
|
|
@@ -1459,12 +1460,14 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
|
|
}
|
|
|
|
static int f2fs_write_raw_pages(struct compress_ctx *cc,
|
|
- int *submitted,
|
|
+ int *submitted_p,
|
|
struct writeback_control *wbc,
|
|
enum iostat_type io_type)
|
|
{
|
|
struct address_space *mapping = cc->inode->i_mapping;
|
|
- int _submitted, compr_blocks, ret, i;
|
|
+ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
|
|
+ int submitted, compr_blocks, i;
|
|
+ int ret = 0;
|
|
|
|
compr_blocks = f2fs_compressed_blocks(cc);
|
|
|
|
@@ -1479,6 +1482,10 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
|
|
if (compr_blocks < 0)
|
|
return compr_blocks;
|
|
|
|
+ /* overwrite compressed cluster w/ normal cluster */
|
|
+ if (compr_blocks > 0)
|
|
+ f2fs_lock_op(sbi);
|
|
+
|
|
for (i = 0; i < cc->cluster_size; i++) {
|
|
if (!cc->rpages[i])
|
|
continue;
|
|
@@ -1503,7 +1510,7 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
|
|
if (!clear_page_dirty_for_io(cc->rpages[i]))
|
|
goto continue_unlock;
|
|
|
|
- ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
|
|
+ ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
|
|
NULL, NULL, wbc, io_type,
|
|
compr_blocks, false);
|
|
if (ret) {
|
|
@@ -1511,26 +1518,29 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
|
|
unlock_page(cc->rpages[i]);
|
|
ret = 0;
|
|
} else if (ret == -EAGAIN) {
|
|
+ ret = 0;
|
|
/*
|
|
* for quota file, just redirty left pages to
|
|
* avoid deadlock caused by cluster update race
|
|
* from foreground operation.
|
|
*/
|
|
if (IS_NOQUOTA(cc->inode))
|
|
- return 0;
|
|
- ret = 0;
|
|
+ goto out;
|
|
f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
|
|
goto retry_write;
|
|
}
|
|
- return ret;
|
|
+ goto out;
|
|
}
|
|
|
|
- *submitted += _submitted;
|
|
+ *submitted_p += submitted;
|
|
}
|
|
|
|
- f2fs_balance_fs(F2FS_M_SB(mapping), true);
|
|
+out:
|
|
+ if (compr_blocks > 0)
|
|
+ f2fs_unlock_op(sbi);
|
|
|
|
- return 0;
|
|
+ f2fs_balance_fs(sbi, true);
|
|
+ return ret;
|
|
}
|
|
|
|
int f2fs_write_multi_pages(struct compress_ctx *cc,
|
|
@@ -1833,16 +1843,18 @@ void f2fs_put_page_dic(struct page *page, bool in_task)
|
|
* check whether cluster blocks are contiguous, and add extent cache entry
|
|
* only if cluster blocks are logically and physically contiguous.
|
|
*/
|
|
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
|
|
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
|
|
+ unsigned int ofs_in_node)
|
|
{
|
|
- bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
|
|
+ bool compressed = data_blkaddr(dn->inode, dn->node_page,
|
|
+ ofs_in_node) == COMPRESS_ADDR;
|
|
int i = compressed ? 1 : 0;
|
|
block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
|
|
- dn->ofs_in_node + i);
|
|
+ ofs_in_node + i);
|
|
|
|
for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
|
|
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
|
|
- dn->ofs_in_node + i);
|
|
+ ofs_in_node + i);
|
|
|
|
if (!__is_valid_data_blkaddr(blkaddr))
|
|
break;
|
|
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
|
|
index 8b561af379743..b83b8ac29f430 100644
|
|
--- a/fs/f2fs/data.c
|
|
+++ b/fs/f2fs/data.c
|
|
@@ -50,7 +50,7 @@ void f2fs_destroy_bioset(void)
|
|
bioset_exit(&f2fs_bioset);
|
|
}
|
|
|
|
-static bool __is_cp_guaranteed(struct page *page)
|
|
+bool f2fs_is_cp_guaranteed(struct page *page)
|
|
{
|
|
struct address_space *mapping = page->mapping;
|
|
struct inode *inode;
|
|
@@ -67,8 +67,6 @@ static bool __is_cp_guaranteed(struct page *page)
|
|
S_ISDIR(inode->i_mode))
|
|
return true;
|
|
|
|
- if (f2fs_is_compressed_page(page))
|
|
- return false;
|
|
if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
|
|
page_private_gcing(page))
|
|
return true;
|
|
@@ -327,7 +325,7 @@ static void f2fs_write_end_io(struct bio *bio)
|
|
|
|
bio_for_each_segment_all(bvec, bio, iter_all) {
|
|
struct page *page = bvec->bv_page;
|
|
- enum count_type type = WB_DATA_TYPE(page);
|
|
+ enum count_type type = WB_DATA_TYPE(page, false);
|
|
|
|
if (page_private_dummy(page)) {
|
|
clear_page_private_dummy(page);
|
|
@@ -733,7 +731,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
|
|
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
|
|
|
|
inc_page_count(fio->sbi, is_read_io(fio->op) ?
|
|
- __read_io_type(page) : WB_DATA_TYPE(fio->page));
|
|
+ __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
|
|
|
|
__submit_bio(fio->sbi, bio, fio->type);
|
|
return 0;
|
|
@@ -941,7 +939,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
|
|
if (fio->io_wbc)
|
|
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
|
|
|
|
- inc_page_count(fio->sbi, WB_DATA_TYPE(page));
|
|
+ inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
|
|
|
|
*fio->last_block = fio->new_blkaddr;
|
|
*fio->bio = bio;
|
|
@@ -955,6 +953,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
|
|
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
|
|
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
|
|
struct page *bio_page;
|
|
+ enum count_type type;
|
|
|
|
f2fs_bug_on(sbi, is_read_io(fio->op));
|
|
|
|
@@ -982,9 +981,10 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
|
|
bio_page = fio->page;
|
|
|
|
/* set submitted = true as a return value */
|
|
- fio->submitted = true;
|
|
+ fio->submitted = 1;
|
|
|
|
- inc_page_count(sbi, WB_DATA_TYPE(bio_page));
|
|
+ type = WB_DATA_TYPE(bio_page, fio->compressed_page);
|
|
+ inc_page_count(sbi, type);
|
|
|
|
if (io->bio &&
|
|
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
|
|
@@ -997,8 +997,9 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
|
|
if (F2FS_IO_ALIGNED(sbi) &&
|
|
(fio->type == DATA || fio->type == NODE) &&
|
|
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
|
|
- dec_page_count(sbi, WB_DATA_TYPE(bio_page));
|
|
- fio->retry = true;
|
|
+ dec_page_count(sbi, WB_DATA_TYPE(bio_page,
|
|
+ fio->compressed_page));
|
|
+ fio->retry = 1;
|
|
goto skip;
|
|
}
|
|
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
|
|
@@ -1102,18 +1103,12 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
|
|
return 0;
|
|
}
|
|
|
|
-static void __set_data_blkaddr(struct dnode_of_data *dn)
|
|
+static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
|
|
{
|
|
- struct f2fs_node *rn = F2FS_NODE(dn->node_page);
|
|
- __le32 *addr_array;
|
|
- int base = 0;
|
|
-
|
|
- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
|
|
- base = get_extra_isize(dn->inode);
|
|
+ __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
|
|
|
|
- /* Get physical address of data block */
|
|
- addr_array = blkaddr_in_node(rn);
|
|
- addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
|
|
+ dn->data_blkaddr = blkaddr;
|
|
+ addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
|
|
}
|
|
|
|
/*
|
|
@@ -1122,18 +1117,17 @@ static void __set_data_blkaddr(struct dnode_of_data *dn)
|
|
* ->node_page
|
|
* update block addresses in the node page
|
|
*/
|
|
-void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
|
|
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
|
|
{
|
|
f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
|
|
- __set_data_blkaddr(dn);
|
|
+ __set_data_blkaddr(dn, blkaddr);
|
|
if (set_page_dirty(dn->node_page))
|
|
dn->node_changed = true;
|
|
}
|
|
|
|
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
|
|
{
|
|
- dn->data_blkaddr = blkaddr;
|
|
- f2fs_set_data_blkaddr(dn);
|
|
+ f2fs_set_data_blkaddr(dn, blkaddr);
|
|
f2fs_update_read_extent_cache(dn);
|
|
}
|
|
|
|
@@ -1148,7 +1142,8 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
|
|
|
|
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
|
|
return -EPERM;
|
|
- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
|
|
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
|
|
+ if (unlikely(err))
|
|
return err;
|
|
|
|
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
|
|
@@ -1160,8 +1155,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
|
|
block_t blkaddr = f2fs_data_blkaddr(dn);
|
|
|
|
if (blkaddr == NULL_ADDR) {
|
|
- dn->data_blkaddr = NEW_ADDR;
|
|
- __set_data_blkaddr(dn);
|
|
+ __set_data_blkaddr(dn, NEW_ADDR);
|
|
count--;
|
|
}
|
|
}
|
|
@@ -1419,13 +1413,12 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
|
|
return err;
|
|
|
|
dn->data_blkaddr = f2fs_data_blkaddr(dn);
|
|
- if (dn->data_blkaddr != NULL_ADDR)
|
|
- goto alloc;
|
|
-
|
|
- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
|
|
- return err;
|
|
+ if (dn->data_blkaddr == NULL_ADDR) {
|
|
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
|
|
+ if (unlikely(err))
|
|
+ return err;
|
|
+ }
|
|
|
|
-alloc:
|
|
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
|
|
old_blkaddr = dn->data_blkaddr;
|
|
f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
|
|
@@ -2739,8 +2732,6 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
|
|
f2fs_outplace_write_data(&dn, fio);
|
|
trace_f2fs_do_write_data_page(page, OPU);
|
|
set_inode_flag(inode, FI_APPEND_WRITE);
|
|
- if (page->index == 0)
|
|
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
|
|
out_writepage:
|
|
f2fs_put_dnode(&dn);
|
|
out:
|
|
@@ -2776,10 +2767,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
|
|
.old_blkaddr = NULL_ADDR,
|
|
.page = page,
|
|
.encrypted_page = NULL,
|
|
- .submitted = false,
|
|
+ .submitted = 0,
|
|
.compr_blocks = compr_blocks,
|
|
- .need_lock = LOCK_RETRY,
|
|
- .post_read = f2fs_post_read_required(inode),
|
|
+ .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
|
|
+ .post_read = f2fs_post_read_required(inode) ? 1 : 0,
|
|
.io_type = io_type,
|
|
.io_wbc = wbc,
|
|
.bio = bio,
|
|
@@ -2819,9 +2810,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
|
|
|
|
zero_user_segment(page, offset, PAGE_SIZE);
|
|
write:
|
|
- if (f2fs_is_drop_cache(inode))
|
|
- goto out;
|
|
-
|
|
/* Dentry/quota blocks are controlled by checkpoint */
|
|
if (S_ISDIR(inode->i_mode) || quota_inode) {
|
|
/*
|
|
@@ -2858,6 +2846,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
|
|
if (err == -EAGAIN) {
|
|
err = f2fs_do_write_data_page(&fio);
|
|
if (err == -EAGAIN) {
|
|
+ f2fs_bug_on(sbi, compr_blocks);
|
|
fio.need_lock = LOCK_REQ;
|
|
err = f2fs_do_write_data_page(&fio);
|
|
}
|
|
@@ -2902,7 +2891,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
|
|
}
|
|
|
|
if (submitted)
|
|
- *submitted = fio.submitted ? 1 : 0;
|
|
+ *submitted = fio.submitted;
|
|
|
|
return 0;
|
|
|
|
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
|
|
index e5a9498b89c06..5ae1c4aa3ae92 100644
|
|
--- a/fs/f2fs/f2fs.h
|
|
+++ b/fs/f2fs/f2fs.h
|
|
@@ -74,6 +74,11 @@ struct f2fs_fault_info {
|
|
|
|
extern const char *f2fs_fault_name[FAULT_MAX];
|
|
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
|
|
+
|
|
+/* maximum retry count for injected failure */
|
|
+#define DEFAULT_FAILURE_RETRY_COUNT 8
|
|
+#else
|
|
+#define DEFAULT_FAILURE_RETRY_COUNT 1
|
|
#endif
|
|
|
|
/*
|
|
@@ -764,8 +769,6 @@ enum {
|
|
FI_UPDATE_WRITE, /* inode has in-place-update data */
|
|
FI_NEED_IPU, /* used for ipu per file */
|
|
FI_ATOMIC_FILE, /* indicate atomic file */
|
|
- FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
|
|
- FI_DROP_CACHE, /* drop dirty page cache */
|
|
FI_DATA_EXIST, /* indicate data exists */
|
|
FI_INLINE_DOTS, /* indicate inline dot dentries */
|
|
FI_SKIP_WRITES, /* should skip data page writeback */
|
|
@@ -1067,7 +1070,8 @@ struct f2fs_sm_info {
|
|
* f2fs monitors the number of several block types such as on-writeback,
|
|
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
|
|
*/
|
|
-#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
|
|
+#define WB_DATA_TYPE(p, f) \
|
|
+ (f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
|
|
enum count_type {
|
|
F2FS_DIRTY_DENTS,
|
|
F2FS_DIRTY_DATA,
|
|
@@ -1183,19 +1187,19 @@ struct f2fs_io_info {
|
|
struct page *encrypted_page; /* encrypted page */
|
|
struct page *compressed_page; /* compressed page */
|
|
struct list_head list; /* serialize IOs */
|
|
- bool submitted; /* indicate IO submission */
|
|
- int need_lock; /* indicate we need to lock cp_rwsem */
|
|
- bool in_list; /* indicate fio is in io_list */
|
|
- bool is_por; /* indicate IO is from recovery or not */
|
|
- bool retry; /* need to reallocate block address */
|
|
- int compr_blocks; /* # of compressed block addresses */
|
|
- bool encrypted; /* indicate file is encrypted */
|
|
- bool post_read; /* require post read */
|
|
+ unsigned int compr_blocks; /* # of compressed block addresses */
|
|
+ unsigned int need_lock:8; /* indicate we need to lock cp_rwsem */
|
|
+ unsigned int version:8; /* version of the node */
|
|
+ unsigned int submitted:1; /* indicate IO submission */
|
|
+ unsigned int in_list:1; /* indicate fio is in io_list */
|
|
+ unsigned int is_por:1; /* indicate IO is from recovery or not */
|
|
+ unsigned int retry:1; /* need to reallocate block address */
|
|
+ unsigned int encrypted:1; /* indicate file is encrypted */
|
|
+ unsigned int post_read:1; /* require post read */
|
|
enum iostat_type io_type; /* io type */
|
|
struct writeback_control *io_wbc; /* writeback control */
|
|
struct bio **bio; /* bio for ipu */
|
|
sector_t *last_block; /* last block number in bio */
|
|
- unsigned char version; /* version of the node */
|
|
};
|
|
|
|
struct bio_entry {
|
|
@@ -2287,7 +2291,7 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
|
|
|
|
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
|
|
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
|
|
- struct inode *inode, blkcnt_t *count)
|
|
+ struct inode *inode, blkcnt_t *count, bool partial)
|
|
{
|
|
blkcnt_t diff = 0, release = 0;
|
|
block_t avail_user_block_count;
|
|
@@ -2328,6 +2332,11 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
|
|
avail_user_block_count = 0;
|
|
}
|
|
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
|
|
+ if (!partial) {
|
|
+ spin_unlock(&sbi->stat_lock);
|
|
+ goto enospc;
|
|
+ }
|
|
+
|
|
diff = sbi->total_valid_block_count - avail_user_block_count;
|
|
if (diff > *count)
|
|
diff = *count;
|
|
@@ -3247,22 +3256,13 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
|
|
return is_inode_flag_set(inode, FI_COW_FILE);
|
|
}
|
|
|
|
-static inline bool f2fs_is_first_block_written(struct inode *inode)
|
|
-{
|
|
- return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
|
|
-}
|
|
-
|
|
-static inline bool f2fs_is_drop_cache(struct inode *inode)
|
|
-{
|
|
- return is_inode_flag_set(inode, FI_DROP_CACHE);
|
|
-}
|
|
-
|
|
+static inline __le32 *get_dnode_addr(struct inode *inode,
|
|
+ struct page *node_page);
|
|
static inline void *inline_data_addr(struct inode *inode, struct page *page)
|
|
{
|
|
- struct f2fs_inode *ri = F2FS_INODE(page);
|
|
- int extra_size = get_extra_isize(inode);
|
|
+ __le32 *addr = get_dnode_addr(inode, page);
|
|
|
|
- return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
|
|
+ return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
|
|
}
|
|
|
|
static inline int f2fs_has_inline_dentry(struct inode *inode)
|
|
@@ -3397,6 +3397,17 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
|
|
return F2FS_I(inode)->i_inline_xattr_size;
|
|
}
|
|
|
|
+static inline __le32 *get_dnode_addr(struct inode *inode,
|
|
+ struct page *node_page)
|
|
+{
|
|
+ int base = 0;
|
|
+
|
|
+ if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
|
|
+ base = get_extra_isize(inode);
|
|
+
|
|
+ return blkaddr_in_node(F2FS_NODE(node_page)) + base;
|
|
+}
|
|
+
|
|
#define f2fs_get_inode_mode(i) \
|
|
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
|
|
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
|
|
@@ -3761,6 +3772,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
|
|
*/
|
|
int __init f2fs_init_bioset(void);
|
|
void f2fs_destroy_bioset(void);
|
|
+bool f2fs_is_cp_guaranteed(struct page *page);
|
|
int f2fs_init_bio_entry_cache(void);
|
|
void f2fs_destroy_bio_entry_cache(void);
|
|
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
|
|
@@ -3779,7 +3791,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio);
|
|
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
|
|
block_t blk_addr, sector_t *sector);
|
|
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
|
|
-void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
|
|
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
|
|
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
|
|
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
|
|
int f2fs_reserve_new_block(struct dnode_of_data *dn);
|
|
@@ -4246,7 +4258,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
|
|
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
|
|
bool in_task);
|
|
void f2fs_put_page_dic(struct page *page, bool in_task);
|
|
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
|
|
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
|
|
+ unsigned int ofs_in_node);
|
|
int f2fs_init_compress_ctx(struct compress_ctx *cc);
|
|
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
|
|
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
|
|
@@ -4303,7 +4316,8 @@ static inline void f2fs_put_page_dic(struct page *page, bool in_task)
|
|
{
|
|
WARN_ON_ONCE(1);
|
|
}
|
|
-static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
|
|
+static inline unsigned int f2fs_cluster_blocks_are_contiguous(
|
|
+ struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
|
|
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
|
|
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
|
|
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
|
|
@@ -4360,15 +4374,24 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
|
|
{
|
|
struct f2fs_inode_info *fi = F2FS_I(inode);
|
|
|
|
- if (!f2fs_compressed_file(inode))
|
|
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
|
|
+
|
|
+ if (!f2fs_compressed_file(inode)) {
|
|
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
|
|
return true;
|
|
- if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
|
|
+ }
|
|
+ if (f2fs_is_mmap_file(inode) ||
|
|
+ (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
|
|
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
|
|
return false;
|
|
+ }
|
|
|
|
fi->i_flags &= ~F2FS_COMPR_FL;
|
|
stat_dec_compr_inode(inode);
|
|
clear_inode_flag(inode, FI_COMPRESSED_FILE);
|
|
f2fs_mark_inode_dirty_sync(inode, true);
|
|
+
|
|
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
|
|
return true;
|
|
}
|
|
|
|
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
|
|
index 46e4960a9dcf7..2fbc8d89c600b 100644
|
|
--- a/fs/f2fs/file.c
|
|
+++ b/fs/f2fs/file.c
|
|
@@ -560,20 +560,14 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
|
|
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
|
|
{
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
|
|
- struct f2fs_node *raw_node;
|
|
int nr_free = 0, ofs = dn->ofs_in_node, len = count;
|
|
__le32 *addr;
|
|
- int base = 0;
|
|
bool compressed_cluster = false;
|
|
int cluster_index = 0, valid_blocks = 0;
|
|
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
|
|
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
|
|
|
|
- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
|
|
- base = get_extra_isize(dn->inode);
|
|
-
|
|
- raw_node = F2FS_NODE(dn->node_page);
|
|
- addr = blkaddr_in_node(raw_node) + base + ofs;
|
|
+ addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
|
|
|
|
/* Assumption: truncateion starts with cluster */
|
|
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
|
|
@@ -591,8 +585,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
|
|
if (blkaddr == NULL_ADDR)
|
|
continue;
|
|
|
|
- dn->data_blkaddr = NULL_ADDR;
|
|
- f2fs_set_data_blkaddr(dn);
|
|
+ f2fs_set_data_blkaddr(dn, NULL_ADDR);
|
|
|
|
if (__is_valid_data_blkaddr(blkaddr)) {
|
|
if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
|
|
@@ -602,9 +595,6 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
|
|
valid_blocks++;
|
|
}
|
|
|
|
- if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
|
|
- clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
|
|
-
|
|
f2fs_invalidate_blocks(sbi, blkaddr);
|
|
|
|
if (!released || blkaddr != COMPRESS_ADDR)
|
|
@@ -1497,8 +1487,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
|
|
}
|
|
|
|
f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
|
|
- dn->data_blkaddr = NEW_ADDR;
|
|
- f2fs_set_data_blkaddr(dn);
|
|
+ f2fs_set_data_blkaddr(dn, NEW_ADDR);
|
|
}
|
|
|
|
f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
|
|
@@ -3449,8 +3438,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
|
|
if (blkaddr != NEW_ADDR)
|
|
continue;
|
|
|
|
- dn->data_blkaddr = NULL_ADDR;
|
|
- f2fs_set_data_blkaddr(dn);
|
|
+ f2fs_set_data_blkaddr(dn, NULL_ADDR);
|
|
}
|
|
|
|
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
|
|
@@ -3474,7 +3462,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
|
|
int ret;
|
|
int writecount;
|
|
|
|
- if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
|
|
+ if (!f2fs_sb_has_compression(sbi))
|
|
return -EOPNOTSUPP;
|
|
|
|
if (!f2fs_compressed_file(inode))
|
|
@@ -3487,7 +3475,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- f2fs_balance_fs(F2FS_I_SB(inode), true);
|
|
+ f2fs_balance_fs(sbi, true);
|
|
|
|
inode_lock(inode);
|
|
|
|
@@ -3573,10 +3561,10 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
|
|
return ret;
|
|
}
|
|
|
|
-static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
|
|
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
|
|
+ unsigned int *reserved_blocks)
|
|
{
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
|
|
- unsigned int reserved_blocks = 0;
|
|
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
|
|
block_t blkaddr;
|
|
int i;
|
|
@@ -3599,41 +3587,53 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
|
|
blkcnt_t reserved;
|
|
int ret;
|
|
|
|
- for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
|
|
- blkaddr = f2fs_data_blkaddr(dn);
|
|
+ for (i = 0; i < cluster_size; i++) {
|
|
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
|
|
+ dn->ofs_in_node + i);
|
|
|
|
if (i == 0) {
|
|
- if (blkaddr == COMPRESS_ADDR)
|
|
- continue;
|
|
- dn->ofs_in_node += cluster_size;
|
|
- goto next;
|
|
+ if (blkaddr != COMPRESS_ADDR) {
|
|
+ dn->ofs_in_node += cluster_size;
|
|
+ goto next;
|
|
+ }
|
|
+ continue;
|
|
}
|
|
|
|
- if (__is_valid_data_blkaddr(blkaddr)) {
|
|
+ /*
|
|
+ * compressed cluster was not released due to it
|
|
+ * fails in release_compress_blocks(), so NEW_ADDR
|
|
+ * is a possible case.
|
|
+ */
|
|
+ if (blkaddr == NEW_ADDR ||
|
|
+ __is_valid_data_blkaddr(blkaddr)) {
|
|
compr_blocks++;
|
|
continue;
|
|
}
|
|
-
|
|
- dn->data_blkaddr = NEW_ADDR;
|
|
- f2fs_set_data_blkaddr(dn);
|
|
}
|
|
|
|
reserved = cluster_size - compr_blocks;
|
|
- ret = inc_valid_block_count(sbi, dn->inode, &reserved);
|
|
- if (ret)
|
|
+
|
|
+ /* for the case all blocks in cluster were reserved */
|
|
+ if (reserved == 1)
|
|
+ goto next;
|
|
+
|
|
+ ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
|
|
+ if (unlikely(ret))
|
|
return ret;
|
|
|
|
- if (reserved != cluster_size - compr_blocks)
|
|
- return -ENOSPC;
|
|
+ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
|
|
+ if (f2fs_data_blkaddr(dn) == NULL_ADDR)
|
|
+ f2fs_set_data_blkaddr(dn, NEW_ADDR);
|
|
+ }
|
|
|
|
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
|
|
|
|
- reserved_blocks += reserved;
|
|
+ *reserved_blocks += reserved;
|
|
next:
|
|
count -= cluster_size;
|
|
}
|
|
|
|
- return reserved_blocks;
|
|
+ return 0;
|
|
}
|
|
|
|
static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
|
|
@@ -3644,7 +3644,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
|
|
unsigned int reserved_blocks = 0;
|
|
int ret;
|
|
|
|
- if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
|
|
+ if (!f2fs_sb_has_compression(sbi))
|
|
return -EOPNOTSUPP;
|
|
|
|
if (!f2fs_compressed_file(inode))
|
|
@@ -3657,10 +3657,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
|
|
- goto out;
|
|
-
|
|
- f2fs_balance_fs(F2FS_I_SB(inode), true);
|
|
+ f2fs_balance_fs(sbi, true);
|
|
|
|
inode_lock(inode);
|
|
|
|
@@ -3669,6 +3666,9 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
|
|
goto unlock_inode;
|
|
}
|
|
|
|
+ if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
|
|
+ goto unlock_inode;
|
|
+
|
|
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
|
|
filemap_invalidate_lock(inode->i_mapping);
|
|
|
|
@@ -3694,7 +3694,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
|
|
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
|
|
count = round_up(count, F2FS_I(inode)->i_cluster_size);
|
|
|
|
- ret = reserve_compress_blocks(&dn, count);
|
|
+ ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
|
|
|
|
f2fs_put_dnode(&dn);
|
|
|
|
@@ -3702,23 +3702,21 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
|
|
break;
|
|
|
|
page_idx += count;
|
|
- reserved_blocks += ret;
|
|
}
|
|
|
|
filemap_invalidate_unlock(inode->i_mapping);
|
|
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
|
|
|
|
- if (ret >= 0) {
|
|
+ if (!ret) {
|
|
clear_inode_flag(inode, FI_COMPRESS_RELEASED);
|
|
inode->i_ctime = current_time(inode);
|
|
f2fs_mark_inode_dirty_sync(inode, true);
|
|
}
|
|
unlock_inode:
|
|
inode_unlock(inode);
|
|
-out:
|
|
mnt_drop_write_file(filp);
|
|
|
|
- if (ret >= 0) {
|
|
+ if (!ret) {
|
|
ret = put_user(reserved_blocks, (u64 __user *)arg);
|
|
} else if (reserved_blocks &&
|
|
atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
|
|
@@ -3967,16 +3965,20 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
|
|
sizeof(option)))
|
|
return -EFAULT;
|
|
|
|
- if (!f2fs_compressed_file(inode) ||
|
|
- option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
|
|
- option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
|
|
- option.algorithm >= COMPRESS_MAX)
|
|
+ if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
|
|
+ option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
|
|
+ option.algorithm >= COMPRESS_MAX)
|
|
return -EINVAL;
|
|
|
|
file_start_write(filp);
|
|
inode_lock(inode);
|
|
|
|
f2fs_down_write(&F2FS_I(inode)->i_sem);
|
|
+ if (!f2fs_compressed_file(inode)) {
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
|
|
ret = -EBUSY;
|
|
goto out;
|
|
@@ -4066,7 +4068,7 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
|
|
if (!f2fs_compressed_file(inode))
|
|
return -EINVAL;
|
|
|
|
- f2fs_balance_fs(F2FS_I_SB(inode), true);
|
|
+ f2fs_balance_fs(sbi, true);
|
|
|
|
file_start_write(filp);
|
|
inode_lock(inode);
|
|
@@ -4138,7 +4140,7 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
|
|
if (!f2fs_compressed_file(inode))
|
|
return -EINVAL;
|
|
|
|
- f2fs_balance_fs(F2FS_I_SB(inode), true);
|
|
+ f2fs_balance_fs(sbi, true);
|
|
|
|
file_start_write(filp);
|
|
inode_lock(inode);
|
|
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
|
|
index ec7212f7a9b73..d4662ccb94c8f 100644
|
|
--- a/fs/f2fs/gc.c
|
|
+++ b/fs/f2fs/gc.c
|
|
@@ -1187,8 +1187,8 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
|
|
.op = REQ_OP_READ,
|
|
.op_flags = 0,
|
|
.encrypted_page = NULL,
|
|
- .in_list = false,
|
|
- .retry = false,
|
|
+ .in_list = 0,
|
|
+ .retry = 0,
|
|
};
|
|
int err;
|
|
|
|
@@ -1276,8 +1276,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
|
|
.op = REQ_OP_READ,
|
|
.op_flags = 0,
|
|
.encrypted_page = NULL,
|
|
- .in_list = false,
|
|
- .retry = false,
|
|
+ .in_list = 0,
|
|
+ .retry = 0,
|
|
};
|
|
struct dnode_of_data dn;
|
|
struct f2fs_summary sum;
|
|
@@ -1410,8 +1410,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
|
|
|
|
f2fs_update_data_blkaddr(&dn, newaddr);
|
|
set_inode_flag(inode, FI_APPEND_WRITE);
|
|
- if (page->index == 0)
|
|
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
|
|
put_page_out:
|
|
f2fs_put_page(fio.encrypted_page, 1);
|
|
recover_block:
|
|
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 0010579f17368..869bb6ec107cc 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -59,49 +59,31 @@ void f2fs_set_inode_flags(struct inode *inode)
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

-static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+static void __get_inode_rdev(struct inode *inode, struct page *node_page)
{
-	int extra_size = get_extra_isize(inode);
+	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
-		if (ri->i_addr[extra_size])
-			inode->i_rdev = old_decode_dev(
-				le32_to_cpu(ri->i_addr[extra_size]));
+		if (addr[0])
+			inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
		else
-			inode->i_rdev = new_decode_dev(
-				le32_to_cpu(ri->i_addr[extra_size + 1]));
+			inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
	}
}

-static int __written_first_block(struct f2fs_sb_info *sbi,
-					struct f2fs_inode *ri)
+static void __set_inode_rdev(struct inode *inode, struct page *node_page)
{
-	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
-
-	if (!__is_valid_data_blkaddr(addr))
-		return 1;
-	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) {
-		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
-		return -EFSCORRUPTED;
-	}
-	return 0;
-}
-
-static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
-{
-	int extra_size = get_extra_isize(inode);
+	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
-			ri->i_addr[extra_size] =
-				cpu_to_le32(old_encode_dev(inode->i_rdev));
-			ri->i_addr[extra_size + 1] = 0;
+			addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
+			addr[1] = 0;
		} else {
-			ri->i_addr[extra_size] = 0;
-			ri->i_addr[extra_size + 1] =
-				cpu_to_le32(new_encode_dev(inode->i_rdev));
-			ri->i_addr[extra_size + 2] = 0;
+			addr[0] = 0;
+			addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
+			addr[2] = 0;
		}
	}
}
@@ -336,7 +318,6 @@ static int do_read_inode(struct inode *inode)
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
-	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
@@ -415,17 +396,7 @@ static int do_read_inode(struct inode *inode)
	}

	/* get rdev by using inline_info */
-	__get_inode_rdev(inode, ri);
-
-	if (S_ISREG(inode->i_mode)) {
-		err = __written_first_block(sbi, ri);
-		if (err < 0) {
-			f2fs_put_page(node_page, 1);
-			return err;
-		}
-		if (!err)
-			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
-	}
+	__get_inode_rdev(inode, node_page);

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;
@@ -697,7 +668,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
		}
	}

-	__set_inode_rdev(inode, ri);
+	__set_inode_rdev(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index c6d0e07096326..fcf22a50ff5db 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -850,21 +850,29 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
-		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
+		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+		unsigned int ofs_in_node = dn->ofs_in_node;
+		pgoff_t fofs = index;
+		unsigned int c_len;
		block_t blkaddr;

+		/* should align fofs and ofs_in_node to cluster_size */
+		if (fofs % cluster_size) {
+			fofs = round_down(fofs, cluster_size);
+			ofs_in_node = round_down(ofs_in_node, cluster_size);
+		}
+
+		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

-		blkaddr = f2fs_data_blkaddr(dn);
+		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
-						dn->ofs_in_node + 1);
+						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
-					index, blkaddr,
-					F2FS_I(dn->inode)->i_cluster_size,
-					c_len);
+					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;
@@ -1587,7 +1595,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
-		.submitted = false,
+		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 53a6487f91e44..f5efc37a2b513 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -582,6 +582,19 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
	return 0;
}

+static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
+{
+	int i, err = 0;
+
+	for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
+		err = f2fs_reserve_new_block(dn);
+		if (!err)
+			break;
+	}
+
+	return err;
+}
+
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
@@ -683,14 +696,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
-			do {
-				err = f2fs_reserve_new_block(&dn);
-				if (err == -ENOSPC) {
-					f2fs_bug_on(sbi, 1);
-					break;
-				}
-			} while (err &&
-				IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+
+			err = f2fs_reserve_new_block_retry(&dn);
			if (err)
				goto err;
			continue;
@@ -698,16 +705,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
-
			if (src == NULL_ADDR) {
-				do {
-					err = f2fs_reserve_new_block(&dn);
-					if (err == -ENOSPC) {
-						f2fs_bug_on(sbi, 1);
-						break;
-					}
-				} while (err &&
-					IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+				err = f2fs_reserve_new_block_retry(&dn);
				if (err)
					goto err;
			}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 16bf9d5c8d4f9..aa1ba2fdfe00d 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -247,7 +247,7 @@ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
	} else {
		blkcnt_t count = 1;

-		err = inc_valid_block_count(sbi, inode, &count);
+		err = inc_valid_block_count(sbi, inode, &count, true);
		if (err) {
			f2fs_put_dnode(&dn);
			return err;
@@ -3312,10 +3312,10 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_bio_info *io;

		if (F2FS_IO_ALIGNED(sbi))
-			fio->retry = false;
+			fio->retry = 0;

		INIT_LIST_HEAD(&fio->list);
-		fio->in_list = true;
+		fio->in_list = 1;
		io = sbi->write_io[fio->type] + fio->temp;
		spin_lock(&io->io_lock);
		list_add_tail(&fio->list, &io->io_list);
@@ -3396,7 +3396,7 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
		.new_blkaddr = page->index,
		.page = page,
		.encrypted_page = NULL,
-		.in_list = false,
+		.in_list = 0,
	};

	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index f3951e8ad3948..aa9ad85e0901d 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -586,23 +586,22 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
			unsigned int node_blocks, unsigned int dent_blocks)
{

-	unsigned int segno, left_blocks;
+	unsigned segno, left_blocks;
	int i;

-	/* check current node segment */
+	/* check current node sections in the worst case. */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
-		left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
-			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
-
+		left_blocks = CAP_BLKS_PER_SEC(sbi) -
+				get_ckpt_valid_blocks(sbi, segno, true);
		if (node_blocks > left_blocks)
			return false;
	}

-	/* check current data segment */
+	/* check current data section for dentry blocks. */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
-	left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
-			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+	left_blocks = CAP_BLKS_PER_SEC(sbi) -
+			get_ckpt_valid_blocks(sbi, segno, true);
	if (dent_blocks > left_blocks)
		return false;
	return true;
@@ -651,7 +650,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,

	if (free_secs > upper_secs)
		return false;
-	else if (free_secs <= lower_secs)
+	if (free_secs <= lower_secs)
		return true;
	return !curseg_space;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 0c0d0671febea..c529ce5d986cc 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -649,7 +649,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
-	unsigned int level;
+	int level;
	int len = 4;

	if (strlen(str) == len) {
@@ -663,9 +663,15 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
-	if (kstrtouint(str + 1, 10, &level))
+	if (kstrtoint(str + 1, 10, &level))
		return -EINVAL;

+	/* f2fs does not support negative compress level now */
+	if (level < 0) {
+		f2fs_info(sbi, "do not support negative compress level: %d", level);
+		return -ERANGE;
+	}
+
	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
		f2fs_info(sbi, "invalid zstd compress level: %d", level);
		return -EINVAL;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 146c9ab0cd4b7..0964e5dbf0cac 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -267,7 +267,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
}
#endif

-static bool rw_hint_valid(enum rw_hint hint)
+static bool rw_hint_valid(u64 hint)
{
	switch (hint) {
	case RWH_WRITE_LIFE_NOT_SET:
@@ -287,19 +287,17 @@ static long fcntl_rw_hint(struct file *file, unsigned int cmd,
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
-	enum rw_hint hint;
-	u64 h;
+	u64 hint;

	switch (cmd) {
	case F_GET_RW_HINT:
-		h = inode->i_write_hint;
-		if (copy_to_user(argp, &h, sizeof(*argp)))
+		hint = inode->i_write_hint;
+		if (copy_to_user(argp, &hint, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_RW_HINT:
-		if (copy_from_user(&h, argp, sizeof(h)))
+		if (copy_from_user(&hint, argp, sizeof(hint)))
			return -EFAULT;
-		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

diff --git a/fs/fhandle.c b/fs/fhandle.c
index f2bc27d1975e1..a8c25557c8c12 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -37,7 +37,7 @@ static long do_sys_name_to_handle(const struct path *path,
	if (f_handle.handle_bytes > MAX_HANDLE_SZ)
		return -EINVAL;

-	handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+	handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
			 GFP_KERNEL);
	if (!handle)
		return -ENOMEM;
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
|
|
index 01596f2d0a1ed..9fe9586a51b71 100644
|
|
--- a/fs/nfs/export.c
|
|
+++ b/fs/nfs/export.c
|
|
@@ -156,7 +156,10 @@ const struct export_operations nfs_export_ops = {
|
|
.fh_to_dentry = nfs_fh_to_dentry,
|
|
.get_parent = nfs_get_parent,
|
|
.fetch_iversion = nfs_fetch_iversion,
|
|
- .flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
|
|
- EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
|
|
- EXPORT_OP_NOATOMIC_ATTR,
|
|
+ .flags = EXPORT_OP_NOWCC |
|
|
+ EXPORT_OP_NOSUBTREECHK |
|
|
+ EXPORT_OP_CLOSE_BEFORE_UNLINK |
|
|
+ EXPORT_OP_REMOTE_FS |
|
|
+ EXPORT_OP_NOATOMIC_ATTR |
|
|
+ EXPORT_OP_FLUSH_ON_CLOSE,
|
|
};
|
|
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
index 81bbafab18a99..4376881be7918 100644
|
|
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
@@ -2016,7 +2016,7 @@ static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
|
|
for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
|
|
mirror = flseg->mirror_array[idx];
|
|
mirror_ds = mirror->mirror_ds;
|
|
- if (!mirror_ds)
|
|
+ if (IS_ERR_OR_NULL(mirror_ds))
|
|
continue;
|
|
ds = mirror->mirror_ds->ds;
|
|
if (!ds)
|
|
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
|
|
index b59876b01a1e3..0282d93c8bccb 100644
|
|
--- a/fs/nfs/nfs42.h
|
|
+++ b/fs/nfs/nfs42.h
|
|
@@ -55,11 +55,14 @@ int nfs42_proc_removexattr(struct inode *inode, const char *name);
|
|
* They would be 7 bytes long in the eventual buffer ("user.x\0"), and
|
|
* 8 bytes long XDR-encoded.
|
|
*
|
|
- * Include the trailing eof word as well.
|
|
+ * Include the trailing eof word as well and make the result a multiple
|
|
+ * of 4 bytes.
|
|
*/
|
|
static inline u32 nfs42_listxattr_xdrsize(u32 buflen)
|
|
{
|
|
- return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4;
|
|
+ u32 size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4;
|
|
+
|
|
+ return (size + 3) & ~3;
|
|
}
|
|
#endif /* CONFIG_NFS_V4_2 */
|
|
#endif /* __LINUX_FS_NFS_NFS4_2_H */
|
|
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
|
|
index ec3f0103e1a7f..7cc74f7451d67 100644
|
|
--- a/fs/nfs/nfs4proc.c
|
|
+++ b/fs/nfs/nfs4proc.c
|
|
@@ -10592,29 +10592,33 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
|
|
static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
|
|
{
|
|
ssize_t error, error2, error3;
|
|
+ size_t left = size;
|
|
|
|
- error = generic_listxattr(dentry, list, size);
|
|
+ error = generic_listxattr(dentry, list, left);
|
|
if (error < 0)
|
|
return error;
|
|
if (list) {
|
|
list += error;
|
|
- size -= error;
|
|
+ left -= error;
|
|
}
|
|
|
|
- error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
|
|
+ error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
|
|
if (error2 < 0)
|
|
return error2;
|
|
|
|
if (list) {
|
|
list += error2;
|
|
- size -= error2;
|
|
+ left -= error2;
|
|
}
|
|
|
|
- error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
|
|
+ error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
|
|
if (error3 < 0)
|
|
return error3;
|
|
|
|
- return error + error2 + error3;
|
|
+ error += error2 + error3;
|
|
+ if (size && error > size)
|
|
+ return -ERANGE;
|
|
+ return error;
|
|
}
|
|
|
|
static void nfs4_enable_swap(struct inode *inode)
|
|
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
|
|
index 620329b7e6aeb..0b1c1d2e076c1 100644
|
|
--- a/fs/nfs/nfsroot.c
|
|
+++ b/fs/nfs/nfsroot.c
|
|
@@ -175,10 +175,10 @@ static int __init root_nfs_cat(char *dest, const char *src,
|
|
size_t len = strlen(dest);
|
|
|
|
if (len && dest[len - 1] != ',')
|
|
- if (strlcat(dest, ",", destlen) > destlen)
|
|
+ if (strlcat(dest, ",", destlen) >= destlen)
|
|
return -1;
|
|
|
|
- if (strlcat(dest, src, destlen) > destlen)
|
|
+ if (strlcat(dest, src, destlen) >= destlen)
|
|
return -1;
|
|
return 0;
|
|
}
|
|
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
|
|
index 697acf5c3c681..ee9c923192e08 100644
|
|
--- a/fs/nfsd/filecache.c
|
|
+++ b/fs/nfsd/filecache.c
|
|
@@ -74,70 +74,9 @@ static struct list_lru nfsd_file_lru;
|
|
static unsigned long nfsd_file_flags;
|
|
static struct fsnotify_group *nfsd_file_fsnotify_group;
|
|
static struct delayed_work nfsd_filecache_laundrette;
|
|
-static struct rhashtable nfsd_file_rhash_tbl
|
|
+static struct rhltable nfsd_file_rhltable
|
|
____cacheline_aligned_in_smp;
|
|
|
|
-enum nfsd_file_lookup_type {
|
|
- NFSD_FILE_KEY_INODE,
|
|
- NFSD_FILE_KEY_FULL,
|
|
-};
|
|
-
|
|
-struct nfsd_file_lookup_key {
|
|
- struct inode *inode;
|
|
- struct net *net;
|
|
- const struct cred *cred;
|
|
- unsigned char need;
|
|
- bool gc;
|
|
- enum nfsd_file_lookup_type type;
|
|
-};
|
|
-
|
|
-/*
|
|
- * The returned hash value is based solely on the address of an in-code
|
|
- * inode, a pointer to a slab-allocated object. The entropy in such a
|
|
- * pointer is concentrated in its middle bits.
|
|
- */
|
|
-static u32 nfsd_file_inode_hash(const struct inode *inode, u32 seed)
|
|
-{
|
|
- unsigned long ptr = (unsigned long)inode;
|
|
- u32 k;
|
|
-
|
|
- k = ptr >> L1_CACHE_SHIFT;
|
|
- k &= 0x00ffffff;
|
|
- return jhash2(&k, 1, seed);
|
|
-}
|
|
-
|
|
-/**
|
|
- * nfsd_file_key_hashfn - Compute the hash value of a lookup key
|
|
- * @data: key on which to compute the hash value
|
|
- * @len: rhash table's key_len parameter (unused)
|
|
- * @seed: rhash table's random seed of the day
|
|
- *
|
|
- * Return value:
|
|
- * Computed 32-bit hash value
|
|
- */
|
|
-static u32 nfsd_file_key_hashfn(const void *data, u32 len, u32 seed)
|
|
-{
|
|
- const struct nfsd_file_lookup_key *key = data;
|
|
-
|
|
- return nfsd_file_inode_hash(key->inode, seed);
|
|
-}
|
|
-
|
|
-/**
|
|
- * nfsd_file_obj_hashfn - Compute the hash value of an nfsd_file
|
|
- * @data: object on which to compute the hash value
|
|
- * @len: rhash table's key_len parameter (unused)
|
|
- * @seed: rhash table's random seed of the day
|
|
- *
|
|
- * Return value:
|
|
- * Computed 32-bit hash value
|
|
- */
|
|
-static u32 nfsd_file_obj_hashfn(const void *data, u32 len, u32 seed)
|
|
-{
|
|
- const struct nfsd_file *nf = data;
|
|
-
|
|
- return nfsd_file_inode_hash(nf->nf_inode, seed);
|
|
-}
|
|
-
|
|
static bool
|
|
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
|
|
{
|
|
@@ -158,53 +97,16 @@ nfsd_match_cred(const struct cred *c1, const struct cred *c2)
|
|
return true;
|
|
}
|
|
|
|
-/**
|
|
- * nfsd_file_obj_cmpfn - Match a cache item against search criteria
|
|
- * @arg: search criteria
|
|
- * @ptr: cache item to check
|
|
- *
|
|
- * Return values:
|
|
- * %0 - Item matches search criteria
|
|
- * %1 - Item does not match search criteria
|
|
- */
|
|
-static int nfsd_file_obj_cmpfn(struct rhashtable_compare_arg *arg,
|
|
- const void *ptr)
|
|
-{
|
|
- const struct nfsd_file_lookup_key *key = arg->key;
|
|
- const struct nfsd_file *nf = ptr;
|
|
-
|
|
- switch (key->type) {
|
|
- case NFSD_FILE_KEY_INODE:
|
|
- if (nf->nf_inode != key->inode)
|
|
- return 1;
|
|
- break;
|
|
- case NFSD_FILE_KEY_FULL:
|
|
- if (nf->nf_inode != key->inode)
|
|
- return 1;
|
|
- if (nf->nf_may != key->need)
|
|
- return 1;
|
|
- if (nf->nf_net != key->net)
|
|
- return 1;
|
|
- if (!nfsd_match_cred(nf->nf_cred, key->cred))
|
|
- return 1;
|
|
- if (!!test_bit(NFSD_FILE_GC, &nf->nf_flags) != key->gc)
|
|
- return 1;
|
|
- if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
|
|
- return 1;
|
|
- break;
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
static const struct rhashtable_params nfsd_file_rhash_params = {
|
|
.key_len = sizeof_field(struct nfsd_file, nf_inode),
|
|
.key_offset = offsetof(struct nfsd_file, nf_inode),
|
|
- .head_offset = offsetof(struct nfsd_file, nf_rhash),
|
|
- .hashfn = nfsd_file_key_hashfn,
|
|
- .obj_hashfn = nfsd_file_obj_hashfn,
|
|
- .obj_cmpfn = nfsd_file_obj_cmpfn,
|
|
- /* Reduce resizing churn on light workloads */
|
|
- .min_size = 512, /* buckets */
|
|
+ .head_offset = offsetof(struct nfsd_file, nf_rlist),
|
|
+
|
|
+ /*
|
|
+ * Start with a single page hash table to reduce resizing churn
|
|
+ * on light workloads.
|
|
+ */
|
|
+ .min_size = 256,
|
|
.automatic_shrinking = true,
|
|
};
|
|
|
|
@@ -307,27 +209,27 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode)
|
|
}
|
|
|
|
static struct nfsd_file *
|
|
-nfsd_file_alloc(struct nfsd_file_lookup_key *key, unsigned int may)
|
|
+nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
|
|
+ bool want_gc)
|
|
{
|
|
struct nfsd_file *nf;
|
|
|
|
nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
|
|
- if (nf) {
|
|
- INIT_LIST_HEAD(&nf->nf_lru);
|
|
- nf->nf_birthtime = ktime_get();
|
|
- nf->nf_file = NULL;
|
|
- nf->nf_cred = get_current_cred();
|
|
- nf->nf_net = key->net;
|
|
- nf->nf_flags = 0;
|
|
- __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
|
|
- __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
|
|
- if (key->gc)
|
|
- __set_bit(NFSD_FILE_GC, &nf->nf_flags);
|
|
- nf->nf_inode = key->inode;
|
|
- refcount_set(&nf->nf_ref, 1);
|
|
- nf->nf_may = key->need;
|
|
- nf->nf_mark = NULL;
|
|
- }
|
|
+ if (unlikely(!nf))
|
|
+ return NULL;
|
|
+
|
|
+ INIT_LIST_HEAD(&nf->nf_lru);
|
|
+ nf->nf_birthtime = ktime_get();
|
|
+ nf->nf_file = NULL;
|
|
+ nf->nf_cred = get_current_cred();
|
|
+ nf->nf_net = net;
|
|
+ nf->nf_flags = want_gc ?
|
|
+ BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING) | BIT(NFSD_FILE_GC) :
|
|
+ BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING);
|
|
+ nf->nf_inode = inode;
|
|
+ refcount_set(&nf->nf_ref, 1);
|
|
+ nf->nf_may = need;
|
|
+ nf->nf_mark = NULL;
|
|
return nf;
|
|
}
|
|
|
|
@@ -352,8 +254,8 @@ static void
|
|
nfsd_file_hash_remove(struct nfsd_file *nf)
|
|
{
|
|
trace_nfsd_file_unhash(nf);
|
|
- rhashtable_remove_fast(&nfsd_file_rhash_tbl, &nf->nf_rhash,
|
|
- nfsd_file_rhash_params);
|
|
+ rhltable_remove(&nfsd_file_rhltable, &nf->nf_rlist,
|
|
+ nfsd_file_rhash_params);
|
|
}
|
|
|
|
static bool
|
|
@@ -380,10 +282,8 @@ nfsd_file_free(struct nfsd_file *nf)
|
|
if (nf->nf_mark)
|
|
nfsd_file_mark_put(nf->nf_mark);
|
|
if (nf->nf_file) {
|
|
- get_file(nf->nf_file);
|
|
- filp_close(nf->nf_file, NULL);
|
|
nfsd_file_check_write_error(nf);
|
|
- fput(nf->nf_file);
|
|
+ filp_close(nf->nf_file, NULL);
|
|
}
|
|
|
|
/*
|
|
@@ -402,13 +302,23 @@ nfsd_file_check_writeback(struct nfsd_file *nf)
|
|
struct file *file = nf->nf_file;
|
|
struct address_space *mapping;
|
|
|
|
- if (!file || !(file->f_mode & FMODE_WRITE))
|
|
+ /* File not open for write? */
|
|
+ if (!(file->f_mode & FMODE_WRITE))
|
|
+ return false;
|
|
+
|
|
+ /*
|
|
+ * Some filesystems (e.g. NFS) flush all dirty data on close.
|
|
+ * On others, there is no need to wait for writeback.
|
|
+ */
|
|
+ if (!(file_inode(file)->i_sb->s_export_op->flags & EXPORT_OP_FLUSH_ON_CLOSE))
|
|
return false;
|
|
+
|
|
mapping = file->f_mapping;
|
|
return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
|
|
mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
|
|
}
|
|
|
|
+
|
|
static bool nfsd_file_lru_add(struct nfsd_file *nf)
|
|
{
|
|
set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
|
|
@@ -431,7 +341,7 @@ static bool nfsd_file_lru_remove(struct nfsd_file *nf)
|
|
struct nfsd_file *
|
|
nfsd_file_get(struct nfsd_file *nf)
|
|
{
|
|
- if (likely(refcount_inc_not_zero(&nf->nf_ref)))
|
|
+ if (nf && refcount_inc_not_zero(&nf->nf_ref))
|
|
return nf;
|
|
return NULL;
|
|
}
|
|
@@ -492,49 +402,26 @@ nfsd_file_dispose_list(struct list_head *dispose)
|
|
}
|
|
}
|
|
|
|
-static void
|
|
-nfsd_file_list_remove_disposal(struct list_head *dst,
|
|
- struct nfsd_fcache_disposal *l)
|
|
-{
|
|
- spin_lock(&l->lock);
|
|
- list_splice_init(&l->freeme, dst);
|
|
- spin_unlock(&l->lock);
|
|
-}
|
|
-
|
|
-static void
|
|
-nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
|
|
-{
|
|
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
|
|
- struct nfsd_fcache_disposal *l = nn->fcache_disposal;
|
|
-
|
|
- spin_lock(&l->lock);
|
|
- list_splice_tail_init(files, &l->freeme);
|
|
- spin_unlock(&l->lock);
|
|
- queue_work(nfsd_filecache_wq, &l->work);
|
|
-}
|
|
-
|
|
-static void
|
|
-nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
|
|
- struct net *net)
|
|
-{
|
|
- struct nfsd_file *nf, *tmp;
|
|
-
|
|
- list_for_each_entry_safe(nf, tmp, src, nf_lru) {
|
|
- if (nf->nf_net == net)
|
|
- list_move_tail(&nf->nf_lru, dst);
|
|
- }
|
|
-}
|
|
-
|
|
+/**
|
|
+ * nfsd_file_dispose_list_delayed - move list of dead files to net's freeme list
|
|
+ * @dispose: list of nfsd_files to be disposed
|
|
+ *
|
|
+ * Transfers each file to the "freeme" list for its nfsd_net, to eventually
|
|
+ * be disposed of by the per-net garbage collector.
|
|
+ */
|
|
static void
|
|
nfsd_file_dispose_list_delayed(struct list_head *dispose)
|
|
{
|
|
- LIST_HEAD(list);
|
|
- struct nfsd_file *nf;
|
|
-
|
|
while(!list_empty(dispose)) {
|
|
- nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
|
|
- nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
|
|
- nfsd_file_list_add_disposal(&list, nf->nf_net);
|
|
+ struct nfsd_file *nf = list_first_entry(dispose,
|
|
+ struct nfsd_file, nf_lru);
|
|
+ struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
|
|
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
|
|
+
|
|
+ spin_lock(&l->lock);
|
|
+ list_move_tail(&nf->nf_lru, &l->freeme);
|
|
+ spin_unlock(&l->lock);
|
|
+ queue_work(nfsd_filecache_wq, &l->work);
|
|
}
|
|
}
|
|
|
|
@@ -678,8 +565,8 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
|
|
* @inode: inode on which to close out nfsd_files
|
|
* @dispose: list on which to gather nfsd_files to close out
|
|
*
|
|
- * An nfsd_file represents a struct file being held open on behalf of nfsd. An
|
|
- * open file however can block other activity (such as leases), or cause
|
|
+ * An nfsd_file represents a struct file being held open on behalf of nfsd.
|
|
+ * An open file however can block other activity (such as leases), or cause
|
|
* undesirable behavior (e.g. spurious silly-renames when reexporting NFS).
|
|
*
|
|
* This function is intended to find open nfsd_files when this sort of
|
|
@@ -692,20 +579,17 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
|
|
static void
|
|
nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
|
|
{
|
|
- struct nfsd_file_lookup_key key = {
|
|
- .type = NFSD_FILE_KEY_INODE,
|
|
- .inode = inode,
|
|
- };
|
|
+ struct rhlist_head *tmp, *list;
|
|
struct nfsd_file *nf;
|
|
|
|
rcu_read_lock();
|
|
- do {
|
|
- nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
|
|
- nfsd_file_rhash_params);
|
|
- if (!nf)
|
|
- break;
|
|
+ list = rhltable_lookup(&nfsd_file_rhltable, &inode,
|
|
+ nfsd_file_rhash_params);
|
|
+ rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
|
|
+ if (!test_bit(NFSD_FILE_GC, &nf->nf_flags))
|
|
+ continue;
|
|
nfsd_file_cond_queue(nf, dispose);
|
|
- } while (1);
|
|
+ }
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
@@ -758,8 +642,8 @@ nfsd_file_close_inode_sync(struct inode *inode)
|
|
* nfsd_file_delayed_close - close unused nfsd_files
|
|
* @work: dummy
|
|
*
|
|
- * Walk the LRU list and destroy any entries that have not been used since
|
|
- * the last scan.
|
|
+ * Scrape the freeme list for this nfsd_net, and then dispose of them
|
|
+ * all.
|
|
*/
|
|
static void
|
|
nfsd_file_delayed_close(struct work_struct *work)
|
|
@@ -768,7 +652,10 @@ nfsd_file_delayed_close(struct work_struct *work)
|
|
struct nfsd_fcache_disposal *l = container_of(work,
|
|
struct nfsd_fcache_disposal, work);
|
|
|
|
- nfsd_file_list_remove_disposal(&head, l);
|
|
+ spin_lock(&l->lock);
|
|
+ list_splice_init(&l->freeme, &head);
|
|
+ spin_unlock(&l->lock);
|
|
+
|
|
nfsd_file_dispose_list(&head);
|
|
}
|
|
|
|
@@ -829,7 +716,7 @@ nfsd_file_cache_init(void)
|
|
if (test_and_set_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
|
|
return 0;
|
|
|
|
- ret = rhashtable_init(&nfsd_file_rhash_tbl, &nfsd_file_rhash_params);
|
|
+ ret = rhltable_init(&nfsd_file_rhltable, &nfsd_file_rhash_params);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -897,7 +784,7 @@ nfsd_file_cache_init(void)
|
|
nfsd_file_mark_slab = NULL;
|
|
destroy_workqueue(nfsd_filecache_wq);
|
|
nfsd_filecache_wq = NULL;
|
|
- rhashtable_destroy(&nfsd_file_rhash_tbl);
|
|
+ rhltable_destroy(&nfsd_file_rhltable);
|
|
goto out;
|
|
}
|
|
|
|
@@ -906,7 +793,8 @@ nfsd_file_cache_init(void)
|
|
* @net: net-namespace to shut down the cache (may be NULL)
|
|
*
|
|
* Walk the nfsd_file cache and close out any that match @net. If @net is NULL,
|
|
- * then close out everything. Called when an nfsd instance is being shut down.
|
|
+ * then close out everything. Called when an nfsd instance is being shut down,
|
|
+ * and when the exports table is flushed.
|
|
*/
|
|
static void
|
|
__nfsd_file_cache_purge(struct net *net)
|
|
@@ -915,7 +803,7 @@ __nfsd_file_cache_purge(struct net *net)
|
|
struct nfsd_file *nf;
|
|
LIST_HEAD(dispose);
|
|
|
|
- rhashtable_walk_enter(&nfsd_file_rhash_tbl, &iter);
|
|
+ rhltable_walk_enter(&nfsd_file_rhltable, &iter);
|
|
do {
|
|
rhashtable_walk_start(&iter);
|
|
|
|
@@ -1021,7 +909,7 @@ nfsd_file_cache_shutdown(void)
|
|
nfsd_file_mark_slab = NULL;
|
|
destroy_workqueue(nfsd_filecache_wq);
|
|
nfsd_filecache_wq = NULL;
|
|
- rhashtable_destroy(&nfsd_file_rhash_tbl);
|
|
+ rhltable_destroy(&nfsd_file_rhltable);
|
|
|
|
for_each_possible_cpu(i) {
|
|
per_cpu(nfsd_file_cache_hits, i) = 0;
|
|
@@ -1032,6 +920,35 @@ nfsd_file_cache_shutdown(void)
|
|
}
|
|
}
|
|
|
|
+static struct nfsd_file *
|
|
+nfsd_file_lookup_locked(const struct net *net, const struct cred *cred,
|
|
+ struct inode *inode, unsigned char need,
|
|
+ bool want_gc)
|
|
+{
|
|
+ struct rhlist_head *tmp, *list;
|
|
+ struct nfsd_file *nf;
|
|
+
|
|
+ list = rhltable_lookup(&nfsd_file_rhltable, &inode,
|
|
+ nfsd_file_rhash_params);
|
|
+ rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
|
|
+ if (nf->nf_may != need)
|
|
+ continue;
|
|
+ if (nf->nf_net != net)
|
|
+ continue;
|
|
+ if (!nfsd_match_cred(nf->nf_cred, cred))
|
|
+ continue;
|
|
+ if (test_bit(NFSD_FILE_GC, &nf->nf_flags) != want_gc)
|
|
+ continue;
|
|
+ if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
|
|
+ continue;
|
|
+
|
|
+ if (!nfsd_file_get(nf))
|
|
+ continue;
|
|
+ return nf;
|
|
+ }
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
/**
|
|
* nfsd_file_is_cached - are there any cached open files for this inode?
|
|
* @inode: inode to check
|
|
@@ -1046,15 +963,20 @@ nfsd_file_cache_shutdown(void)
|
|
bool
|
|
nfsd_file_is_cached(struct inode *inode)
|
|
{
|
|
- struct nfsd_file_lookup_key key = {
|
|
- .type = NFSD_FILE_KEY_INODE,
|
|
- .inode = inode,
|
|
- };
|
|
+ struct rhlist_head *tmp, *list;
|
|
+ struct nfsd_file *nf;
|
|
bool ret = false;
|
|
|
|
- if (rhashtable_lookup_fast(&nfsd_file_rhash_tbl, &key,
|
|
- nfsd_file_rhash_params) != NULL)
|
|
- ret = true;
|
|
+ rcu_read_lock();
|
|
+ list = rhltable_lookup(&nfsd_file_rhltable, &inode,
|
|
+ nfsd_file_rhash_params);
|
|
+ rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist)
|
|
+ if (test_bit(NFSD_FILE_GC, &nf->nf_flags)) {
|
|
+ ret = true;
|
|
+ break;
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
+
|
|
trace_nfsd_file_is_cached(inode, (int)ret);
|
|
return ret;
|
|
}
|
|
@@ -1064,14 +986,12 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
unsigned int may_flags, struct file *file,
|
|
struct nfsd_file **pnf, bool want_gc)
|
|
{
|
|
- struct nfsd_file_lookup_key key = {
|
|
- .type = NFSD_FILE_KEY_FULL,
|
|
- .need = may_flags & NFSD_FILE_MAY_MASK,
|
|
- .net = SVC_NET(rqstp),
|
|
- .gc = want_gc,
|
|
- };
|
|
+ unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
|
|
+ struct net *net = SVC_NET(rqstp);
|
|
+ struct nfsd_file *new, *nf;
|
|
+ const struct cred *cred;
|
|
bool open_retry = true;
|
|
- struct nfsd_file *nf;
|
|
+ struct inode *inode;
|
|
__be32 status;
|
|
int ret;
|
|
|
|
@@ -1079,81 +999,88 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
may_flags|NFSD_MAY_OWNER_OVERRIDE);
|
|
if (status != nfs_ok)
|
|
return status;
|
|
- key.inode = d_inode(fhp->fh_dentry);
|
|
- key.cred = get_current_cred();
|
|
+ inode = d_inode(fhp->fh_dentry);
|
|
+ cred = get_current_cred();
|
|
|
|
retry:
|
|
rcu_read_lock();
|
|
- nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
|
|
- nfsd_file_rhash_params);
|
|
- if (nf)
|
|
- nf = nfsd_file_get(nf);
|
|
+ nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
|
|
rcu_read_unlock();
|
|
|
|
if (nf) {
|
|
+ /*
|
|
+ * If the nf is on the LRU then it holds an extra reference
|
|
+ * that must be put if it's removed. It had better not be
|
|
+ * the last one however, since we should hold another.
|
|
+ */
|
|
if (nfsd_file_lru_remove(nf))
|
|
WARN_ON_ONCE(refcount_dec_and_test(&nf->nf_ref));
|
|
goto wait_for_construction;
|
|
}
|
|
|
|
- nf = nfsd_file_alloc(&key, may_flags);
|
|
- if (!nf) {
|
|
+ new = nfsd_file_alloc(net, inode, need, want_gc);
|
|
+ if (!new) {
|
|
status = nfserr_jukebox;
|
|
- goto out_status;
|
|
+ goto out;
|
|
}
|
|
|
|
- ret = rhashtable_lookup_insert_key(&nfsd_file_rhash_tbl,
|
|
- &key, &nf->nf_rhash,
|
|
- nfsd_file_rhash_params);
|
|
+ rcu_read_lock();
|
|
+ spin_lock(&inode->i_lock);
|
|
+ nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
|
|
+ if (unlikely(nf)) {
|
|
+ spin_unlock(&inode->i_lock);
|
|
+ rcu_read_unlock();
|
|
+ nfsd_file_slab_free(&new->nf_rcu);
|
|
+ goto wait_for_construction;
|
|
+ }
|
|
+ nf = new;
|
|
+ ret = rhltable_insert(&nfsd_file_rhltable, &nf->nf_rlist,
|
|
+ nfsd_file_rhash_params);
|
|
+ spin_unlock(&inode->i_lock);
|
|
+ rcu_read_unlock();
|
|
if (likely(ret == 0))
|
|
goto open_file;
|
|
|
|
- nfsd_file_slab_free(&nf->nf_rcu);
|
|
- nf = NULL;
|
|
if (ret == -EEXIST)
|
|
goto retry;
|
|
- trace_nfsd_file_insert_err(rqstp, key.inode, may_flags, ret);
|
|
+ trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
|
|
status = nfserr_jukebox;
|
|
- goto out_status;
|
|
+ goto construction_err;
|
|
|
|
wait_for_construction:
|
|
wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
|
|
|
|
/* Did construction of this file fail? */
|
|
if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
|
|
- trace_nfsd_file_cons_err(rqstp, key.inode, may_flags, nf);
|
|
+ trace_nfsd_file_cons_err(rqstp, inode, may_flags, nf);
|
|
if (!open_retry) {
|
|
status = nfserr_jukebox;
|
|
- goto out;
|
|
+ goto construction_err;
|
|
}
|
|
open_retry = false;
|
|
- if (refcount_dec_and_test(&nf->nf_ref))
|
|
- nfsd_file_free(nf);
|
|
goto retry;
|
|
}
|
|
-
|
|
this_cpu_inc(nfsd_file_cache_hits);
|
|
|
|
status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
|
|
+ if (status != nfs_ok) {
|
|
+ nfsd_file_put(nf);
|
|
+ nf = NULL;
|
|
+ }
|
|
+
|
|
out:
|
|
if (status == nfs_ok) {
|
|
this_cpu_inc(nfsd_file_acquisitions);
|
|
nfsd_file_check_write_error(nf);
|
|
*pnf = nf;
|
|
- } else {
|
|
- if (refcount_dec_and_test(&nf->nf_ref))
|
|
- nfsd_file_free(nf);
|
|
- nf = NULL;
|
|
}
|
|
-
|
|
-out_status:
|
|
- put_cred(key.cred);
|
|
- trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
|
|
+ put_cred(cred);
|
|
+ trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
|
|
return status;
|
|
|
|
open_file:
|
|
trace_nfsd_file_alloc(nf);
|
|
- nf->nf_mark = nfsd_file_mark_find_or_create(nf, key.inode);
|
|
+ nf->nf_mark = nfsd_file_mark_find_or_create(nf, inode);
|
|
if (nf->nf_mark) {
|
|
if (file) {
|
|
get_file(file);
|
|
@@ -1171,13 +1098,16 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
* If construction failed, or we raced with a call to unlink()
|
|
* then unhash.
|
|
*/
|
|
- if (status == nfs_ok && key.inode->i_nlink == 0)
|
|
- status = nfserr_jukebox;
|
|
- if (status != nfs_ok)
|
|
+ if (status != nfs_ok || inode->i_nlink == 0)
|
|
nfsd_file_unhash(nf);
|
|
- clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
|
|
- smp_mb__after_atomic();
|
|
- wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
|
|
+ clear_and_wake_up_bit(NFSD_FILE_PENDING, &nf->nf_flags);
|
|
+ if (status == nfs_ok)
|
|
+ goto out;
|
|
+
|
|
+construction_err:
|
|
+ if (refcount_dec_and_test(&nf->nf_ref))
|
|
+ nfsd_file_free(nf);
|
|
+ nf = NULL;
|
|
goto out;
|
|
}
|
|
|
|
@@ -1193,8 +1123,11 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
* seconds after the final nfsd_file_put() in case the caller
|
|
* wants to re-use it.
|
|
*
|
|
- * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
|
|
- * network byte order is returned.
|
|
+ * Return values:
|
|
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
|
|
+ * count boosted.
|
|
+ *
|
|
+ * On error, an nfsstat value in network byte order is returned.
|
|
*/
|
|
__be32
|
|
nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
@@ -1214,8 +1147,11 @@ nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
* but not garbage-collected. The object is unhashed after the
|
|
* final nfsd_file_put().
|
|
*
|
|
- * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
|
|
- * network byte order is returned.
|
|
+ * Return values:
|
|
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
|
|
+ * count boosted.
|
|
+ *
|
|
+ * On error, an nfsstat value in network byte order is returned.
|
|
*/
|
|
__be32
|
|
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
@@ -1236,8 +1172,11 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
* and @file is non-NULL, use it to instantiate a new nfsd_file instead of
|
|
* opening a new one.
|
|
*
|
|
- * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
|
|
- * network byte order is returned.
|
|
+ * Return values:
|
|
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
|
|
+ * count boosted.
|
|
+ *
|
|
+ * On error, an nfsstat value in network byte order is returned.
|
|
*/
|
|
__be32
|
|
nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|
@@ -1268,7 +1207,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
|
|
lru = list_lru_count(&nfsd_file_lru);
|
|
|
|
rcu_read_lock();
|
|
- ht = &nfsd_file_rhash_tbl;
|
|
+ ht = &nfsd_file_rhltable.ht;
|
|
count = atomic_read(&ht->nelems);
|
|
tbl = rht_dereference_rcu(ht->tbl, ht);
|
|
buckets = tbl->size;
|
|
@@ -1284,7 +1223,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
|
|
evictions += per_cpu(nfsd_file_evictions, i);
|
|
}
|
|
|
|
- seq_printf(m, "total entries: %u\n", count);
|
|
+ seq_printf(m, "total inodes: %u\n", count);
|
|
seq_printf(m, "hash buckets: %u\n", buckets);
|
|
seq_printf(m, "lru entries: %lu\n", lru);
|
|
seq_printf(m, "cache hits: %lu\n", hits);
|
|
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
|
|
index 41516a4263ea5..e54165a3224f0 100644
|
|
--- a/fs/nfsd/filecache.h
|
|
+++ b/fs/nfsd/filecache.h
|
|
@@ -29,9 +29,8 @@ struct nfsd_file_mark {
|
|
* never be dereferenced, only used for comparison.
|
|
*/
|
|
struct nfsd_file {
|
|
- struct rhash_head nf_rhash;
|
|
- struct list_head nf_lru;
|
|
- struct rcu_head nf_rcu;
|
|
+ struct rhlist_head nf_rlist;
|
|
+ void *nf_inode;
|
|
struct file *nf_file;
|
|
const struct cred *nf_cred;
|
|
struct net *nf_net;
|
|
@@ -40,10 +39,12 @@ struct nfsd_file {
|
|
#define NFSD_FILE_REFERENCED (2)
|
|
#define NFSD_FILE_GC (3)
|
|
unsigned long nf_flags;
|
|
- struct inode *nf_inode; /* don't deref */
|
|
refcount_t nf_ref;
|
|
unsigned char nf_may;
|
|
+
|
|
struct nfsd_file_mark *nf_mark;
|
|
+ struct list_head nf_lru;
|
|
+ struct rcu_head nf_rcu;
|
|
ktime_t nf_birthtime;
|
|
};
|
|
|
|
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
|
|
index b9d694ec25d19..e4522e86e984e 100644
|
|
--- a/fs/nfsd/nfs4state.c
|
|
+++ b/fs/nfsd/nfs4state.c
|
|
@@ -602,9 +602,7 @@ put_nfs4_file(struct nfs4_file *fi)
|
|
static struct nfsd_file *
|
|
__nfs4_get_fd(struct nfs4_file *f, int oflag)
|
|
{
|
|
- if (f->fi_fds[oflag])
|
|
- return nfsd_file_get(f->fi_fds[oflag]);
|
|
- return NULL;
|
|
+ return nfsd_file_get(f->fi_fds[oflag]);
|
|
}
|
|
|
|
static struct nfsd_file *
|
|
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
|
|
index 597f14a80512f..4ed9fef14adc2 100644
|
|
--- a/fs/nfsd/nfs4xdr.c
|
|
+++ b/fs/nfsd/nfs4xdr.c
|
|
@@ -2541,6 +2541,20 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
|
|
return p;
|
|
}
|
|
|
|
+static __be32 nfsd4_encode_nfstime4(struct xdr_stream *xdr,
|
|
+ struct timespec64 *tv)
|
|
+{
|
|
+ __be32 *p;
|
|
+
|
|
+ p = xdr_reserve_space(xdr, XDR_UNIT * 3);
|
|
+ if (!p)
|
|
+ return nfserr_resource;
|
|
+
|
|
+ p = xdr_encode_hyper(p, (s64)tv->tv_sec);
|
|
+ *p = cpu_to_be32(tv->tv_nsec);
|
|
+ return nfs_ok;
|
|
+}
|
|
+
|
|
/*
|
|
* ctime (in NFSv4, time_metadata) is not writeable, and the client
|
|
* doesn't really care what resolution could theoretically be stored by
|
|
@@ -3346,11 +3360,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
|
|
p = xdr_encode_hyper(p, dummy64);
|
|
}
|
|
if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
|
|
- p = xdr_reserve_space(xdr, 12);
|
|
- if (!p)
|
|
- goto out_resource;
|
|
- p = xdr_encode_hyper(p, (s64)stat.atime.tv_sec);
|
|
- *p++ = cpu_to_be32(stat.atime.tv_nsec);
|
|
+ status = nfsd4_encode_nfstime4(xdr, &stat.atime);
|
|
+ if (status)
|
|
+ goto out;
|
|
+ }
|
|
+ if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
|
|
+ status = nfsd4_encode_nfstime4(xdr, &stat.btime);
|
|
+ if (status)
|
|
+ goto out;
|
|
}
|
|
if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
|
|
p = xdr_reserve_space(xdr, 12);
|
|
@@ -3359,25 +3376,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
|
|
p = encode_time_delta(p, d_inode(dentry));
|
|
}
|
|
if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
|
|
- p = xdr_reserve_space(xdr, 12);
|
|
- if (!p)
|
|
- goto out_resource;
|
|
- p = xdr_encode_hyper(p, (s64)stat.ctime.tv_sec);
|
|
- *p++ = cpu_to_be32(stat.ctime.tv_nsec);
|
|
+ status = nfsd4_encode_nfstime4(xdr, &stat.ctime);
|
|
+ if (status)
|
|
+ goto out;
|
|
}
|
|
if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
|
|
- p = xdr_reserve_space(xdr, 12);
|
|
- if (!p)
|
|
- goto out_resource;
|
|
- p = xdr_encode_hyper(p, (s64)stat.mtime.tv_sec);
|
|
- *p++ = cpu_to_be32(stat.mtime.tv_nsec);
|
|
- }
|
|
- if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
|
|
- p = xdr_reserve_space(xdr, 12);
|
|
- if (!p)
|
|
- goto out_resource;
|
|
- p = xdr_encode_hyper(p, (s64)stat.btime.tv_sec);
|
|
- *p++ = cpu_to_be32(stat.btime.tv_nsec);
|
|
+ status = nfsd4_encode_nfstime4(xdr, &stat.mtime);
|
|
+ if (status)
|
|
+ goto out;
|
|
}
|
|
if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
|
|
u64 ino = stat.ino;
|
|
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
|
|
index ffbadb8b3032d..ea3f104371d62 100644
|
|
--- a/fs/pstore/inode.c
|
|
+++ b/fs/pstore/inode.c
|
|
@@ -182,25 +182,21 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
|
|
{
|
|
struct pstore_private *p = d_inode(dentry)->i_private;
|
|
struct pstore_record *record = p->record;
|
|
- int rc = 0;
|
|
|
|
if (!record->psi->erase)
|
|
return -EPERM;
|
|
|
|
/* Make sure we can't race while removing this file. */
|
|
- mutex_lock(&records_list_lock);
|
|
- if (!list_empty(&p->list))
|
|
- list_del_init(&p->list);
|
|
- else
|
|
- rc = -ENOENT;
|
|
- p->dentry = NULL;
|
|
- mutex_unlock(&records_list_lock);
|
|
- if (rc)
|
|
- return rc;
|
|
-
|
|
- mutex_lock(&record->psi->read_mutex);
|
|
- record->psi->erase(record);
|
|
- mutex_unlock(&record->psi->read_mutex);
|
|
+ scoped_guard(mutex, &records_list_lock) {
|
|
+ if (!list_empty(&p->list))
|
|
+ list_del_init(&p->list);
|
|
+ else
|
|
+ return -ENOENT;
|
|
+ p->dentry = NULL;
|
|
+ }
|
|
+
|
|
+ scoped_guard(mutex, &record->psi->read_mutex)
|
|
+ record->psi->erase(record);
|
|
|
|
return simple_unlink(dir, dentry);
|
|
}
|
|
@@ -292,19 +288,16 @@ static struct dentry *psinfo_lock_root(void)
|
|
{
|
|
struct dentry *root;
|
|
|
|
- mutex_lock(&pstore_sb_lock);
|
|
+ guard(mutex)(&pstore_sb_lock);
|
|
/*
|
|
* Having no backend is fine -- no records appear.
|
|
* Not being mounted is fine -- nothing to do.
|
|
*/
|
|
- if (!psinfo || !pstore_sb) {
|
|
- mutex_unlock(&pstore_sb_lock);
|
|
+ if (!psinfo || !pstore_sb)
|
|
return NULL;
|
|
- }
|
|
|
|
root = pstore_sb->s_root;
|
|
inode_lock(d_inode(root));
|
|
- mutex_unlock(&pstore_sb_lock);
|
|
|
|
return root;
|
|
}
|
|
@@ -313,29 +306,25 @@ int pstore_put_backend_records(struct pstore_info *psi)
|
|
{
|
|
struct pstore_private *pos, *tmp;
|
|
struct dentry *root;
|
|
- int rc = 0;
|
|
|
|
root = psinfo_lock_root();
|
|
if (!root)
|
|
return 0;
|
|
|
|
- mutex_lock(&records_list_lock);
|
|
- list_for_each_entry_safe(pos, tmp, &records_list, list) {
|
|
- if (pos->record->psi == psi) {
|
|
- list_del_init(&pos->list);
|
|
- rc = simple_unlink(d_inode(root), pos->dentry);
|
|
- if (WARN_ON(rc))
|
|
- break;
|
|
- d_drop(pos->dentry);
|
|
- dput(pos->dentry);
|
|
- pos->dentry = NULL;
|
|
+ scoped_guard(mutex, &records_list_lock) {
|
|
+ list_for_each_entry_safe(pos, tmp, &records_list, list) {
|
|
+ if (pos->record->psi == psi) {
|
|
+ list_del_init(&pos->list);
|
|
+ d_invalidate(pos->dentry);
|
|
+ simple_unlink(d_inode(root), pos->dentry);
|
|
+ pos->dentry = NULL;
|
|
+ }
|
|
}
|
|
}
|
|
- mutex_unlock(&records_list_lock);
|
|
|
|
inode_unlock(d_inode(root));
|
|
|
|
- return rc;
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
@@ -355,20 +344,20 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
|
|
if (WARN_ON(!inode_is_locked(d_inode(root))))
|
|
return -EINVAL;
|
|
|
|
- rc = -EEXIST;
|
|
+ guard(mutex)(&records_list_lock);
|
|
+
|
|
/* Skip records that are already present in the filesystem. */
|
|
- mutex_lock(&records_list_lock);
|
|
list_for_each_entry(pos, &records_list, list) {
|
|
if (pos->record->type == record->type &&
|
|
pos->record->id == record->id &&
|
|
pos->record->psi == record->psi)
|
|
- goto fail;
|
|
+ return -EEXIST;
|
|
}
|
|
|
|
rc = -ENOMEM;
|
|
inode = pstore_get_inode(root->d_sb);
|
|
if (!inode)
|
|
- goto fail;
|
|
+ return -ENOMEM;
|
|
inode->i_mode = S_IFREG | 0444;
|
|
inode->i_fop = &pstore_file_operations;
|
|
scnprintf(name, sizeof(name), "%s-%s-%llu%s",
|
|
@@ -395,7 +384,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
|
|
d_add(dentry, inode);
|
|
|
|
list_add(&private->list, &records_list);
|
|
- mutex_unlock(&records_list_lock);
|
|
|
|
return 0;
|
|
|
|
@@ -403,8 +391,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
|
|
free_pstore_private(private);
|
|
fail_inode:
|
|
iput(inode);
|
|
-fail:
|
|
- mutex_unlock(&records_list_lock);
|
|
return rc;
|
|
}
|
|
|
|
@@ -450,9 +436,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
|
|
if (!sb->s_root)
|
|
return -ENOMEM;
|
|
|
|
- mutex_lock(&pstore_sb_lock);
|
|
- pstore_sb = sb;
|
|
- mutex_unlock(&pstore_sb_lock);
|
|
+ scoped_guard(mutex, &pstore_sb_lock)
|
|
+ pstore_sb = sb;
|
|
|
|
pstore_get_records(0);
|
|
|
|
@@ -467,17 +452,14 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
|
|
|
|
static void pstore_kill_sb(struct super_block *sb)
|
|
{
|
|
- mutex_lock(&pstore_sb_lock);
|
|
+ guard(mutex)(&pstore_sb_lock);
|
|
WARN_ON(pstore_sb && pstore_sb != sb);
|
|
|
|
kill_litter_super(sb);
|
|
pstore_sb = NULL;
|
|
|
|
- mutex_lock(&records_list_lock);
|
|
+ guard(mutex)(&records_list_lock);
|
|
INIT_LIST_HEAD(&records_list);
|
|
- mutex_unlock(&records_list_lock);
|
|
-
|
|
- mutex_unlock(&pstore_sb_lock);
|
|
}
|
|
|
|
static struct file_system_type pstore_fs_type = {
|
|
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
|
|
index b0cf3869d3bf5..b67557647d61f 100644
|
|
--- a/fs/quota/dquot.c
|
|
+++ b/fs/quota/dquot.c
|
|
@@ -399,15 +399,17 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
|
|
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
|
|
|
|
/* Dirtify all the dquots - this can block when journalling */
|
|
-static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
|
|
+static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
|
|
{
|
|
int ret, err, cnt;
|
|
+ struct dquot *dquot;
|
|
|
|
ret = err = 0;
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
- if (dquot[cnt])
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (dquot)
|
|
/* Even in case of error we have to continue */
|
|
- ret = mark_dquot_dirty(dquot[cnt]);
|
|
+ ret = mark_dquot_dirty(dquot);
|
|
if (!err)
|
|
err = ret;
|
|
}
|
|
@@ -1004,14 +1006,15 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
|
|
}
|
|
EXPORT_SYMBOL(dqget);
|
|
|
|
-static inline struct dquot **i_dquot(struct inode *inode)
|
|
+static inline struct dquot __rcu **i_dquot(struct inode *inode)
|
|
{
|
|
- return inode->i_sb->s_op->get_dquots(inode);
|
|
+ /* Force __rcu for now until filesystems are fixed */
|
|
+ return (struct dquot __rcu **)inode->i_sb->s_op->get_dquots(inode);
|
|
}
|
|
|
|
static int dqinit_needed(struct inode *inode, int type)
|
|
{
|
|
- struct dquot * const *dquots;
|
|
+ struct dquot __rcu * const *dquots;
|
|
int cnt;
|
|
|
|
if (IS_NOQUOTA(inode))
|
|
@@ -1084,59 +1087,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
|
|
return err;
|
|
}
|
|
|
|
-/*
|
|
- * Remove references to dquots from inode and add dquot to list for freeing
|
|
- * if we have the last reference to dquot
|
|
- */
|
|
-static void remove_inode_dquot_ref(struct inode *inode, int type,
|
|
- struct list_head *tofree_head)
|
|
-{
|
|
- struct dquot **dquots = i_dquot(inode);
|
|
- struct dquot *dquot = dquots[type];
|
|
-
|
|
- if (!dquot)
|
|
- return;
|
|
-
|
|
- dquots[type] = NULL;
|
|
- if (list_empty(&dquot->dq_free)) {
|
|
- /*
|
|
- * The inode still has reference to dquot so it can't be in the
|
|
- * free list
|
|
- */
|
|
- spin_lock(&dq_list_lock);
|
|
- list_add(&dquot->dq_free, tofree_head);
|
|
- spin_unlock(&dq_list_lock);
|
|
- } else {
|
|
- /*
|
|
- * Dquot is already in a list to put so we won't drop the last
|
|
- * reference here.
|
|
- */
|
|
- dqput(dquot);
|
|
- }
|
|
-}
|
|
-
|
|
-/*
|
|
- * Free list of dquots
|
|
- * Dquots are removed from inodes and no new references can be got so we are
|
|
- * the only ones holding reference
|
|
- */
|
|
-static void put_dquot_list(struct list_head *tofree_head)
|
|
-{
|
|
- struct list_head *act_head;
|
|
- struct dquot *dquot;
|
|
-
|
|
- act_head = tofree_head->next;
|
|
- while (act_head != tofree_head) {
|
|
- dquot = list_entry(act_head, struct dquot, dq_free);
|
|
- act_head = act_head->next;
|
|
- /* Remove dquot from the list so we won't have problems... */
|
|
- list_del_init(&dquot->dq_free);
|
|
- dqput(dquot);
|
|
- }
|
|
-}
|
|
-
|
|
-static void remove_dquot_ref(struct super_block *sb, int type,
|
|
- struct list_head *tofree_head)
|
|
+static void remove_dquot_ref(struct super_block *sb, int type)
|
|
{
|
|
struct inode *inode;
|
|
#ifdef CONFIG_QUOTA_DEBUG
|
|
@@ -1153,11 +1104,18 @@ static void remove_dquot_ref(struct super_block *sb, int type,
|
|
*/
|
|
spin_lock(&dq_data_lock);
|
|
if (!IS_NOQUOTA(inode)) {
|
|
+ struct dquot __rcu **dquots = i_dquot(inode);
|
|
+ struct dquot *dquot = srcu_dereference_check(
|
|
+ dquots[type], &dquot_srcu,
|
|
+ lockdep_is_held(&dq_data_lock));
|
|
+
|
|
#ifdef CONFIG_QUOTA_DEBUG
|
|
if (unlikely(inode_get_rsv_space(inode) > 0))
|
|
reserved = 1;
|
|
#endif
|
|
- remove_inode_dquot_ref(inode, type, tofree_head);
|
|
+ rcu_assign_pointer(dquots[type], NULL);
|
|
+ if (dquot)
|
|
+ dqput(dquot);
|
|
}
|
|
spin_unlock(&dq_data_lock);
|
|
}
|
|
@@ -1174,13 +1132,8 @@ static void remove_dquot_ref(struct super_block *sb, int type,
|
|
/* Gather all references from inodes and drop them */
|
|
static void drop_dquot_ref(struct super_block *sb, int type)
|
|
{
|
|
- LIST_HEAD(tofree_head);
|
|
-
|
|
- if (sb->dq_op) {
|
|
- remove_dquot_ref(sb, type, &tofree_head);
|
|
- synchronize_srcu(&dquot_srcu);
|
|
- put_dquot_list(&tofree_head);
|
|
- }
|
|
+ if (sb->dq_op)
|
|
+ remove_dquot_ref(sb, type);
|
|
}
|
|
|
|
static inline
|
|
@@ -1513,7 +1466,8 @@ static int inode_quota_active(const struct inode *inode)
|
|
static int __dquot_initialize(struct inode *inode, int type)
|
|
{
|
|
int cnt, init_needed = 0;
|
|
- struct dquot **dquots, *got[MAXQUOTAS] = {};
|
|
+ struct dquot __rcu **dquots;
|
|
+ struct dquot *got[MAXQUOTAS] = {};
|
|
struct super_block *sb = inode->i_sb;
|
|
qsize_t rsv;
|
|
int ret = 0;
|
|
@@ -1588,7 +1542,7 @@ static int __dquot_initialize(struct inode *inode, int type)
|
|
if (!got[cnt])
|
|
continue;
|
|
if (!dquots[cnt]) {
|
|
- dquots[cnt] = got[cnt];
|
|
+ rcu_assign_pointer(dquots[cnt], got[cnt]);
|
|
got[cnt] = NULL;
|
|
/*
|
|
* Make quota reservation system happy if someone
|
|
@@ -1596,12 +1550,16 @@ static int __dquot_initialize(struct inode *inode, int type)
|
|
*/
|
|
rsv = inode_get_rsv_space(inode);
|
|
if (unlikely(rsv)) {
|
|
+ struct dquot *dquot = srcu_dereference_check(
|
|
+ dquots[cnt], &dquot_srcu,
|
|
+ lockdep_is_held(&dq_data_lock));
|
|
+
|
|
spin_lock(&inode->i_lock);
|
|
/* Get reservation again under proper lock */
|
|
rsv = __inode_get_rsv_space(inode);
|
|
- spin_lock(&dquots[cnt]->dq_dqb_lock);
|
|
- dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
|
|
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
|
|
+ spin_lock(&dquot->dq_dqb_lock);
|
|
+ dquot->dq_dqb.dqb_rsvspace += rsv;
|
|
+ spin_unlock(&dquot->dq_dqb_lock);
|
|
spin_unlock(&inode->i_lock);
|
|
}
|
|
}
|
|
@@ -1623,7 +1581,7 @@ EXPORT_SYMBOL(dquot_initialize);
|
|
|
|
bool dquot_initialize_needed(struct inode *inode)
|
|
{
|
|
- struct dquot **dquots;
|
|
+ struct dquot __rcu **dquots;
|
|
int i;
|
|
|
|
if (!inode_quota_active(inode))
|
|
@@ -1648,13 +1606,14 @@ EXPORT_SYMBOL(dquot_initialize_needed);
|
|
static void __dquot_drop(struct inode *inode)
|
|
{
|
|
int cnt;
|
|
- struct dquot **dquots = i_dquot(inode);
|
|
+ struct dquot __rcu **dquots = i_dquot(inode);
|
|
struct dquot *put[MAXQUOTAS];
|
|
|
|
spin_lock(&dq_data_lock);
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
- put[cnt] = dquots[cnt];
|
|
- dquots[cnt] = NULL;
|
|
+ put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
|
|
+ lockdep_is_held(&dq_data_lock));
|
|
+ rcu_assign_pointer(dquots[cnt], NULL);
|
|
}
|
|
spin_unlock(&dq_data_lock);
|
|
dqput_all(put);
|
|
@@ -1662,7 +1621,7 @@ static void __dquot_drop(struct inode *inode)
|
|
|
|
void dquot_drop(struct inode *inode)
|
|
{
|
|
- struct dquot * const *dquots;
|
|
+ struct dquot __rcu * const *dquots;
|
|
int cnt;
|
|
|
|
if (IS_NOQUOTA(inode))
|
|
@@ -1735,7 +1694,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
|
|
int cnt, ret = 0, index;
|
|
struct dquot_warn warn[MAXQUOTAS];
|
|
int reserve = flags & DQUOT_SPACE_RESERVE;
|
|
- struct dquot **dquots;
|
|
+ struct dquot __rcu **dquots;
|
|
+ struct dquot *dquot;
|
|
|
|
if (!inode_quota_active(inode)) {
|
|
if (reserve) {
|
|
@@ -1755,27 +1715,26 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
|
|
index = srcu_read_lock(&dquot_srcu);
|
|
spin_lock(&inode->i_lock);
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
- if (!dquots[cnt])
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (!dquot)
|
|
continue;
|
|
if (reserve) {
|
|
- ret = dquot_add_space(dquots[cnt], 0, number, flags,
|
|
- &warn[cnt]);
|
|
+ ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
|
|
} else {
|
|
- ret = dquot_add_space(dquots[cnt], number, 0, flags,
|
|
- &warn[cnt]);
|
|
+ ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
|
|
}
|
|
if (ret) {
|
|
/* Back out changes we already did */
|
|
for (cnt--; cnt >= 0; cnt--) {
|
|
- if (!dquots[cnt])
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (!dquot)
|
|
continue;
|
|
- spin_lock(&dquots[cnt]->dq_dqb_lock);
|
|
+ spin_lock(&dquot->dq_dqb_lock);
|
|
if (reserve)
|
|
- dquot_free_reserved_space(dquots[cnt],
|
|
- number);
|
|
+ dquot_free_reserved_space(dquot, number);
|
|
else
|
|
- dquot_decr_space(dquots[cnt], number);
|
|
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
|
|
+ dquot_decr_space(dquot, number);
|
|
+ spin_unlock(&dquot->dq_dqb_lock);
|
|
}
|
|
spin_unlock(&inode->i_lock);
|
|
goto out_flush_warn;
|
|
@@ -1805,7 +1764,8 @@ int dquot_alloc_inode(struct inode *inode)
|
|
{
|
|
int cnt, ret = 0, index;
|
|
struct dquot_warn warn[MAXQUOTAS];
|
|
- struct dquot * const *dquots;
|
|
+ struct dquot __rcu * const *dquots;
|
|
+ struct dquot *dquot;
|
|
|
|
if (!inode_quota_active(inode))
|
|
return 0;
|
|
@@ -1816,17 +1776,19 @@ int dquot_alloc_inode(struct inode *inode)
|
|
index = srcu_read_lock(&dquot_srcu);
|
|
spin_lock(&inode->i_lock);
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
- if (!dquots[cnt])
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (!dquot)
|
|
continue;
|
|
- ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
|
|
+ ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
|
|
if (ret) {
|
|
for (cnt--; cnt >= 0; cnt--) {
|
|
- if (!dquots[cnt])
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (!dquot)
|
|
continue;
|
|
/* Back out changes we already did */
|
|
- spin_lock(&dquots[cnt]->dq_dqb_lock);
|
|
- dquot_decr_inodes(dquots[cnt], 1);
|
|
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
|
|
+ spin_lock(&dquot->dq_dqb_lock);
|
|
+ dquot_decr_inodes(dquot, 1);
|
|
+ spin_unlock(&dquot->dq_dqb_lock);
|
|
}
|
|
goto warn_put_all;
|
|
}
|
|
@@ -1847,7 +1809,8 @@ EXPORT_SYMBOL(dquot_alloc_inode);
|
|
*/
|
|
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
|
|
{
|
|
- struct dquot **dquots;
|
|
+ struct dquot __rcu **dquots;
|
|
+ struct dquot *dquot;
|
|
int cnt, index;
|
|
|
|
if (!inode_quota_active(inode)) {
|
|
@@ -1863,9 +1826,8 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
|
|
spin_lock(&inode->i_lock);
|
|
/* Claim reserved quotas to allocated quotas */
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
- if (dquots[cnt]) {
|
|
- struct dquot *dquot = dquots[cnt];
|
|
-
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (dquot) {
|
|
spin_lock(&dquot->dq_dqb_lock);
|
|
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
|
|
number = dquot->dq_dqb.dqb_rsvspace;
|
|
@@ -1889,7 +1851,8 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
|
|
*/
|
|
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
|
|
{
|
|
- struct dquot **dquots;
|
|
+ struct dquot __rcu **dquots;
|
|
+ struct dquot *dquot;
|
|
int cnt, index;
|
|
|
|
if (!inode_quota_active(inode)) {
|
|
@@ -1905,9 +1868,8 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
|
|
spin_lock(&inode->i_lock);
|
|
/* Claim reserved quotas to allocated quotas */
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
- if (dquots[cnt]) {
|
|
- struct dquot *dquot = dquots[cnt];
|
|
-
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (dquot) {
|
|
spin_lock(&dquot->dq_dqb_lock);
|
|
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
|
|
number = dquot->dq_dqb.dqb_curspace;
|
|
@@ -1933,7 +1895,8 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
|
|
{
|
|
unsigned int cnt;
|
|
struct dquot_warn warn[MAXQUOTAS];
|
|
- struct dquot **dquots;
|
|
+ struct dquot __rcu **dquots;
|
|
+ struct dquot *dquot;
|
|
int reserve = flags & DQUOT_SPACE_RESERVE, index;
|
|
|
|
if (!inode_quota_active(inode)) {
|
|
@@ -1954,17 +1917,18 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
|
|
int wtype;
|
|
|
|
warn[cnt].w_type = QUOTA_NL_NOWARN;
|
|
- if (!dquots[cnt])
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (!dquot)
|
|
continue;
|
|
- spin_lock(&dquots[cnt]->dq_dqb_lock);
|
|
- wtype = info_bdq_free(dquots[cnt], number);
|
|
+ spin_lock(&dquot->dq_dqb_lock);
|
|
+ wtype = info_bdq_free(dquot, number);
|
|
if (wtype != QUOTA_NL_NOWARN)
|
|
- prepare_warning(&warn[cnt], dquots[cnt], wtype);
|
|
+ prepare_warning(&warn[cnt], dquot, wtype);
|
|
if (reserve)
|
|
- dquot_free_reserved_space(dquots[cnt], number);
|
|
+ dquot_free_reserved_space(dquot, number);
|
|
else
|
|
- dquot_decr_space(dquots[cnt], number);
|
|
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
|
|
+ dquot_decr_space(dquot, number);
|
|
+ spin_unlock(&dquot->dq_dqb_lock);
|
|
}
|
|
if (reserve)
|
|
*inode_reserved_space(inode) -= number;
|
|
@@ -1988,7 +1952,8 @@ void dquot_free_inode(struct inode *inode)
|
|
{
|
|
unsigned int cnt;
|
|
struct dquot_warn warn[MAXQUOTAS];
|
|
- struct dquot * const *dquots;
|
|
+ struct dquot __rcu * const *dquots;
|
|
+ struct dquot *dquot;
|
|
int index;
|
|
|
|
if (!inode_quota_active(inode))
|
|
@@ -1999,16 +1964,16 @@ void dquot_free_inode(struct inode *inode)
|
|
spin_lock(&inode->i_lock);
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
int wtype;
|
|
-
|
|
warn[cnt].w_type = QUOTA_NL_NOWARN;
|
|
- if (!dquots[cnt])
|
|
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
|
|
+ if (!dquot)
|
|
continue;
|
|
- spin_lock(&dquots[cnt]->dq_dqb_lock);
|
|
- wtype = info_idq_free(dquots[cnt], 1);
|
|
+ spin_lock(&dquot->dq_dqb_lock);
|
|
+ wtype = info_idq_free(dquot, 1);
|
|
if (wtype != QUOTA_NL_NOWARN)
|
|
- prepare_warning(&warn[cnt], dquots[cnt], wtype);
|
|
- dquot_decr_inodes(dquots[cnt], 1);
|
|
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
|
|
+ prepare_warning(&warn[cnt], dquot, wtype);
|
|
+ dquot_decr_inodes(dquot, 1);
|
|
+ spin_unlock(&dquot->dq_dqb_lock);
|
|
}
|
|
spin_unlock(&inode->i_lock);
|
|
mark_all_dquot_dirty(dquots);
|
|
@@ -2034,8 +1999,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
|
|
qsize_t cur_space;
|
|
qsize_t rsv_space = 0;
|
|
qsize_t inode_usage = 1;
|
|
+ struct dquot __rcu **dquots;
|
|
struct dquot *transfer_from[MAXQUOTAS] = {};
|
|
- int cnt, ret = 0;
|
|
+ int cnt, index, ret = 0;
|
|
char is_valid[MAXQUOTAS] = {};
|
|
struct dquot_warn warn_to[MAXQUOTAS];
|
|
struct dquot_warn warn_from_inodes[MAXQUOTAS];
|
|
@@ -2066,6 +2032,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
|
|
}
|
|
cur_space = __inode_get_bytes(inode);
|
|
rsv_space = __inode_get_rsv_space(inode);
|
|
+ dquots = i_dquot(inode);
|
|
/*
|
|
* Build the transfer_from list, check limits, and update usage in
|
|
* the target structures.
|
|
@@ -2080,7 +2047,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
|
|
if (!sb_has_quota_active(inode->i_sb, cnt))
|
|
continue;
|
|
is_valid[cnt] = 1;
|
|
- transfer_from[cnt] = i_dquot(inode)[cnt];
|
|
+ transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
|
|
+ &dquot_srcu, lockdep_is_held(&dq_data_lock));
|
|
ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
|
|
&warn_to[cnt]);
|
|
if (ret)
|
|
@@ -2119,13 +2087,21 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
|
|
rsv_space);
|
|
spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
|
|
}
|
|
- i_dquot(inode)[cnt] = transfer_to[cnt];
|
|
+ rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
|
|
}
|
|
spin_unlock(&inode->i_lock);
|
|
spin_unlock(&dq_data_lock);
|
|
|
|
- mark_all_dquot_dirty(transfer_from);
|
|
- mark_all_dquot_dirty(transfer_to);
|
|
+ /*
|
|
+ * These arrays are local and we hold dquot references so we don't need
|
|
+ * the srcu protection but still take dquot_srcu to avoid warning in
|
|
+ * mark_all_dquot_dirty().
|
|
+ */
|
|
+ index = srcu_read_lock(&dquot_srcu);
|
|
+ mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
|
|
+ mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
|
|
+ srcu_read_unlock(&dquot_srcu, index);
|
|
+
|
|
flush_warnings(warn_to);
|
|
flush_warnings(warn_from_inodes);
|
|
flush_warnings(warn_from_space);
|
|
diff --git a/fs/select.c b/fs/select.c
|
|
index 0ee55af1a55c2..d4d881d439dcd 100644
|
|
--- a/fs/select.c
|
|
+++ b/fs/select.c
|
|
@@ -476,7 +476,7 @@ static inline void wait_key_set(poll_table *wait, unsigned long in,
|
|
wait->_key |= POLLOUT_SET;
|
|
}
|
|
|
|
-static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
|
|
+static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
|
|
{
|
|
ktime_t expire, *to = NULL;
|
|
struct poll_wqueues table;
|
|
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
|
|
index 03cb890690e83..a476a406e5997 100644
|
|
--- a/include/drm/drm_fixed.h
|
|
+++ b/include/drm/drm_fixed.h
|
|
@@ -70,7 +70,6 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
|
|
}
|
|
|
|
#define DRM_FIXED_POINT 32
|
|
-#define DRM_FIXED_POINT_HALF 16
|
|
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
|
|
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
|
|
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
|
|
@@ -89,12 +88,12 @@ static inline int drm_fixp2int(s64 a)
|
|
|
|
static inline int drm_fixp2int_round(s64 a)
|
|
{
|
|
- return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
|
|
+ return drm_fixp2int(a + DRM_FIXED_ONE / 2);
|
|
}
|
|
|
|
static inline int drm_fixp2int_ceil(s64 a)
|
|
{
|
|
- if (a > 0)
|
|
+ if (a >= 0)
|
|
return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
|
|
else
|
|
return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
|
|
diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
|
|
index 754c54a6eb06a..7850cdc62e285 100644
|
|
--- a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
|
|
+++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
|
|
@@ -86,5 +86,6 @@
|
|
#define R8A779G0_CLK_CPEX 74
|
|
#define R8A779G0_CLK_CBFUSA 75
|
|
#define R8A779G0_CLK_R 76
|
|
+#define R8A779G0_CLK_CP 77
|
|
|
|
#endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */
|
|
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
|
|
index 92e7abfe04f92..70b3737052dd2 100644
|
|
--- a/include/linux/dm-io.h
|
|
+++ b/include/linux/dm-io.h
|
|
@@ -79,7 +79,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
|
|
* error occurred doing io to the corresponding region.
|
|
*/
|
|
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
|
|
- struct dm_io_region *region, unsigned int long *sync_error_bits);
|
|
+ struct dm_io_region *region, unsigned int long *sync_error_bits,
|
|
+ unsigned short ioprio);
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _LINUX_DM_IO_H */
|
|
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
|
|
index fe848901fcc3a..218fc5c54e901 100644
|
|
--- a/include/linux/exportfs.h
|
|
+++ b/include/linux/exportfs.h
|
|
@@ -221,6 +221,7 @@ struct export_operations {
|
|
#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
|
|
atomic attribute updates
|
|
*/
|
|
+#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
|
|
unsigned long flags;
|
|
};
|
|
|
|
diff --git a/include/linux/filter.h b/include/linux/filter.h
|
|
index efc42a6e3aed0..face590b24e17 100644
|
|
--- a/include/linux/filter.h
|
|
+++ b/include/linux/filter.h
|
|
@@ -495,24 +495,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
|
|
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
|
|
u64, __ur_3, u64, __ur_4, u64, __ur_5)
|
|
|
|
-#define BPF_CALL_x(x, name, ...) \
|
|
+#define BPF_CALL_x(x, attr, name, ...) \
|
|
static __always_inline \
|
|
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
|
|
typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
|
|
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
|
|
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
|
|
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
|
|
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
|
|
{ \
|
|
return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
|
|
} \
|
|
static __always_inline \
|
|
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
|
|
|
|
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
|
|
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
|
|
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
|
|
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
|
|
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
|
|
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
|
|
+#define __NOATTR
|
|
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
|
|
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
|
|
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
|
|
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
|
|
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
|
|
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
|
|
+
|
|
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
|
|
|
|
#define bpf_ctx_range(TYPE, MEMBER) \
|
|
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
|
|
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
|
|
index a1484cdb3158e..a8f3058448eaa 100644
|
|
--- a/include/linux/io_uring.h
|
|
+++ b/include/linux/io_uring.h
|
|
@@ -42,11 +42,11 @@ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
|
|
unsigned issue_flags);
|
|
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
|
|
void (*task_work_cb)(struct io_uring_cmd *, unsigned));
|
|
-struct sock *io_uring_get_socket(struct file *file);
|
|
void __io_uring_cancel(bool cancel_all);
|
|
void __io_uring_free(struct task_struct *tsk);
|
|
void io_uring_unreg_ringfd(void);
|
|
const char *io_uring_get_opcode(u8 opcode);
|
|
+bool io_is_uring_fops(struct file *file);
|
|
|
|
static inline void io_uring_files_cancel(void)
|
|
{
|
|
@@ -71,6 +71,10 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
|
|
{
|
|
return -EOPNOTSUPP;
|
|
}
|
|
+static inline bool io_is_uring_fops(struct file *file)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
|
|
ssize_t ret2, unsigned issue_flags)
|
|
{
|
|
@@ -79,10 +83,6 @@ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
|
|
void (*task_work_cb)(struct io_uring_cmd *, unsigned))
|
|
{
|
|
}
|
|
-static inline struct sock *io_uring_get_socket(struct file *file)
|
|
-{
|
|
- return NULL;
|
|
-}
|
|
static inline void io_uring_task_cancel(void)
|
|
{
|
|
}
|
|
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
|
|
index f5b687a787a34..37aeea266ebb3 100644
|
|
--- a/include/linux/io_uring_types.h
|
|
+++ b/include/linux/io_uring_types.h
|
|
@@ -330,9 +330,6 @@ struct io_ring_ctx {
|
|
|
|
struct list_head io_buffers_pages;
|
|
|
|
- #if defined(CONFIG_UNIX)
|
|
- struct socket *ring_sock;
|
|
- #endif
|
|
/* hashed buffered write serialization */
|
|
struct io_wq_hash *hash_map;
|
|
|
|
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
|
|
index 4657d5c54abef..ca0eee571ad7b 100644
|
|
--- a/include/linux/mlx5/qp.h
|
|
+++ b/include/linux/mlx5/qp.h
|
|
@@ -269,7 +269,10 @@ struct mlx5_wqe_eth_seg {
|
|
union {
|
|
struct {
|
|
__be16 sz;
|
|
- u8 start[2];
|
|
+ union {
|
|
+ u8 start[2];
|
|
+ DECLARE_FLEX_ARRAY(u8, data);
|
|
+ };
|
|
} inline_hdr;
|
|
struct {
|
|
__be16 type;
|
|
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
|
|
index 1322652a9d0d9..7dc186ec52a29 100644
|
|
--- a/include/linux/moduleloader.h
|
|
+++ b/include/linux/moduleloader.h
|
|
@@ -95,6 +95,14 @@ int module_finalize(const Elf_Ehdr *hdr,
|
|
const Elf_Shdr *sechdrs,
|
|
struct module *mod);
|
|
|
|
+#ifdef CONFIG_MODULES
|
|
+void flush_module_init_free_work(void);
|
|
+#else
|
|
+static inline void flush_module_init_free_work(void)
|
|
+{
|
|
+}
|
|
+#endif
|
|
+
|
|
/* Any cleanup needed when module leaves. */
|
|
void module_arch_cleanup(struct module *mod);
|
|
|
|
diff --git a/include/linux/pci.h b/include/linux/pci.h
|
|
index eccaf1abea79d..f5d89a4b811f1 100644
|
|
--- a/include/linux/pci.h
|
|
+++ b/include/linux/pci.h
|
|
@@ -2355,6 +2355,11 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
|
|
return NULL;
|
|
}
|
|
|
|
+static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
|
|
+{
|
|
+ return dev->error_state == pci_channel_io_perm_failure;
|
|
+}
|
|
+
|
|
void pci_request_acs(void);
|
|
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
|
|
bool pci_acs_path_enabled(struct pci_dev *start,
|
|
diff --git a/include/linux/poll.h b/include/linux/poll.h
|
|
index a9e0e1c2d1f2f..d1ea4f3714a84 100644
|
|
--- a/include/linux/poll.h
|
|
+++ b/include/linux/poll.h
|
|
@@ -14,11 +14,7 @@
|
|
|
|
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
|
|
additional memory. */
|
|
-#ifdef __clang__
|
|
-#define MAX_STACK_ALLOC 768
|
|
-#else
|
|
#define MAX_STACK_ALLOC 832
|
|
-#endif
|
|
#define FRONTEND_STACK_ALLOC 256
|
|
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
|
|
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
|
|
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
|
|
index d2507168b9c7b..319698087d66a 100644
|
|
--- a/include/linux/rcupdate.h
|
|
+++ b/include/linux/rcupdate.h
|
|
@@ -268,6 +268,37 @@ do { \
|
|
cond_resched(); \
|
|
} while (0)
|
|
|
|
+/**
|
|
+ * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
|
|
+ * @old_ts: jiffies at start of processing.
|
|
+ *
|
|
+ * This helper is for long-running softirq handlers, such as NAPI threads in
|
|
+ * networking. The caller should initialize the variable passed in as @old_ts
|
|
+ * at the beginning of the softirq handler. When invoked frequently, this macro
|
|
+ * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
|
|
+ * provide both RCU and RCU-Tasks quiescent states. Note that this macro
|
|
+ * modifies its old_ts argument.
|
|
+ *
|
|
+ * Because regions of code that have disabled softirq act as RCU read-side
|
|
+ * critical sections, this macro should be invoked with softirq (and
|
|
+ * preemption) enabled.
|
|
+ *
|
|
+ * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would
|
|
+ * have more chance to invoke schedule() calls and provide necessary quiescent
|
|
+ * states. As a contrast, calling cond_resched() only won't achieve the same
|
|
+ * effect because cond_resched() does not provide RCU-Tasks quiescent states.
|
|
+ */
|
|
+#define rcu_softirq_qs_periodic(old_ts) \
|
|
+do { \
|
|
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
|
|
+ time_after(jiffies, (old_ts) + HZ / 10)) { \
|
|
+ preempt_disable(); \
|
|
+ rcu_softirq_qs(); \
|
|
+ preempt_enable(); \
|
|
+ (old_ts) = jiffies; \
|
|
+ } \
|
|
+} while (0)
|
|
+
|
|
/*
|
|
* Infrastructure to implement the synchronize_() primitives in
|
|
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
|
|
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
|
|
index a674221d151db..c69e09909449f 100644
|
|
--- a/include/net/bluetooth/hci.h
|
|
+++ b/include/net/bluetooth/hci.h
|
|
@@ -416,7 +416,6 @@ enum {
|
|
#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
|
|
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
|
|
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
|
|
-#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
|
|
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
|
|
#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
|
|
|
|
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
|
|
index 09c978f3d95dc..c50a41f1782a4 100644
|
|
--- a/include/net/bluetooth/hci_core.h
|
|
+++ b/include/net/bluetooth/hci_core.h
|
|
@@ -81,7 +81,7 @@ struct discovery_state {
|
|
u8 last_adv_addr_type;
|
|
s8 last_adv_rssi;
|
|
u32 last_adv_flags;
|
|
- u8 last_adv_data[HCI_MAX_AD_LENGTH];
|
|
+ u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH];
|
|
u8 last_adv_data_len;
|
|
bool report_invalid_rssi;
|
|
bool result_filtering;
|
|
@@ -293,7 +293,7 @@ struct adv_pattern {
|
|
__u8 ad_type;
|
|
__u8 offset;
|
|
__u8 length;
|
|
- __u8 value[HCI_MAX_AD_LENGTH];
|
|
+ __u8 value[HCI_MAX_EXT_AD_LENGTH];
|
|
};
|
|
|
|
struct adv_rssi_thresholds {
|
|
@@ -549,6 +549,7 @@ struct hci_dev {
|
|
__u32 req_status;
|
|
__u32 req_result;
|
|
struct sk_buff *req_skb;
|
|
+ struct sk_buff *req_rsp;
|
|
|
|
void *smp_data;
|
|
void *smp_bredr_data;
|
|
@@ -726,7 +727,7 @@ struct hci_conn {
|
|
__u16 le_conn_interval;
|
|
__u16 le_conn_latency;
|
|
__u16 le_supv_timeout;
|
|
- __u8 le_adv_data[HCI_MAX_AD_LENGTH];
|
|
+ __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH];
|
|
__u8 le_adv_data_len;
|
|
__u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
|
|
__u8 le_per_adv_data_len;
|
|
@@ -739,6 +740,7 @@ struct hci_conn {
|
|
unsigned long flags;
|
|
|
|
enum conn_reasons conn_reason;
|
|
+ __u8 abort_reason;
|
|
|
|
__u32 clock;
|
|
__u16 clock_accuracy;
|
|
@@ -758,7 +760,6 @@ struct hci_conn {
|
|
struct delayed_work auto_accept_work;
|
|
struct delayed_work idle_work;
|
|
struct delayed_work le_conn_timeout;
|
|
- struct work_struct le_scan_cleanup;
|
|
|
|
struct device dev;
|
|
struct dentry *debugfs;
|
|
@@ -1709,6 +1710,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
|
|
/* Extended advertising support */
|
|
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
|
|
|
|
+/* Maximum advertising length */
|
|
+#define max_adv_len(dev) \
|
|
+ (ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH)
|
|
+
|
|
/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
|
|
*
|
|
* C24: Mandatory if the LE Controller supports Connection State and either
|
|
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
|
|
index 17f5a4c32f36e..59d15b1a978ab 100644
|
|
--- a/include/net/bluetooth/hci_sync.h
|
|
+++ b/include/net/bluetooth/hci_sync.h
|
|
@@ -39,8 +39,10 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
|
|
void hci_cmd_sync_init(struct hci_dev *hdev);
|
|
void hci_cmd_sync_clear(struct hci_dev *hdev);
|
|
void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
|
|
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
|
|
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err);
|
|
|
|
+int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
|
|
+ void *data, hci_cmd_sync_work_destroy_t destroy);
|
|
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
|
|
void *data, hci_cmd_sync_work_destroy_t destroy);
|
|
|
|
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
|
|
index a7085e092d348..3a0cde4dcf331 100644
|
|
--- a/include/uapi/rdma/irdma-abi.h
|
|
+++ b/include/uapi/rdma/irdma-abi.h
|
|
@@ -22,10 +22,15 @@ enum irdma_memreg_type {
|
|
IRDMA_MEMREG_TYPE_CQ = 2,
|
|
};
|
|
|
|
+enum {
|
|
+ IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
|
|
+};
|
|
+
|
|
struct irdma_alloc_ucontext_req {
|
|
__u32 rsvd32;
|
|
__u8 userspace_ver;
|
|
__u8 rsvd8[3];
|
|
+ __aligned_u64 comp_mask;
|
|
};
|
|
|
|
struct irdma_alloc_ucontext_resp {
|
|
@@ -46,6 +51,7 @@ struct irdma_alloc_ucontext_resp {
|
|
__u16 max_hw_sq_chunk;
|
|
__u8 hw_rev;
|
|
__u8 rsvd2;
|
|
+ __aligned_u64 comp_mask;
|
|
};
|
|
|
|
struct irdma_alloc_pd_resp {
|
|
diff --git a/init/main.c b/init/main.c
|
|
index 87a52bdb41d67..ccde19e7275fa 100644
|
|
--- a/init/main.c
|
|
+++ b/init/main.c
|
|
@@ -89,6 +89,7 @@
|
|
#include <linux/sched/task_stack.h>
|
|
#include <linux/context_tracking.h>
|
|
#include <linux/random.h>
|
|
+#include <linux/moduleloader.h>
|
|
#include <linux/list.h>
|
|
#include <linux/integrity.h>
|
|
#include <linux/proc_ns.h>
|
|
@@ -1473,11 +1474,11 @@ static void mark_readonly(void)
|
|
if (rodata_enabled) {
|
|
/*
|
|
* load_module() results in W+X mappings, which are cleaned
|
|
- * up with call_rcu(). Let's make sure that queued work is
|
|
+ * up with init_free_wq. Let's make sure that queued work is
|
|
* flushed so that we don't hit false positives looking for
|
|
* insecure pages which are W+X.
|
|
*/
|
|
- rcu_barrier();
|
|
+ flush_module_init_free_work();
|
|
mark_rodata_ro();
|
|
rodata_test();
|
|
} else
|
|
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
|
|
index b80614e7d6051..4660cb89ea9f5 100644
|
|
--- a/io_uring/filetable.c
|
|
+++ b/io_uring/filetable.c
|
|
@@ -95,12 +95,10 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
|
|
needs_switch = true;
|
|
}
|
|
|
|
- ret = io_scm_file_account(ctx, file);
|
|
- if (!ret) {
|
|
- *io_get_tag_slot(ctx->file_data, slot_index) = 0;
|
|
- io_fixed_file_set(file_slot, file);
|
|
- io_file_bitmap_set(&ctx->file_table, slot_index);
|
|
- }
|
|
+ *io_get_tag_slot(ctx->file_data, slot_index) = 0;
|
|
+ io_fixed_file_set(file_slot, file);
|
|
+ io_file_bitmap_set(&ctx->file_table, slot_index);
|
|
+ return 0;
|
|
err:
|
|
if (needs_switch)
|
|
io_rsrc_node_switch(ctx, ctx->file_data);
|
|
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
|
|
index 35894955b4549..415248c1f82c6 100644
|
|
--- a/io_uring/io_uring.c
|
|
+++ b/io_uring/io_uring.c
|
|
@@ -60,7 +60,6 @@
|
|
#include <linux/net.h>
|
|
#include <net/sock.h>
|
|
#include <net/af_unix.h>
|
|
-#include <net/scm.h>
|
|
#include <linux/anon_inodes.h>
|
|
#include <linux/sched/mm.h>
|
|
#include <linux/uaccess.h>
|
|
@@ -153,19 +152,6 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
|
|
|
|
static struct kmem_cache *req_cachep;
|
|
|
|
-struct sock *io_uring_get_socket(struct file *file)
|
|
-{
|
|
-#if defined(CONFIG_UNIX)
|
|
- if (io_is_uring_fops(file)) {
|
|
- struct io_ring_ctx *ctx = file->private_data;
|
|
-
|
|
- return ctx->ring_sock->sk;
|
|
- }
|
|
-#endif
|
|
- return NULL;
|
|
-}
|
|
-EXPORT_SYMBOL(io_uring_get_socket);
|
|
-
|
|
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
|
|
{
|
|
if (!wq_list_empty(&ctx->submit_state.compl_reqs))
|
|
@@ -2641,12 +2627,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
|
|
WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
|
|
WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
|
|
|
|
-#if defined(CONFIG_UNIX)
|
|
- if (ctx->ring_sock) {
|
|
- ctx->ring_sock->file = NULL; /* so that iput() is called */
|
|
- sock_release(ctx->ring_sock);
|
|
- }
|
|
-#endif
|
|
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
|
|
|
|
if (ctx->mm_account) {
|
|
@@ -3451,32 +3431,12 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
|
|
/*
|
|
* Allocate an anonymous fd, this is what constitutes the application
|
|
* visible backing of an io_uring instance. The application mmaps this
|
|
- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
|
|
- * we have to tie this fd to a socket for file garbage collection purposes.
|
|
+ * fd to gain access to the SQ/CQ ring details.
|
|
*/
|
|
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
|
|
{
|
|
- struct file *file;
|
|
-#if defined(CONFIG_UNIX)
|
|
- int ret;
|
|
-
|
|
- ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
|
|
- &ctx->ring_sock);
|
|
- if (ret)
|
|
- return ERR_PTR(ret);
|
|
-#endif
|
|
-
|
|
- file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
|
|
+ return anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
|
|
O_RDWR | O_CLOEXEC, NULL);
|
|
-#if defined(CONFIG_UNIX)
|
|
- if (IS_ERR(file)) {
|
|
- sock_release(ctx->ring_sock);
|
|
- ctx->ring_sock = NULL;
|
|
- } else {
|
|
- ctx->ring_sock->file = file;
|
|
- }
|
|
-#endif
|
|
- return file;
|
|
}
|
|
|
|
static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
|
|
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
|
|
index 019600570ee49..59e6f755f12c6 100644
|
|
--- a/io_uring/io_uring.h
|
|
+++ b/io_uring/io_uring.h
|
|
@@ -52,7 +52,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
|
|
}
|
|
|
|
void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
|
|
-bool io_is_uring_fops(struct file *file);
|
|
bool io_alloc_async_data(struct io_kiocb *req);
|
|
void io_req_task_queue(struct io_kiocb *req);
|
|
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
|
|
diff --git a/io_uring/net.c b/io_uring/net.c
|
|
index c062ce66af12c..0d4ee3d738fbf 100644
|
|
--- a/io_uring/net.c
|
|
+++ b/io_uring/net.c
|
|
@@ -183,16 +183,115 @@ static int io_setup_async_msg(struct io_kiocb *req,
|
|
return -EAGAIN;
|
|
}
|
|
|
|
+#ifdef CONFIG_COMPAT
|
|
+static int io_compat_msg_copy_hdr(struct io_kiocb *req,
|
|
+ struct io_async_msghdr *iomsg,
|
|
+ struct compat_msghdr *msg, int ddir)
|
|
+{
|
|
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
|
+ struct compat_iovec __user *uiov;
|
|
+ int ret;
|
|
+
|
|
+ if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ uiov = compat_ptr(msg->msg_iov);
|
|
+ if (req->flags & REQ_F_BUFFER_SELECT) {
|
|
+ compat_ssize_t clen;
|
|
+
|
|
+ iomsg->free_iov = NULL;
|
|
+ if (msg->msg_iovlen == 0) {
|
|
+ sr->len = 0;
|
|
+ } else if (msg->msg_iovlen > 1) {
|
|
+ return -EINVAL;
|
|
+ } else {
|
|
+ if (!access_ok(uiov, sizeof(*uiov)))
|
|
+ return -EFAULT;
|
|
+ if (__get_user(clen, &uiov->iov_len))
|
|
+ return -EFAULT;
|
|
+ if (clen < 0)
|
|
+ return -EINVAL;
|
|
+ sr->len = clen;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ iomsg->free_iov = iomsg->fast_iov;
|
|
+ ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
|
|
+ UIO_FASTIOV, &iomsg->free_iov,
|
|
+ &iomsg->msg.msg_iter, true);
|
|
+ if (unlikely(ret < 0))
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
|
|
+ struct user_msghdr *msg, int ddir)
|
|
+{
|
|
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
|
+ int ret;
|
|
+
|
|
+ if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ if (req->flags & REQ_F_BUFFER_SELECT) {
|
|
+ if (msg->msg_iovlen == 0) {
|
|
+ sr->len = iomsg->fast_iov[0].iov_len = 0;
|
|
+ iomsg->fast_iov[0].iov_base = NULL;
|
|
+ iomsg->free_iov = NULL;
|
|
+ } else if (msg->msg_iovlen > 1) {
|
|
+ return -EINVAL;
|
|
+ } else {
|
|
+ if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
|
|
+ sizeof(*msg->msg_iov)))
|
|
+ return -EFAULT;
|
|
+ sr->len = iomsg->fast_iov[0].iov_len;
|
|
+ iomsg->free_iov = NULL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ iomsg->free_iov = iomsg->fast_iov;
|
|
+ ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
|
|
+ &iomsg->free_iov, &iomsg->msg.msg_iter, false);
|
|
+ if (unlikely(ret < 0))
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
|
|
struct io_async_msghdr *iomsg)
|
|
{
|
|
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
|
+ struct user_msghdr msg;
|
|
int ret;
|
|
|
|
iomsg->msg.msg_name = &iomsg->addr;
|
|
- iomsg->free_iov = iomsg->fast_iov;
|
|
- ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
|
|
- &iomsg->free_iov);
|
|
+ iomsg->msg.msg_iter.nr_segs = 0;
|
|
+
|
|
+#ifdef CONFIG_COMPAT
|
|
+ if (unlikely(req->ctx->compat)) {
|
|
+ struct compat_msghdr cmsg;
|
|
+
|
|
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
|
|
+ if (unlikely(ret))
|
|
+ return ret;
|
|
+
|
|
+ return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
|
|
+ if (unlikely(ret))
|
|
+ return ret;
|
|
+
|
|
+ ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
|
|
+
|
|
/* save msg_control as sys_sendmsg() overwrites it */
|
|
sr->msg_control = iomsg->msg.msg_control_user;
|
|
return ret;
|
|
@@ -415,142 +514,77 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
|
|
return IOU_OK;
|
|
}
|
|
|
|
-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
|
|
+static int io_recvmsg_mshot_prep(struct io_kiocb *req,
|
|
+ struct io_async_msghdr *iomsg,
|
|
+ int namelen, size_t controllen)
|
|
{
|
|
- int hdr;
|
|
-
|
|
- if (iomsg->namelen < 0)
|
|
- return true;
|
|
- if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
|
|
- iomsg->namelen, &hdr))
|
|
- return true;
|
|
- if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
|
|
- return true;
|
|
+ if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
|
|
+ (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
|
|
+ int hdr;
|
|
+
|
|
+ if (unlikely(namelen < 0))
|
|
+ return -EOVERFLOW;
|
|
+ if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
|
|
+ namelen, &hdr))
|
|
+ return -EOVERFLOW;
|
|
+ if (check_add_overflow(hdr, controllen, &hdr))
|
|
+ return -EOVERFLOW;
|
|
+
|
|
+ iomsg->namelen = namelen;
|
|
+ iomsg->controllen = controllen;
|
|
+ return 0;
|
|
+ }
|
|
|
|
- return false;
|
|
+ return 0;
|
|
}
|
|
|
|
-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
|
|
- struct io_async_msghdr *iomsg)
|
|
+static int io_recvmsg_copy_hdr(struct io_kiocb *req,
|
|
+ struct io_async_msghdr *iomsg)
|
|
{
|
|
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
|
struct user_msghdr msg;
|
|
int ret;
|
|
|
|
- if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
|
|
- return -EFAULT;
|
|
-
|
|
- ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- if (req->flags & REQ_F_BUFFER_SELECT) {
|
|
- if (msg.msg_iovlen == 0) {
|
|
- sr->len = iomsg->fast_iov[0].iov_len = 0;
|
|
- iomsg->fast_iov[0].iov_base = NULL;
|
|
- iomsg->free_iov = NULL;
|
|
- } else if (msg.msg_iovlen > 1) {
|
|
- return -EINVAL;
|
|
- } else {
|
|
- if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
|
|
- return -EFAULT;
|
|
- sr->len = iomsg->fast_iov[0].iov_len;
|
|
- iomsg->free_iov = NULL;
|
|
- }
|
|
-
|
|
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
|
|
- iomsg->namelen = msg.msg_namelen;
|
|
- iomsg->controllen = msg.msg_controllen;
|
|
- if (io_recvmsg_multishot_overflow(iomsg))
|
|
- return -EOVERFLOW;
|
|
- }
|
|
- } else {
|
|
- iomsg->free_iov = iomsg->fast_iov;
|
|
- ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
|
|
- &iomsg->free_iov, &iomsg->msg.msg_iter,
|
|
- false);
|
|
- if (ret > 0)
|
|
- ret = 0;
|
|
- }
|
|
-
|
|
- return ret;
|
|
-}
|
|
+ iomsg->msg.msg_name = &iomsg->addr;
|
|
+ iomsg->msg.msg_iter.nr_segs = 0;
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
|
|
- struct io_async_msghdr *iomsg)
|
|
-{
|
|
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
|
- struct compat_msghdr msg;
|
|
- struct compat_iovec __user *uiov;
|
|
- int ret;
|
|
-
|
|
- if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
|
|
- return -EFAULT;
|
|
-
|
|
- ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
|
|
- if (ret)
|
|
- return ret;
|
|
+ if (unlikely(req->ctx->compat)) {
|
|
+ struct compat_msghdr cmsg;
|
|
|
|
- uiov = compat_ptr(msg.msg_iov);
|
|
- if (req->flags & REQ_F_BUFFER_SELECT) {
|
|
- compat_ssize_t clen;
|
|
-
|
|
- iomsg->free_iov = NULL;
|
|
- if (msg.msg_iovlen == 0) {
|
|
- sr->len = 0;
|
|
- } else if (msg.msg_iovlen > 1) {
|
|
- return -EINVAL;
|
|
- } else {
|
|
- if (!access_ok(uiov, sizeof(*uiov)))
|
|
- return -EFAULT;
|
|
- if (__get_user(clen, &uiov->iov_len))
|
|
- return -EFAULT;
|
|
- if (clen < 0)
|
|
- return -EINVAL;
|
|
- sr->len = clen;
|
|
- }
|
|
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
|
|
+ if (unlikely(ret))
|
|
+ return ret;
|
|
|
|
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
|
|
- iomsg->namelen = msg.msg_namelen;
|
|
- iomsg->controllen = msg.msg_controllen;
|
|
- if (io_recvmsg_multishot_overflow(iomsg))
|
|
- return -EOVERFLOW;
|
|
- }
|
|
- } else {
|
|
- iomsg->free_iov = iomsg->fast_iov;
|
|
- ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
|
|
- UIO_FASTIOV, &iomsg->free_iov,
|
|
- &iomsg->msg.msg_iter, true);
|
|
- if (ret < 0)
|
|
+ ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
|
|
+ if (unlikely(ret))
|
|
return ret;
|
|
- }
|
|
|
|
- return 0;
|
|
-}
|
|
+ return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
|
|
+ cmsg.msg_controllen);
|
|
+ }
|
|
#endif
|
|
|
|
-static int io_recvmsg_copy_hdr(struct io_kiocb *req,
|
|
- struct io_async_msghdr *iomsg)
|
|
-{
|
|
- iomsg->msg.msg_name = &iomsg->addr;
|
|
- iomsg->msg.msg_iter.nr_segs = 0;
|
|
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
|
|
+ if (unlikely(ret))
|
|
+ return ret;
|
|
|
|
-#ifdef CONFIG_COMPAT
|
|
- if (req->ctx->compat)
|
|
- return __io_compat_recvmsg_copy_hdr(req, iomsg);
|
|
-#endif
|
|
+ ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
|
|
+ if (unlikely(ret))
|
|
+ return ret;
|
|
|
|
- return __io_recvmsg_copy_hdr(req, iomsg);
|
|
+ return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
|
|
+ msg.msg_controllen);
|
|
}
|
|
|
|
int io_recvmsg_prep_async(struct io_kiocb *req)
|
|
{
|
|
+ struct io_async_msghdr *iomsg;
|
|
int ret;
|
|
|
|
if (!io_msg_alloc_async_prep(req))
|
|
return -ENOMEM;
|
|
- ret = io_recvmsg_copy_hdr(req, req->async_data);
|
|
+ iomsg = req->async_data;
|
|
+ ret = io_recvmsg_copy_hdr(req, iomsg);
|
|
if (!ret)
|
|
req->flags |= REQ_F_NEED_CLEANUP;
|
|
return ret;
|
|
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
|
|
index 7ada0339b3870..ac658cfa89c63 100644
|
|
--- a/io_uring/rsrc.c
|
|
+++ b/io_uring/rsrc.c
|
|
@@ -494,11 +494,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
|
|
err = -EBADF;
|
|
break;
|
|
}
|
|
- err = io_scm_file_account(ctx, file);
|
|
- if (err) {
|
|
- fput(file);
|
|
- break;
|
|
- }
|
|
*io_get_tag_slot(data, i) = tag;
|
|
io_fixed_file_set(file_slot, file);
|
|
io_file_bitmap_set(&ctx->file_table, i);
|
|
@@ -762,22 +757,12 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
|
|
for (i = 0; i < ctx->nr_user_files; i++) {
|
|
struct file *file = io_file_from_index(&ctx->file_table, i);
|
|
|
|
- /* skip scm accounted files, they'll be freed by ->ring_sock */
|
|
- if (!file || io_file_need_scm(file))
|
|
+ if (!file)
|
|
continue;
|
|
io_file_bitmap_clear(&ctx->file_table, i);
|
|
fput(file);
|
|
}
|
|
|
|
-#if defined(CONFIG_UNIX)
|
|
- if (ctx->ring_sock) {
|
|
- struct sock *sock = ctx->ring_sock->sk;
|
|
- struct sk_buff *skb;
|
|
-
|
|
- while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
|
|
- kfree_skb(skb);
|
|
- }
|
|
-#endif
|
|
io_free_file_tables(&ctx->file_table);
|
|
io_file_table_set_alloc_range(ctx, 0, 0);
|
|
io_rsrc_data_free(ctx->file_data);
|
|
@@ -805,134 +790,11 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx)
|
|
return ret;
|
|
}
|
|
|
|
-/*
|
|
- * Ensure the UNIX gc is aware of our file set, so we are certain that
|
|
- * the io_uring can be safely unregistered on process exit, even if we have
|
|
- * loops in the file referencing. We account only files that can hold other
|
|
- * files because otherwise they can't form a loop and so are not interesting
|
|
- * for GC.
|
|
- */
|
|
-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
|
|
-{
|
|
-#if defined(CONFIG_UNIX)
|
|
- struct sock *sk = ctx->ring_sock->sk;
|
|
- struct sk_buff_head *head = &sk->sk_receive_queue;
|
|
- struct scm_fp_list *fpl;
|
|
- struct sk_buff *skb;
|
|
-
|
|
- if (likely(!io_file_need_scm(file)))
|
|
- return 0;
|
|
-
|
|
- /*
|
|
- * See if we can merge this file into an existing skb SCM_RIGHTS
|
|
- * file set. If there's no room, fall back to allocating a new skb
|
|
- * and filling it in.
|
|
- */
|
|
- spin_lock_irq(&head->lock);
|
|
- skb = skb_peek(head);
|
|
- if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
|
|
- __skb_unlink(skb, head);
|
|
- else
|
|
- skb = NULL;
|
|
- spin_unlock_irq(&head->lock);
|
|
-
|
|
- if (!skb) {
|
|
- fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
|
|
- if (!fpl)
|
|
- return -ENOMEM;
|
|
-
|
|
- skb = alloc_skb(0, GFP_KERNEL);
|
|
- if (!skb) {
|
|
- kfree(fpl);
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
- fpl->user = get_uid(current_user());
|
|
- fpl->max = SCM_MAX_FD;
|
|
- fpl->count = 0;
|
|
-
|
|
- UNIXCB(skb).fp = fpl;
|
|
- skb->sk = sk;
|
|
- skb->scm_io_uring = 1;
|
|
- skb->destructor = unix_destruct_scm;
|
|
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
|
|
- }
|
|
-
|
|
- fpl = UNIXCB(skb).fp;
|
|
- fpl->fp[fpl->count++] = get_file(file);
|
|
- unix_inflight(fpl->user, file);
|
|
- skb_queue_head(head, skb);
|
|
- fput(file);
|
|
-#endif
|
|
- return 0;
|
|
-}
|
|
-
|
|
static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
|
|
{
|
|
struct file *file = prsrc->file;
|
|
-#if defined(CONFIG_UNIX)
|
|
- struct sock *sock = ctx->ring_sock->sk;
|
|
- struct sk_buff_head list, *head = &sock->sk_receive_queue;
|
|
- struct sk_buff *skb;
|
|
- int i;
|
|
-
|
|
- if (!io_file_need_scm(file)) {
|
|
- fput(file);
|
|
- return;
|
|
- }
|
|
-
|
|
- __skb_queue_head_init(&list);
|
|
-
|
|
- /*
|
|
- * Find the skb that holds this file in its SCM_RIGHTS. When found,
|
|
- * remove this entry and rearrange the file array.
|
|
- */
|
|
- skb = skb_dequeue(head);
|
|
- while (skb) {
|
|
- struct scm_fp_list *fp;
|
|
|
|
- fp = UNIXCB(skb).fp;
|
|
- for (i = 0; i < fp->count; i++) {
|
|
- int left;
|
|
-
|
|
- if (fp->fp[i] != file)
|
|
- continue;
|
|
-
|
|
- unix_notinflight(fp->user, fp->fp[i]);
|
|
- left = fp->count - 1 - i;
|
|
- if (left) {
|
|
- memmove(&fp->fp[i], &fp->fp[i + 1],
|
|
- left * sizeof(struct file *));
|
|
- }
|
|
- fp->count--;
|
|
- if (!fp->count) {
|
|
- kfree_skb(skb);
|
|
- skb = NULL;
|
|
- } else {
|
|
- __skb_queue_tail(&list, skb);
|
|
- }
|
|
- fput(file);
|
|
- file = NULL;
|
|
- break;
|
|
- }
|
|
-
|
|
- if (!file)
|
|
- break;
|
|
-
|
|
- __skb_queue_tail(&list, skb);
|
|
-
|
|
- skb = skb_dequeue(head);
|
|
- }
|
|
-
|
|
- if (skb_peek(&list)) {
|
|
- spin_lock_irq(&head->lock);
|
|
- while ((skb = __skb_dequeue(&list)) != NULL)
|
|
- __skb_queue_tail(head, skb);
|
|
- spin_unlock_irq(&head->lock);
|
|
- }
|
|
-#else
|
|
fput(file);
|
|
-#endif
|
|
}
|
|
|
|
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
|
|
@@ -986,21 +848,12 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
|
|
goto fail;
|
|
|
|
/*
|
|
- * Don't allow io_uring instances to be registered. If UNIX
|
|
- * isn't enabled, then this causes a reference cycle and this
|
|
- * instance can never get freed. If UNIX is enabled we'll
|
|
- * handle it just fine, but there's still no point in allowing
|
|
- * a ring fd as it doesn't support regular read/write anyway.
|
|
+ * Don't allow io_uring instances to be registered.
|
|
*/
|
|
if (io_is_uring_fops(file)) {
|
|
fput(file);
|
|
goto fail;
|
|
}
|
|
- ret = io_scm_file_account(ctx, file);
|
|
- if (ret) {
|
|
- fput(file);
|
|
- goto fail;
|
|
- }
|
|
file_slot = io_fixed_file_slot(&ctx->file_table, i);
|
|
io_fixed_file_set(file_slot, file);
|
|
io_file_bitmap_set(&ctx->file_table, i);
|
|
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
|
|
index acaf8dad05401..85f145607c620 100644
|
|
--- a/io_uring/rsrc.h
|
|
+++ b/io_uring/rsrc.h
|
|
@@ -77,21 +77,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx);
|
|
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
|
|
unsigned nr_args, u64 __user *tags);
|
|
|
|
-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
|
|
-
|
|
-static inline bool io_file_need_scm(struct file *filp)
|
|
-{
|
|
- return false;
|
|
-}
|
|
-
|
|
-static inline int io_scm_file_account(struct io_ring_ctx *ctx,
|
|
- struct file *file)
|
|
-{
|
|
- if (likely(!io_file_need_scm(file)))
|
|
- return 0;
|
|
- return __io_scm_file_account(ctx, file);
|
|
-}
|
|
-
|
|
int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
|
|
unsigned nr_args);
|
|
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
|
|
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
|
|
index 76bf1de261152..44abf88e1bb0d 100644
|
|
--- a/kernel/bpf/core.c
|
|
+++ b/kernel/bpf/core.c
|
|
@@ -857,7 +857,12 @@ static LIST_HEAD(pack_list);
|
|
* CONFIG_MMU=n. Use PAGE_SIZE in these cases.
|
|
*/
|
|
#ifdef PMD_SIZE
|
|
-#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
|
|
+/* PMD_SIZE is really big for some archs. It doesn't make sense to
|
|
+ * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
|
|
+ * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
|
|
+ * greater than or equal to 2MB.
|
|
+ */
|
|
+#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
|
|
#else
|
|
#define BPF_PROG_PACK_SIZE PAGE_SIZE
|
|
#endif
|
|
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
|
|
index 0508937048137..806a7c1b364b6 100644
|
|
--- a/kernel/bpf/cpumap.c
|
|
+++ b/kernel/bpf/cpumap.c
|
|
@@ -306,6 +306,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
|
|
static int cpu_map_kthread_run(void *data)
|
|
{
|
|
struct bpf_cpu_map_entry *rcpu = data;
|
|
+ unsigned long last_qs = jiffies;
|
|
|
|
complete(&rcpu->kthread_running);
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
@@ -331,10 +332,12 @@ static int cpu_map_kthread_run(void *data)
|
|
if (__ptr_ring_empty(rcpu->queue)) {
|
|
schedule();
|
|
sched = 1;
|
|
+ last_qs = jiffies;
|
|
} else {
|
|
__set_current_state(TASK_RUNNING);
|
|
}
|
|
} else {
|
|
+ rcu_softirq_qs_periodic(last_qs);
|
|
sched = cond_resched();
|
|
}
|
|
|
|
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
|
|
index f9a87dcc5535b..e051cbb07dac0 100644
|
|
--- a/kernel/bpf/devmap.c
|
|
+++ b/kernel/bpf/devmap.c
|
|
@@ -131,13 +131,14 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
|
|
bpf_map_init_from_attr(&dtab->map, attr);
|
|
|
|
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
|
|
- dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
|
|
-
|
|
- if (!dtab->n_buckets) /* Overflow check */
|
|
+ /* hash table size must be power of 2; roundup_pow_of_two() can
|
|
+ * overflow into UB on 32-bit arches, so check that first
|
|
+ */
|
|
+ if (dtab->map.max_entries > 1UL << 31)
|
|
return -EINVAL;
|
|
- }
|
|
|
|
- if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
|
|
+ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
|
|
+
|
|
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
|
|
dtab->map.numa_node);
|
|
if (!dtab->dev_index_head)
|
|
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
|
|
index 88c71de0a0a95..0c74cc9012d5c 100644
|
|
--- a/kernel/bpf/hashtab.c
|
|
+++ b/kernel/bpf/hashtab.c
|
|
@@ -495,7 +495,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
|
|
num_possible_cpus());
|
|
}
|
|
|
|
- /* hash table size must be power of 2 */
|
|
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
|
|
+ * into UB on 32-bit arches, so check that first
|
|
+ */
|
|
+ err = -E2BIG;
|
|
+ if (htab->map.max_entries > 1UL << 31)
|
|
+ goto free_htab;
|
|
+
|
|
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
|
|
|
|
htab->elem_size = sizeof(struct htab_elem) +
|
|
@@ -505,10 +511,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
|
|
else
|
|
htab->elem_size += round_up(htab->map.value_size, 8);
|
|
|
|
- err = -E2BIG;
|
|
- /* prevent zero size kmalloc and check for u32 overflow */
|
|
- if (htab->n_buckets == 0 ||
|
|
- htab->n_buckets > U32_MAX / sizeof(struct bucket))
|
|
+ /* check for u32 overflow */
|
|
+ if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
|
|
goto free_htab;
|
|
|
|
err = -ENOMEM;
|
|
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
|
|
index 83f8f67e933df..758510b46d87b 100644
|
|
--- a/kernel/bpf/helpers.c
|
|
+++ b/kernel/bpf/helpers.c
|
|
@@ -328,7 +328,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
|
|
__this_cpu_write(irqsave_flags, flags);
|
|
}
|
|
|
|
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
|
|
+NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
|
|
{
|
|
__bpf_spin_lock_irqsave(lock);
|
|
return 0;
|
|
@@ -350,7 +350,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
|
|
+NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
|
|
{
|
|
__bpf_spin_unlock_irqrestore(lock);
|
|
return 0;
|
|
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
|
|
index f86db3cf72123..f0fd936cef319 100644
|
|
--- a/kernel/bpf/stackmap.c
|
|
+++ b/kernel/bpf/stackmap.c
|
|
@@ -94,11 +94,14 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
|
|
} else if (value_size / 8 > sysctl_perf_event_max_stack)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- /* hash table size must be power of 2 */
|
|
- n_buckets = roundup_pow_of_two(attr->max_entries);
|
|
- if (!n_buckets)
|
|
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
|
|
+ * into UB on 32-bit arches, so check that first
|
|
+ */
|
|
+ if (attr->max_entries > 1UL << 31)
|
|
return ERR_PTR(-E2BIG);
|
|
|
|
+ n_buckets = roundup_pow_of_two(attr->max_entries);
|
|
+
|
|
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
|
|
smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
|
|
if (!smap)
|
|
diff --git a/kernel/module/main.c b/kernel/module/main.c
|
|
index 7a376e26de85b..554aba47ab689 100644
|
|
--- a/kernel/module/main.c
|
|
+++ b/kernel/module/main.c
|
|
@@ -2434,6 +2434,11 @@ static void do_free_init(struct work_struct *w)
|
|
}
|
|
}
|
|
|
|
+void flush_module_init_free_work(void)
|
|
+{
|
|
+ flush_work(&init_free_wq);
|
|
+}
|
|
+
|
|
#undef MODULE_PARAM_PREFIX
|
|
#define MODULE_PARAM_PREFIX "module."
|
|
/* Default value for module->async_probe_requested */
|
|
@@ -2524,8 +2529,8 @@ static noinline int do_init_module(struct module *mod)
|
|
* Note that module_alloc() on most architectures creates W+X page
|
|
* mappings which won't be cleaned up until do_free_init() runs. Any
|
|
* code such as mark_rodata_ro() which depends on those mappings to
|
|
- * be cleaned up needs to sync with the queued work - ie
|
|
- * rcu_barrier()
|
|
+ * be cleaned up needs to sync with the queued work by invoking
|
|
+ * flush_module_init_free_work().
|
|
*/
|
|
if (llist_add(&freeinit->node, &init_free_list))
|
|
schedule_work(&init_free_wq);
|
|
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
|
|
index cc53fb77f77cc..981cdb00b8722 100644
|
|
--- a/kernel/printk/printk.c
|
|
+++ b/kernel/printk/printk.c
|
|
@@ -1797,10 +1797,23 @@ static bool console_waiter;
|
|
*/
|
|
static void console_lock_spinning_enable(void)
|
|
{
|
|
+ /*
|
|
+ * Do not use spinning in panic(). The panic CPU wants to keep the lock.
|
|
+ * Non-panic CPUs abandon the flush anyway.
|
|
+ *
|
|
+ * Just keep the lockdep annotation. The panic-CPU should avoid
|
|
+ * taking console_owner_lock because it might cause a deadlock.
|
|
+ * This looks like the easiest way how to prevent false lockdep
|
|
+ * reports without handling races a lockless way.
|
|
+ */
|
|
+ if (panic_in_progress())
|
|
+ goto lockdep;
|
|
+
|
|
raw_spin_lock(&console_owner_lock);
|
|
console_owner = current;
|
|
raw_spin_unlock(&console_owner_lock);
|
|
|
|
+lockdep:
|
|
/* The waiter may spin on us after setting console_owner */
|
|
spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
|
|
}
|
|
@@ -1824,6 +1837,22 @@ static int console_lock_spinning_disable_and_check(void)
|
|
{
|
|
int waiter;
|
|
|
|
+ /*
|
|
+ * Ignore spinning waiters during panic() because they might get stopped
|
|
+ * or blocked at any time,
|
|
+ *
|
|
+ * It is safe because nobody is allowed to start spinning during panic
|
|
+ * in the first place. If there has been a waiter then non panic CPUs
|
|
+ * might stay spinning. They would get stopped anyway. The panic context
|
|
+ * will never start spinning and an interrupted spin on panic CPU will
|
|
+ * never continue.
|
|
+ */
|
|
+ if (panic_in_progress()) {
|
|
+ /* Keep lockdep happy. */
|
|
+ spin_release(&console_owner_dep_map, _THIS_IP_);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
raw_spin_lock(&console_owner_lock);
|
|
waiter = READ_ONCE(console_waiter);
|
|
console_owner = NULL;
|
|
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
|
|
index 9d7464a90f85d..61f9503a5fe9c 100644
|
|
--- a/kernel/rcu/tree.c
|
|
+++ b/kernel/rcu/tree.c
|
|
@@ -4465,13 +4465,16 @@ static void __init rcu_start_exp_gp_kworkers(void)
|
|
rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
|
|
if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
|
|
pr_err("Failed to create %s!\n", gp_kworker_name);
|
|
+ rcu_exp_gp_kworker = NULL;
|
|
return;
|
|
}
|
|
|
|
rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
|
|
if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
|
|
pr_err("Failed to create %s!\n", par_gp_kworker_name);
|
|
+ rcu_exp_par_gp_kworker = NULL;
|
|
kthread_destroy_worker(rcu_exp_gp_kworker);
|
|
+ rcu_exp_gp_kworker = NULL;
|
|
return;
|
|
}
|
|
|
|
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
|
|
index 6d2cbed96b462..75e8d9652f7bb 100644
|
|
--- a/kernel/rcu/tree_exp.h
|
|
+++ b/kernel/rcu/tree_exp.h
|
|
@@ -427,7 +427,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
|
|
__sync_rcu_exp_select_node_cpus(rewp);
|
|
}
|
|
|
|
-static inline bool rcu_gp_par_worker_started(void)
|
|
+static inline bool rcu_exp_worker_started(void)
|
|
+{
|
|
+ return !!READ_ONCE(rcu_exp_gp_kworker);
|
|
+}
|
|
+
|
|
+static inline bool rcu_exp_par_worker_started(void)
|
|
{
|
|
return !!READ_ONCE(rcu_exp_par_gp_kworker);
|
|
}
|
|
@@ -477,7 +482,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
|
|
__sync_rcu_exp_select_node_cpus(rewp);
|
|
}
|
|
|
|
-static inline bool rcu_gp_par_worker_started(void)
|
|
+static inline bool rcu_exp_worker_started(void)
|
|
+{
|
|
+ return !!READ_ONCE(rcu_gp_wq);
|
|
+}
|
|
+
|
|
+static inline bool rcu_exp_par_worker_started(void)
|
|
{
|
|
return !!READ_ONCE(rcu_par_gp_wq);
|
|
}
|
|
@@ -540,7 +550,7 @@ static void sync_rcu_exp_select_cpus(void)
|
|
rnp->exp_need_flush = false;
|
|
if (!READ_ONCE(rnp->expmask))
|
|
continue; /* Avoid early boot non-existent wq. */
|
|
- if (!rcu_gp_par_worker_started() ||
|
|
+ if (!rcu_exp_par_worker_started() ||
|
|
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
|
|
rcu_is_last_leaf_node(rnp)) {
|
|
/* No worker started yet or last leaf, do direct call. */
|
|
@@ -910,7 +920,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
|
|
*/
|
|
void synchronize_rcu_expedited(void)
|
|
{
|
|
- bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
|
|
+ bool use_worker;
|
|
unsigned long flags;
|
|
struct rcu_exp_work rew;
|
|
struct rcu_node *rnp;
|
|
@@ -921,6 +931,9 @@ void synchronize_rcu_expedited(void)
|
|
lock_is_held(&rcu_sched_lock_map),
|
|
"Illegal synchronize_rcu_expedited() in RCU read-side critical section");
|
|
|
|
+ use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
|
|
+ rcu_exp_worker_started();
|
|
+
|
|
/* Is the state is such that the call is a grace period? */
|
|
if (rcu_blocking_is_gp()) {
|
|
// Note well that this code runs with !PREEMPT && !SMP.
|
|
@@ -950,7 +963,7 @@ void synchronize_rcu_expedited(void)
|
|
return; /* Someone else did our work for us. */
|
|
|
|
/* Ensure that load happens before action based on it. */
|
|
- if (unlikely(boottime)) {
|
|
+ if (unlikely(!use_worker)) {
|
|
/* Direct call during scheduler init and early_initcalls(). */
|
|
rcu_exp_sel_wait_wake(s);
|
|
} else {
|
|
@@ -968,7 +981,7 @@ void synchronize_rcu_expedited(void)
|
|
/* Let the next expedited grace period start. */
|
|
mutex_unlock(&rcu_state.exp_mutex);
|
|
|
|
- if (likely(!boottime))
|
|
+ if (likely(use_worker))
|
|
synchronize_rcu_expedited_destroy_work(&rew);
|
|
}
|
|
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
|
|
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
|
index 2558ab9033bee..91c101ecfef9f 100644
|
|
--- a/kernel/sched/fair.c
|
|
+++ b/kernel/sched/fair.c
|
|
@@ -6656,7 +6656,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
|
|
if (!available_idle_cpu(cpu)) {
|
|
idle = false;
|
|
if (*idle_cpu == -1) {
|
|
- if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
|
|
+ if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) {
|
|
*idle_cpu = cpu;
|
|
break;
|
|
}
|
|
@@ -6664,7 +6664,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
|
|
}
|
|
break;
|
|
}
|
|
- if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
|
|
+ if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
|
|
*idle_cpu = cpu;
|
|
}
|
|
|
|
@@ -6678,13 +6678,19 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
|
|
/*
|
|
* Scan the local SMT mask for idle CPUs.
|
|
*/
|
|
-static int select_idle_smt(struct task_struct *p, int target)
|
|
+static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
|
|
{
|
|
int cpu;
|
|
|
|
for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
|
|
if (cpu == target)
|
|
continue;
|
|
+ /*
|
|
+ * Check if the CPU is in the LLC scheduling domain of @target.
|
|
+ * Due to isolcpus, there is no guarantee that all the siblings are in the domain.
|
|
+ */
|
|
+ if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
|
|
+ continue;
|
|
if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
|
|
return cpu;
|
|
}
|
|
@@ -6708,7 +6714,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
|
|
return __select_idle_cpu(core, p);
|
|
}
|
|
|
|
-static inline int select_idle_smt(struct task_struct *p, int target)
|
|
+static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
|
|
{
|
|
return -1;
|
|
}
|
|
@@ -6970,7 +6976,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
|
|
has_idle_core = test_idle_cores(target);
|
|
|
|
if (!has_idle_core && cpus_share_cache(prev, target)) {
|
|
- i = select_idle_smt(p, prev);
|
|
+ i = select_idle_smt(p, sd, prev);
|
|
if ((unsigned int)i < nr_cpumask_bits)
|
|
return i;
|
|
}
|
|
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
|
|
index 831e8e779acef..f7c3de01197c9 100644
|
|
--- a/kernel/time/time_test.c
|
|
+++ b/kernel/time/time_test.c
|
|
@@ -73,7 +73,7 @@ static void time64_to_tm_test_date_range(struct kunit *test)
|
|
|
|
days = div_s64(secs, 86400);
|
|
|
|
- #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \
|
|
+ #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %lld", \
|
|
year, month, mdday, yday, days
|
|
|
|
KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
|
|
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 221c8c404973a..b158cbef4d8dc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1180,13 +1180,15 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
}

/*
- * cycle_between - true if test occurs chronologically between before and after
+ * timestamp_in_interval - true if ts is chronologically in [start, end]
+ *
+ * True if ts occurs chronologically at or after start, and before or at end.
*/
-static bool cycle_between(u64 before, u64 test, u64 after)
+static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
{
- if (test > before && test < after)
+ if (ts >= start && ts <= end)
return true;
- if (test < before && before > after)
+ if (start > end && (ts >= start || ts <= end))
return true;
return false;
}
@@ -1246,7 +1248,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
*/
now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
- if (!cycle_between(interval_start, cycles, now)) {
+ if (!timestamp_in_interval(interval_start, now, cycles)) {
clock_was_set_seq = tk->clock_was_set_seq;
cs_was_changed_seq = tk->cs_was_changed_seq;
cycles = interval_start;
@@ -1259,10 +1261,8 @@ int get_device_system_crosststamp(int (*get_time_fn)
tk_core.timekeeper.offs_real);
base_raw = tk->tkr_raw.base;

- nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
- system_counterval.cycles);
- nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
- system_counterval.cycles);
+ nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
+ nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
} while (read_seqcount_retry(&tk_core.seq, seq));

xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
@@ -1277,13 +1277,13 @@ int get_device_system_crosststamp(int (*get_time_fn)
bool discontinuity;

/*
- * Check that the counter value occurs after the provided
+ * Check that the counter value is not before the provided
* history reference and that the history doesn't cross a
* clocksource change
*/
if (!history_begin ||
- !cycle_between(history_begin->cycles,
- system_counterval.cycles, cycles) ||
+ !timestamp_in_interval(history_begin->cycles,
+ cycles, system_counterval.cycles) ||
history_begin->cs_was_changed_seq != cs_was_changed_seq)
return -EINVAL;
partial_history_cycles = cycles - system_counterval.cycles;
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index d4572dbc91453..705b82736be08 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
n, e[0], r[0]);

p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
- KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r);
}

static void cmdline_test_range(struct kunit *test)
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 2b5cc70ac53fc..dbedd99aa6163 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -32,7 +32,7 @@ struct some_bytes {
BUILD_BUG_ON(sizeof(instance.data) != 32); \
for (size_t i = 0; i < sizeof(instance.data); i++) { \
KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
- "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
+ "line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \
__LINE__, #instance, v, i, instance.data[i]); \
} \
} while (0)
@@ -41,7 +41,7 @@ struct some_bytes {
BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
for (size_t i = 0; i < sizeof(one); i++) { \
KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
- "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
+ "line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \
__LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
} \
kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
index 4c40580a99a36..f247089d63c08 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/test_blackhole_dev.c
@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
- struct ethhdr *ethh;
struct udphdr *uh;
int data_len;
int ret;
@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
/* Ether */
- ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_push(skb, sizeof(struct ethhdr));
skb_set_mac_header(skb, 0);

skb->protocol = htons(ETH_P_IPV6);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index f1b7510359e4b..3f9ff02baafe3 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -264,14 +264,11 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;

- lock_sock(sk);
-
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;

- release_sock(sk);
return err;
}

@@ -297,8 +294,6 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,

skb_free_datagram(sk, skb);

- release_sock(sk);
-
if (flags & MSG_TRUNC)
copied = skblen;

@@ -521,10 +516,11 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (sk->sk_state == BT_LISTEN)
return -EINVAL;

- lock_sock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
- release_sock(sk);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
err = put_user(amount, (int __user *)arg);
break;

diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
index 8a85f6cdfbc16..1bc51e2b05a34 100644
--- a/net/bluetooth/eir.c
+++ b/net/bluetooth/eir.c
@@ -13,48 +13,33 @@

#define PNP_INFO_SVCLASS_ID 0x1200

-static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
-{
- u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
-
- /* If data is already NULL terminated just pass it directly */
- if (data[data_len - 1] == '\0')
- return eir_append_data(eir, eir_len, type, data, data_len);
-
- memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH);
- name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
-
- return eir_append_data(eir, eir_len, type, name, sizeof(name));
-}
-
u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
size_t short_len;
size_t complete_len;

- /* no space left for name (+ NULL + type + len) */
- if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
+ /* no space left for name (+ type + len) */
+ if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2)
return ad_len;

/* use complete name if present and fits */
complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
- return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len + 1);
+ return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, complete_len);

/* use short name if present */
short_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
if (short_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->short_name,
- short_len == HCI_MAX_SHORT_NAME_LENGTH ?
- short_len : short_len + 1);
+ short_len);

/* use shortened full name if present, we already know that name
* is longer then HCI_MAX_SHORT_NAME_LENGTH
*/
if (complete_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->dev_name,
HCI_MAX_SHORT_NAME_LENGTH);

diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
|
|
index 12d36875358b9..bac5a369d2bef 100644
|
|
--- a/net/bluetooth/hci_conn.c
|
|
+++ b/net/bluetooth/hci_conn.c
|
|
@@ -175,57 +175,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
|
|
hci_dev_put(hdev);
|
|
}
|
|
|
|
-static void le_scan_cleanup(struct work_struct *work)
|
|
-{
|
|
- struct hci_conn *conn = container_of(work, struct hci_conn,
|
|
- le_scan_cleanup);
|
|
- struct hci_dev *hdev = conn->hdev;
|
|
- struct hci_conn *c = NULL;
|
|
-
|
|
- BT_DBG("%s hcon %p", hdev->name, conn);
|
|
-
|
|
- hci_dev_lock(hdev);
|
|
-
|
|
- /* Check that the hci_conn is still around */
|
|
- rcu_read_lock();
|
|
- list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
|
|
- if (c == conn)
|
|
- break;
|
|
- }
|
|
- rcu_read_unlock();
|
|
-
|
|
- if (c == conn) {
|
|
- hci_connect_le_scan_cleanup(conn, 0x00);
|
|
- hci_conn_cleanup(conn);
|
|
- }
|
|
-
|
|
- hci_dev_unlock(hdev);
|
|
- hci_dev_put(hdev);
|
|
- hci_conn_put(conn);
|
|
-}
|
|
-
|
|
-static void hci_connect_le_scan_remove(struct hci_conn *conn)
|
|
-{
|
|
- BT_DBG("%s hcon %p", conn->hdev->name, conn);
|
|
-
|
|
- /* We can't call hci_conn_del/hci_conn_cleanup here since that
|
|
- * could deadlock with another hci_conn_del() call that's holding
|
|
- * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
|
|
- * Instead, grab temporary extra references to the hci_dev and
|
|
- * hci_conn and perform the necessary cleanup in a separate work
|
|
- * callback.
|
|
- */
|
|
-
|
|
- hci_dev_hold(conn->hdev);
|
|
- hci_conn_get(conn);
|
|
-
|
|
- /* Even though we hold a reference to the hdev, many other
|
|
- * things might get cleaned up meanwhile, including the hdev's
|
|
- * own workqueue, so we can't use that for scheduling.
|
|
- */
|
|
- schedule_work(&conn->le_scan_cleanup);
|
|
-}
|
|
-
|
|
static void hci_acl_create_connection(struct hci_conn *conn)
|
|
{
|
|
struct hci_dev *hdev = conn->hdev;
|
|
@@ -672,13 +621,6 @@ static void hci_conn_timeout(struct work_struct *work)
|
|
if (refcnt > 0)
|
|
return;
|
|
|
|
- /* LE connections in scanning state need special handling */
|
|
- if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
|
|
- test_bit(HCI_CONN_SCANNING, &conn->flags)) {
|
|
- hci_connect_le_scan_remove(conn);
|
|
- return;
|
|
- }
|
|
-
|
|
hci_abort_conn(conn, hci_proto_disconn_ind(conn));
|
|
}
|
|
|
|
@@ -1050,7 +992,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
|
|
INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
|
|
INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
|
|
INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
|
|
- INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
|
|
|
|
atomic_set(&conn->refcnt, 0);
|
|
|
|
@@ -2837,81 +2778,46 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
|
|
return phys;
|
|
}
|
|
|
|
-int hci_abort_conn(struct hci_conn *conn, u8 reason)
|
|
+static int abort_conn_sync(struct hci_dev *hdev, void *data)
|
|
{
|
|
- int r = 0;
|
|
+ struct hci_conn *conn;
|
|
+ u16 handle = PTR_ERR(data);
|
|
|
|
- if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
|
|
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
|
|
+ if (!conn)
|
|
return 0;
|
|
|
|
- switch (conn->state) {
|
|
- case BT_CONNECTED:
|
|
- case BT_CONFIG:
|
|
- if (conn->type == AMP_LINK) {
|
|
- struct hci_cp_disconn_phy_link cp;
|
|
+ return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
|
|
+}
|
|
|
|
- cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
|
|
- cp.reason = reason;
|
|
- r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
|
|
- sizeof(cp), &cp);
|
|
- } else {
|
|
- struct hci_cp_disconnect dc;
|
|
+int hci_abort_conn(struct hci_conn *conn, u8 reason)
|
|
+{
|
|
+ struct hci_dev *hdev = conn->hdev;
|
|
|
|
- dc.handle = cpu_to_le16(conn->handle);
|
|
- dc.reason = reason;
|
|
- r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
|
|
- sizeof(dc), &dc);
|
|
- }
|
|
+ /* If abort_reason has already been set it means the connection is
|
|
+ * already being aborted so don't attempt to overwrite it.
|
|
+ */
|
|
+ if (conn->abort_reason)
|
|
+ return 0;
|
|
|
|
- conn->state = BT_DISCONN;
|
|
+ bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
|
|
|
|
- break;
|
|
- case BT_CONNECT:
|
|
- if (conn->type == LE_LINK) {
|
|
- if (test_bit(HCI_CONN_SCANNING, &conn->flags))
|
|
- break;
|
|
- r = hci_send_cmd(conn->hdev,
|
|
- HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
|
|
- } else if (conn->type == ACL_LINK) {
|
|
- if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
|
|
- break;
|
|
- r = hci_send_cmd(conn->hdev,
|
|
- HCI_OP_CREATE_CONN_CANCEL,
|
|
- 6, &conn->dst);
|
|
- }
|
|
- break;
|
|
- case BT_CONNECT2:
|
|
- if (conn->type == ACL_LINK) {
|
|
- struct hci_cp_reject_conn_req rej;
|
|
-
|
|
- bacpy(&rej.bdaddr, &conn->dst);
|
|
- rej.reason = reason;
|
|
-
|
|
- r = hci_send_cmd(conn->hdev,
|
|
- HCI_OP_REJECT_CONN_REQ,
|
|
- sizeof(rej), &rej);
|
|
- } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
|
|
- struct hci_cp_reject_sync_conn_req rej;
|
|
-
|
|
- bacpy(&rej.bdaddr, &conn->dst);
|
|
-
|
|
- /* SCO rejection has its own limited set of
|
|
- * allowed error values (0x0D-0x0F) which isn't
|
|
- * compatible with most values passed to this
|
|
- * function. To be safe hard-code one of the
|
|
- * values that's suitable for SCO.
|
|
- */
|
|
- rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
|
|
+ conn->abort_reason = reason;
|
|
|
|
- r = hci_send_cmd(conn->hdev,
|
|
- HCI_OP_REJECT_SYNC_CONN_REQ,
|
|
- sizeof(rej), &rej);
|
|
+ /* If the connection is pending check the command opcode since that
|
|
+ * might be blocking on hci_cmd_sync_work while waiting its respective
|
|
+ * event so we need to hci_cmd_sync_cancel to cancel it.
|
|
+ */
|
|
+ if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
|
|
+ switch (hci_skb_event(hdev->sent_cmd)) {
|
|
+ case HCI_EV_LE_CONN_COMPLETE:
|
|
+ case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
|
|
+ case HCI_EVT_LE_CIS_ESTABLISHED:
|
|
+ hci_cmd_sync_cancel(hdev, ECANCELED);
|
|
+ break;
|
|
}
|
|
- break;
|
|
- default:
|
|
- conn->state = BT_CLOSED;
|
|
- break;
|
|
}
|
|
|
|
- return r;
|
|
+ return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
|
|
+ NULL);
|
|
}
|
|
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
|
|
index a8932d449eb63..70f24dc75b596 100644
|
|
--- a/net/bluetooth/hci_core.c
|
|
+++ b/net/bluetooth/hci_core.c
|
|
@@ -908,7 +908,7 @@ int hci_get_dev_info(void __user *arg)
|
|
else
|
|
flags = hdev->flags;
|
|
|
|
- strcpy(di.name, hdev->name);
|
|
+ strscpy(di.name, hdev->name, sizeof(di.name));
|
|
di.bdaddr = hdev->bdaddr;
|
|
di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
|
|
di.flags = flags;
|
|
@@ -1491,11 +1491,12 @@ static void hci_cmd_timeout(struct work_struct *work)
|
|
struct hci_dev *hdev = container_of(work, struct hci_dev,
|
|
cmd_timer.work);
|
|
|
|
- if (hdev->sent_cmd) {
|
|
- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
|
|
- u16 opcode = __le16_to_cpu(sent->opcode);
|
|
+ if (hdev->req_skb) {
|
|
+ u16 opcode = hci_skb_opcode(hdev->req_skb);
|
|
|
|
bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
|
|
+
|
|
+ hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
|
|
} else {
|
|
bt_dev_err(hdev, "command tx timeout");
|
|
}
|
|
@@ -2791,6 +2792,7 @@ void hci_release_dev(struct hci_dev *hdev)
|
|
|
|
ida_simple_remove(&hci_index_ida, hdev->id);
|
|
kfree_skb(hdev->sent_cmd);
|
|
+ kfree_skb(hdev->req_skb);
|
|
kfree_skb(hdev->recv_event);
|
|
kfree(hdev);
|
|
}
|
|
@@ -2822,6 +2824,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
|
|
return ret;
|
|
}
|
|
|
|
+/* Cancel ongoing command synchronously:
|
|
+ *
|
|
+ * - Cancel command timer
|
|
+ * - Reset command counter
|
|
+ * - Cancel command request
|
|
+ */
|
|
+static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
|
|
+{
|
|
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
|
|
+
|
|
+ cancel_delayed_work_sync(&hdev->cmd_timer);
|
|
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
|
|
+ atomic_set(&hdev->cmd_cnt, 1);
|
|
+
|
|
+ hci_cmd_sync_cancel_sync(hdev, -err);
|
|
+}
|
|
+
|
|
/* Suspend HCI device */
|
|
int hci_suspend_dev(struct hci_dev *hdev)
|
|
{
|
|
@@ -2838,6 +2857,9 @@ int hci_suspend_dev(struct hci_dev *hdev)
|
|
if (mgmt_powering_down(hdev))
|
|
return 0;
|
|
|
|
+ /* Cancel potentially blocking sync operation before suspend */
|
|
+ hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
|
|
+
|
|
hci_req_sync_lock(hdev);
|
|
ret = hci_suspend_sync(hdev);
|
|
hci_req_sync_unlock(hdev);
|
|
@@ -3100,21 +3122,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
|
|
EXPORT_SYMBOL(__hci_cmd_send);
|
|
|
|
/* Get data from the previously sent command */
|
|
-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
|
|
+static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
|
|
{
|
|
struct hci_command_hdr *hdr;
|
|
|
|
- if (!hdev->sent_cmd)
|
|
+ if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
|
|
return NULL;
|
|
|
|
- hdr = (void *) hdev->sent_cmd->data;
|
|
+ hdr = (void *)skb->data;
|
|
|
|
if (hdr->opcode != cpu_to_le16(opcode))
|
|
return NULL;
|
|
|
|
- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
|
|
+ return skb->data + HCI_COMMAND_HDR_SIZE;
|
|
+}
|
|
+
|
|
+/* Get data from the previously sent command */
|
|
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
|
|
+{
|
|
+ void *data;
|
|
+
|
|
+ /* Check if opcode matches last sent command */
|
|
+ data = hci_cmd_data(hdev->sent_cmd, opcode);
|
|
+ if (!data)
|
|
+ /* Check if opcode matches last request */
|
|
+ data = hci_cmd_data(hdev->req_skb, opcode);
|
|
|
|
- return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
|
|
+ return data;
|
|
}
|
|
|
|
/* Get data from last received event */
|
|
@@ -4010,17 +4044,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
|
|
if (!status && !hci_req_is_complete(hdev))
|
|
return;
|
|
|
|
+ skb = hdev->req_skb;
|
|
+
|
|
/* If this was the last command in a request the complete
|
|
- * callback would be found in hdev->sent_cmd instead of the
|
|
+ * callback would be found in hdev->req_skb instead of the
|
|
* command queue (hdev->cmd_q).
|
|
*/
|
|
- if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
|
|
- *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
|
|
+ if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
|
|
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
|
|
return;
|
|
}
|
|
|
|
- if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
|
|
- *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
|
|
+ if (skb && bt_cb(skb)->hci.req_complete) {
|
|
+ *req_complete = bt_cb(skb)->hci.req_complete;
|
|
return;
|
|
}
|
|
|
|
@@ -4116,6 +4152,36 @@ static void hci_rx_work(struct work_struct *work)
|
|
}
|
|
}
|
|
|
|
+static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ bt_dev_dbg(hdev, "skb %p", skb);
|
|
+
|
|
+ kfree_skb(hdev->sent_cmd);
|
|
+
|
|
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
|
|
+ if (!hdev->sent_cmd) {
|
|
+ skb_queue_head(&hdev->cmd_q, skb);
|
|
+ queue_work(hdev->workqueue, &hdev->cmd_work);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ err = hci_send_frame(hdev, skb);
|
|
+ if (err < 0) {
|
|
+ hci_cmd_sync_cancel_sync(hdev, err);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (hci_req_status_pend(hdev) &&
|
|
+ !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
|
|
+ kfree_skb(hdev->req_skb);
|
|
+ hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
|
|
+ }
|
|
+
|
|
+ atomic_dec(&hdev->cmd_cnt);
|
|
+}
|
|
+
|
|
static void hci_cmd_work(struct work_struct *work)
|
|
{
|
|
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
|
|
@@ -4130,30 +4196,15 @@ static void hci_cmd_work(struct work_struct *work)
|
|
if (!skb)
|
|
return;
|
|
|
|
- kfree_skb(hdev->sent_cmd);
|
|
-
|
|
- hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
|
|
- if (hdev->sent_cmd) {
|
|
- int res;
|
|
- if (hci_req_status_pend(hdev))
|
|
- hci_dev_set_flag(hdev, HCI_CMD_PENDING);
|
|
- atomic_dec(&hdev->cmd_cnt);
|
|
-
|
|
- res = hci_send_frame(hdev, skb);
|
|
- if (res < 0)
|
|
- __hci_cmd_sync_cancel(hdev, -res);
|
|
-
|
|
- rcu_read_lock();
|
|
- if (test_bit(HCI_RESET, &hdev->flags) ||
|
|
- hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
|
|
- cancel_delayed_work(&hdev->cmd_timer);
|
|
- else
|
|
- queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
|
|
- HCI_CMD_TIMEOUT);
|
|
- rcu_read_unlock();
|
|
- } else {
|
|
- skb_queue_head(&hdev->cmd_q, skb);
|
|
- queue_work(hdev->workqueue, &hdev->cmd_work);
|
|
- }
|
|
+ hci_send_cmd_sync(hdev, skb);
|
|
+
|
|
+ rcu_read_lock();
|
|
+ if (test_bit(HCI_RESET, &hdev->flags) ||
|
|
+ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
|
|
+ cancel_delayed_work(&hdev->cmd_timer);
|
|
+ else
|
|
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
|
|
+ HCI_CMD_TIMEOUT);
|
|
+ rcu_read_unlock();
|
|
}
|
|
}
|
|
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
|
|
index 452d839c152fc..b150dee88f35c 100644
|
|
--- a/net/bluetooth/hci_event.c
|
|
+++ b/net/bluetooth/hci_event.c
|
|
@@ -1761,7 +1761,7 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
|
{
|
|
struct discovery_state *d = &hdev->discovery;
|
|
|
|
- if (len > HCI_MAX_AD_LENGTH)
|
|
+ if (len > max_adv_len(hdev))
|
|
return;
|
|
|
|
bacpy(&d->last_adv_addr, bdaddr);
|
|
@@ -3567,8 +3567,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
|
|
|
|
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
|
|
|
|
- hci_conn_check_pending(hdev);
|
|
-
|
|
hci_dev_lock(hdev);
|
|
|
|
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
|
|
@@ -4331,7 +4329,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
|
|
* (since for this kind of commands there will not be a command
|
|
* complete event).
|
|
*/
|
|
- if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
|
|
+ if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
|
|
hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
|
|
req_complete_skb);
|
|
if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
|
|
@@ -6242,8 +6240,9 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
|
|
return;
|
|
}
|
|
|
|
- if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
|
|
- bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
|
|
+ if (len > max_adv_len(hdev)) {
|
|
+ bt_dev_err_ratelimited(hdev,
|
|
+ "adv larger than maximum supported");
|
|
return;
|
|
}
|
|
|
|
@@ -6308,7 +6307,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
|
|
*/
|
|
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
|
|
type);
|
|
- if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
|
|
+ if (!ext_adv && conn && type == LE_ADV_IND &&
|
|
+ len <= max_adv_len(hdev)) {
|
|
/* Store report for later inclusion by
|
|
* mgmt_device_connected
|
|
*/
|
|
@@ -6449,7 +6449,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
|
|
info->length + 1))
|
|
break;
|
|
|
|
- if (info->length <= HCI_MAX_AD_LENGTH) {
|
|
+ if (info->length <= max_adv_len(hdev)) {
|
|
rssi = info->data[info->length];
|
|
process_adv_report(hdev, info->type, &info->bdaddr,
|
|
info->bdaddr_type, NULL, 0, rssi,
|
|
@@ -7149,10 +7149,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
|
|
bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
|
|
|
|
/* Only match event if command OGF is for LE */
|
|
- if (hdev->sent_cmd &&
|
|
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
|
|
- hci_skb_event(hdev->sent_cmd) == ev->subevent) {
|
|
- *opcode = hci_skb_opcode(hdev->sent_cmd);
|
|
+ if (hdev->req_skb &&
|
|
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
|
|
+ hci_skb_event(hdev->req_skb) == ev->subevent) {
|
|
+ *opcode = hci_skb_opcode(hdev->req_skb);
|
|
hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
|
|
req_complete_skb);
|
|
}
|
|
@@ -7539,10 +7539,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
|
|
}
|
|
|
|
/* Only match event if command OGF is not for LE */
|
|
- if (hdev->sent_cmd &&
|
|
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
|
|
- hci_skb_event(hdev->sent_cmd) == event) {
|
|
- hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
|
|
+ if (hdev->req_skb &&
|
|
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
|
|
+ hci_skb_event(hdev->req_skb) == event) {
|
|
+ hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
|
|
status, &req_complete, &req_complete_skb);
|
|
req_evt = event;
|
|
}
|
|
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
|
|
index f7e006a363829..4468647df6722 100644
|
|
--- a/net/bluetooth/hci_request.c
|
|
+++ b/net/bluetooth/hci_request.c
|
|
@@ -916,7 +916,7 @@ void hci_request_setup(struct hci_dev *hdev)
|
|
|
|
void hci_request_cancel_all(struct hci_dev *hdev)
|
|
{
|
|
- __hci_cmd_sync_cancel(hdev, ENODEV);
|
|
+ hci_cmd_sync_cancel_sync(hdev, ENODEV);
|
|
|
|
cancel_interleave_scan(hdev);
|
|
}
|
|
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
|
|
index a337340464567..65b2ad34179f8 100644
|
|
--- a/net/bluetooth/hci_sync.c
|
|
+++ b/net/bluetooth/hci_sync.c
|
|
@@ -31,6 +31,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
|
|
hdev->req_result = result;
|
|
hdev->req_status = HCI_REQ_DONE;
|
|
|
|
+ /* Free the request command so it is not used as response */
|
|
+ kfree_skb(hdev->req_skb);
|
|
+ hdev->req_skb = NULL;
|
|
+
|
|
if (skb) {
|
|
struct sock *sk = hci_skb_sk(skb);
|
|
|
|
@@ -38,7 +42,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
|
|
if (sk)
|
|
sock_put(sk);
|
|
|
|
- hdev->req_skb = skb_get(skb);
|
|
+ hdev->req_rsp = skb_get(skb);
|
|
}
|
|
|
|
wake_up_interruptible(&hdev->req_wait_q);
|
|
@@ -186,8 +190,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
|
|
|
|
hdev->req_status = 0;
|
|
hdev->req_result = 0;
|
|
- skb = hdev->req_skb;
|
|
- hdev->req_skb = NULL;
|
|
+ skb = hdev->req_rsp;
|
|
+ hdev->req_rsp = NULL;
|
|
|
|
bt_dev_dbg(hdev, "end: err %d", err);
|
|
|
|
@@ -651,7 +655,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
|
|
mutex_unlock(&hdev->cmd_sync_work_lock);
|
|
}
|
|
|
|
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
|
|
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
|
|
{
|
|
bt_dev_dbg(hdev, "err 0x%2.2x", err);
|
|
|
|
@@ -659,15 +663,17 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
|
|
hdev->req_result = err;
|
|
hdev->req_status = HCI_REQ_CANCELED;
|
|
|
|
- cancel_delayed_work_sync(&hdev->cmd_timer);
|
|
- cancel_delayed_work_sync(&hdev->ncmd_timer);
|
|
- atomic_set(&hdev->cmd_cnt, 1);
|
|
-
|
|
- wake_up_interruptible(&hdev->req_wait_q);
|
|
+ queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
|
|
}
|
|
}
|
|
+EXPORT_SYMBOL(hci_cmd_sync_cancel);
|
|
|
|
-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
|
|
+/* Cancel ongoing command request synchronously:
|
|
+ *
|
|
+ * - Set result and mark status to HCI_REQ_CANCELED
|
|
+ * - Wakeup command sync thread
|
|
+ */
|
|
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
|
|
{
|
|
bt_dev_dbg(hdev, "err 0x%2.2x", err);
|
|
|
|
@@ -675,13 +681,17 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
|
|
hdev->req_result = err;
|
|
hdev->req_status = HCI_REQ_CANCELED;
|
|
|
|
- queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
|
|
+ wake_up_interruptible(&hdev->req_wait_q);
|
|
}
|
|
}
|
|
-EXPORT_SYMBOL(hci_cmd_sync_cancel);
|
|
+EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
|
|
|
|
-int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
|
|
- void *data, hci_cmd_sync_work_destroy_t destroy)
|
|
+/* Submit HCI command to be run in as cmd_sync_work:
|
|
+ *
|
|
+ * - hdev must _not_ be unregistered
|
|
+ */
|
|
+int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
|
|
+ void *data, hci_cmd_sync_work_destroy_t destroy)
|
|
{
|
|
struct hci_cmd_sync_work_entry *entry;
|
|
int err = 0;
|
|
@@ -711,6 +721,23 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
|
|
mutex_unlock(&hdev->unregister_lock);
|
|
return err;
|
|
}
|
|
+EXPORT_SYMBOL(hci_cmd_sync_submit);
|
|
+
|
|
+/* Queue HCI command:
|
|
+ *
|
|
+ * - hdev must be running
|
|
+ */
|
|
+int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
|
|
+ void *data, hci_cmd_sync_work_destroy_t destroy)
|
|
+{
|
|
+ /* Only queue command if hdev is running which means it had been opened
|
|
+ * and is either on init phase or is already up.
|
|
+ */
|
|
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
|
|
+ return -ENETDOWN;
|
|
+
|
|
+ return hci_cmd_sync_submit(hdev, func, data, destroy);
|
|
+}
|
|
EXPORT_SYMBOL(hci_cmd_sync_queue);
|
|
|
|
int hci_update_eir_sync(struct hci_dev *hdev)
|
|
@@ -4856,6 +4883,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
|
|
hdev->sent_cmd = NULL;
|
|
}
|
|
|
|
+ if (hdev->req_skb) {
|
|
+ kfree_skb(hdev->req_skb);
|
|
+ hdev->req_skb = NULL;
|
|
+ }
|
|
+
|
|
clear_bit(HCI_RUNNING, &hdev->flags);
|
|
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
|
|
|
|
@@ -5017,6 +5049,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
|
|
hdev->sent_cmd = NULL;
|
|
}
|
|
|
|
+ /* Drop last request */
|
|
+ if (hdev->req_skb) {
|
|
+ kfree_skb(hdev->req_skb);
|
|
+ hdev->req_skb = NULL;
|
|
+ }
|
|
+
|
|
clear_bit(HCI_RUNNING, &hdev->flags);
|
|
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
|
|
|
|
@@ -5209,22 +5247,27 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
|
|
}
|
|
|
|
static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
|
|
- struct hci_conn *conn)
|
|
+ struct hci_conn *conn, u8 reason)
|
|
{
|
|
+ /* Return reason if scanning since the connection shall probably be
|
|
+ * cleanup directly.
|
|
+ */
|
|
if (test_bit(HCI_CONN_SCANNING, &conn->flags))
|
|
- return 0;
|
|
+ return reason;
|
|
|
|
- if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
|
|
+ if (conn->role == HCI_ROLE_SLAVE ||
|
|
+ test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
|
|
return 0;
|
|
|
|
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
|
|
0, NULL, HCI_CMD_TIMEOUT);
|
|
}
|
|
|
|
-static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
|
|
+static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
|
|
+ u8 reason)
|
|
{
|
|
if (conn->type == LE_LINK)
|
|
- return hci_le_connect_cancel_sync(hdev, conn);
|
|
+ return hci_le_connect_cancel_sync(hdev, conn, reason);
|
|
|
|
if (hdev->hci_ver < BLUETOOTH_VER_1_2)
|
|
return 0;
|
|
@@ -5277,9 +5320,11 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
|
|
case BT_CONFIG:
|
|
return hci_disconnect_sync(hdev, conn, reason);
|
|
case BT_CONNECT:
|
|
- err = hci_connect_cancel_sync(hdev, conn);
|
|
+ err = hci_connect_cancel_sync(hdev, conn, reason);
|
|
/* Cleanup hci_conn object if it cannot be cancelled as it
|
|
- * likelly means the controller and host stack are out of sync.
|
|
+ * likelly means the controller and host stack are out of sync
|
|
+ * or in case of LE it was still scanning so it can be cleanup
|
|
+ * safely.
|
|
*/
|
|
if (err) {
|
|
hci_dev_lock(hdev);
|
|
@@ -6194,7 +6239,7 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
|
|
|
|
done:
|
|
if (err == -ETIMEDOUT)
|
|
- hci_le_connect_cancel_sync(hdev, conn);
|
|
+ hci_le_connect_cancel_sync(hdev, conn, 0x00);
|
|
|
|
/* Re-enable advertising after the connection attempt is finished. */
|
|
hci_resume_advertising_sync(hdev);
|
|
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
|
|
index 6d631a2e60166..716f6dc4934b7 100644
|
|
--- a/net/bluetooth/mgmt.c
|
|
+++ b/net/bluetooth/mgmt.c
|
|
@@ -1039,6 +1039,8 @@ static void rpa_expired(struct work_struct *work)
|
|
hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
|
|
}
|
|
|
|
+static int set_discoverable_sync(struct hci_dev *hdev, void *data);
|
|
+
|
|
static void discov_off(struct work_struct *work)
|
|
{
|
|
struct hci_dev *hdev = container_of(work, struct hci_dev,
|
|
@@ -1057,7 +1059,7 @@ static void discov_off(struct work_struct *work)
|
|
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
|
|
hdev->discov_timeout = 0;
|
|
|
|
- hci_update_discoverable(hdev);
|
|
+ hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
|
|
|
|
mgmt_new_settings(hdev);
|
|
|
|
@@ -1399,8 +1401,16 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
|
|
goto failed;
|
|
}
|
|
|
|
- err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
|
|
- mgmt_set_powered_complete);
|
|
+ /* Cancel potentially blocking sync operation before power off */
|
|
+ if (cp->val == 0x00) {
|
|
+ hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
|
|
+ err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
|
|
+ mgmt_set_powered_complete);
|
|
+ } else {
|
|
+ /* Use hci_cmd_sync_submit since hdev might not be running */
|
|
+ err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
|
|
+ mgmt_set_powered_complete);
|
|
+ }
|
|
|
|
if (err < 0)
|
|
mgmt_pending_remove(cmd);
|
|
@@ -3573,18 +3583,6 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
|
|
return err;
|
|
}
|
|
|
|
-static int abort_conn_sync(struct hci_dev *hdev, void *data)
|
|
-{
|
|
- struct hci_conn *conn;
|
|
- u16 handle = PTR_ERR(data);
|
|
-
|
|
- conn = hci_conn_hash_lookup_handle(hdev, handle);
|
|
- if (!conn)
|
|
- return 0;
|
|
-
|
|
- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
|
|
-}
|
|
-
|
|
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
|
|
u16 len)
|
|
{
|
|
@@ -3635,8 +3633,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
|
|
le_addr_type(addr->type));
|
|
|
|
if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
|
|
- hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
|
|
- NULL);
|
|
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
|
|
|
|
unlock:
|
|
hci_dev_unlock(hdev);
|
|
@@ -5381,9 +5378,9 @@ static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
|
|
for (i = 0; i < pattern_count; i++) {
|
|
offset = patterns[i].offset;
|
|
length = patterns[i].length;
|
|
- if (offset >= HCI_MAX_AD_LENGTH ||
|
|
- length > HCI_MAX_AD_LENGTH ||
|
|
- (offset + length) > HCI_MAX_AD_LENGTH)
|
|
+ if (offset >= HCI_MAX_EXT_AD_LENGTH ||
|
|
+ length > HCI_MAX_EXT_AD_LENGTH ||
|
|
+ (offset + length) > HCI_MAX_EXT_AD_LENGTH)
|
|
return MGMT_STATUS_INVALID_PARAMS;
|
|
|
|
p = kmalloc(sizeof(*p), GFP_KERNEL);
|
|
@@ -8439,8 +8436,8 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
|
|
supported_flags = get_supported_adv_flags(hdev);
|
|
|
|
rp->supported_flags = cpu_to_le32(supported_flags);
|
|
- rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
|
|
- rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
|
|
+ rp->max_adv_data_len = max_adv_len(hdev);
|
|
+ rp->max_scan_rsp_len = max_adv_len(hdev);
|
|
rp->max_instances = hdev->le_num_of_adv_sets;
|
|
rp->num_instances = hdev->adv_instance_cnt;
|
|
|
|
@@ -8468,7 +8465,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
|
|
|
|
static u8 calculate_name_len(struct hci_dev *hdev)
|
|
{
|
|
- u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
|
|
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
|
|
|
|
return eir_append_local_name(hdev, buf, 0);
|
|
}
|
|
@@ -8476,7 +8473,7 @@ static u8 calculate_name_len(struct hci_dev *hdev)
|
|
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
|
|
bool is_adv_data)
|
|
{
|
|
- u8 max_len = HCI_MAX_AD_LENGTH;
|
|
+ u8 max_len = max_adv_len(hdev);
|
|
|
|
if (is_adv_data) {
|
|
if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
|
|
@@ -9764,14 +9761,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
|
struct mgmt_ev_device_disconnected ev;
|
|
struct sock *sk = NULL;
|
|
|
|
- /* The connection is still in hci_conn_hash so test for 1
|
|
- * instead of 0 to know if this is the last one.
|
|
- */
|
|
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
|
|
- cancel_delayed_work(&hdev->power_off);
|
|
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
|
|
- }
|
|
-
|
|
if (!mgmt_connected)
|
|
return;
|
|
|
|
@@ -9828,14 +9817,6 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
|
|
{
|
|
struct mgmt_ev_connect_failed ev;
|
|
|
|
- /* The connection is still in hci_conn_hash so test for 1
|
|
- * instead of 0 to know if this is the last one.
|
|
- */
|
|
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
|
|
- cancel_delayed_work(&hdev->power_off);
|
|
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
|
|
- }
|
|
-
|
|
bacpy(&ev.addr.bdaddr, bdaddr);
|
|
ev.addr.type = link_to_bdaddr(link_type, addr_type);
|
|
ev.status = mgmt_status(status);
|
|
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
|
|
index 8d6fce9005bdd..4f54c7df3a94f 100644
|
|
--- a/net/bluetooth/rfcomm/core.c
|
|
+++ b/net/bluetooth/rfcomm/core.c
|
|
@@ -1937,7 +1937,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
|
|
/* Get data directly from socket receive queue without copying it. */
|
|
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
|
|
skb_orphan(skb);
|
|
- if (!skb_linearize(skb)) {
|
|
+ if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) {
|
|
s = rfcomm_recv_frame(s, skb);
|
|
if (!s)
|
|
break;
|
|
diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index 60619fe8af5fc..65284eeec7de5 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -2271,7 +2271,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
|
|
rcu_read_lock();
|
|
again:
|
|
list_for_each_entry_rcu(ptype, ptype_list, list) {
|
|
- if (ptype->ignore_outgoing)
|
|
+ if (READ_ONCE(ptype->ignore_outgoing))
|
|
continue;
|
|
|
|
/* Never send packets back to the socket
|
|
@@ -6645,6 +6645,8 @@ static int napi_threaded_poll(void *data)
|
|
void *have;
|
|
|
|
while (!napi_thread_wait(napi)) {
|
|
+ unsigned long last_qs = jiffies;
|
|
+
|
|
for (;;) {
|
|
bool repoll = false;
|
|
|
|
@@ -6659,6 +6661,7 @@ static int napi_threaded_poll(void *data)
|
|
if (!repoll)
|
|
break;
|
|
|
|
+ rcu_softirq_qs_periodic(last_qs);
|
|
cond_resched();
|
|
}
|
|
}
|
|
diff --git a/net/core/scm.c b/net/core/scm.c
|
|
index e762a4b8a1d22..a877c4ef4c256 100644
|
|
--- a/net/core/scm.c
|
|
+++ b/net/core/scm.c
|
|
@@ -105,7 +105,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
|
|
if (fd < 0 || !(file = fget_raw(fd)))
|
|
return -EBADF;
|
|
/* don't allow io_uring files */
|
|
- if (io_uring_get_socket(file)) {
|
|
+ if (io_is_uring_fops(file)) {
|
|
fput(file);
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
|
|
index d4bd10f8723df..e38a4c7449f62 100644
|
|
--- a/net/core/skbuff.c
|
|
+++ b/net/core/skbuff.c
|
|
@@ -6500,6 +6500,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
|
|
for (i = 0; i < sp->len; i++)
|
|
xfrm_state_hold(sp->xvec[i]);
|
|
}
|
|
+#endif
|
|
+#ifdef CONFIG_MCTP_FLOWS
|
|
+ if (old_active & (1 << SKB_EXT_MCTP)) {
|
|
+ struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
|
|
+
|
|
+ if (flow->key)
|
|
+ refcount_inc(&flow->key->refs);
|
|
+ }
|
|
#endif
|
|
__skb_ext_put(old);
|
|
return new;
|
|
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
|
|
index f7cf74cdd3db1..e6ea6764d10ab 100644
|
|
--- a/net/core/sock_diag.c
|
|
+++ b/net/core/sock_diag.c
|
|
@@ -190,7 +190,7 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
|
|
if (sock_diag_handlers[hndl->family])
|
|
err = -EBUSY;
|
|
else
|
|
- sock_diag_handlers[hndl->family] = hndl;
|
|
+ WRITE_ONCE(sock_diag_handlers[hndl->family], hndl);
|
|
mutex_unlock(&sock_diag_table_mutex);
|
|
|
|
return err;
|
|
@@ -206,7 +206,7 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
|
|
|
|
mutex_lock(&sock_diag_table_mutex);
|
|
BUG_ON(sock_diag_handlers[family] != hnld);
|
|
- sock_diag_handlers[family] = NULL;
|
|
+ WRITE_ONCE(sock_diag_handlers[family], NULL);
|
|
mutex_unlock(&sock_diag_table_mutex);
|
|
}
|
|
EXPORT_SYMBOL_GPL(sock_diag_unregister);
|
|
@@ -224,7 +224,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
|
|
return -EINVAL;
|
|
req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
|
|
|
|
- if (sock_diag_handlers[req->sdiag_family] == NULL)
|
|
+ if (READ_ONCE(sock_diag_handlers[req->sdiag_family]) == NULL)
|
|
sock_load_diag_module(req->sdiag_family, 0);
|
|
|
|
mutex_lock(&sock_diag_table_mutex);
|
|
@@ -283,12 +283,12 @@ static int sock_diag_bind(struct net *net, int group)
|
|
switch (group) {
|
|
case SKNLGRP_INET_TCP_DESTROY:
|
|
case SKNLGRP_INET_UDP_DESTROY:
|
|
- if (!sock_diag_handlers[AF_INET])
|
|
+ if (!READ_ONCE(sock_diag_handlers[AF_INET]))
|
|
sock_load_diag_module(AF_INET, 0);
|
|
break;
|
|
case SKNLGRP_INET6_TCP_DESTROY:
|
|
case SKNLGRP_INET6_UDP_DESTROY:
|
|
- if (!sock_diag_handlers[AF_INET6])
|
|
+ if (!READ_ONCE(sock_diag_handlers[AF_INET6]))
|
|
sock_load_diag_module(AF_INET6, 0);
|
|
break;
|
|
}
|
|
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
|
|
index 0b01998780952..e44a039e36afe 100644
|
|
--- a/net/hsr/hsr_framereg.c
|
|
+++ b/net/hsr/hsr_framereg.c
|
|
@@ -235,6 +235,10 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
|
|
*/
|
|
if (ethhdr->h_proto == htons(ETH_P_PRP) ||
|
|
ethhdr->h_proto == htons(ETH_P_HSR)) {
|
|
+ /* Check if skb contains hsr_ethhdr */
|
|
+ if (skb->mac_len < sizeof(struct hsr_ethhdr))
|
|
+ return NULL;
|
|
+
|
|
/* Use the existing sequence_nr from the tag as starting point
|
|
* for filtering duplicate frames.
|
|
*/
|
|
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
|
|
index b099c31501509..257b50124cee5 100644
|
|
--- a/net/hsr/hsr_main.c
|
|
+++ b/net/hsr/hsr_main.c
|
|
@@ -148,14 +148,21 @@ static struct notifier_block hsr_nb = {
|
|
|
|
static int __init hsr_init(void)
|
|
{
|
|
- int res;
|
|
+ int err;
|
|
|
|
BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
|
|
|
|
- register_netdevice_notifier(&hsr_nb);
|
|
- res = hsr_netlink_init();
|
|
+ err = register_netdevice_notifier(&hsr_nb);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ err = hsr_netlink_init();
|
|
+ if (err) {
|
|
+ unregister_netdevice_notifier(&hsr_nb);
|
|
+ return err;
|
|
+ }
|
|
|
|
- return res;
|
|
+ return 0;
|
|
}
|
|
|
|
static void __exit hsr_exit(void)
|
|
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
|
|
index f7426926a1041..8f690a6e61baa 100644
|
|
--- a/net/ipv4/inet_diag.c
|
|
+++ b/net/ipv4/inet_diag.c
|
|
@@ -57,7 +57,7 @@ static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
|
|
return ERR_PTR(-ENOENT);
|
|
}
|
|
|
|
- if (!inet_diag_table[proto])
|
|
+ if (!READ_ONCE(inet_diag_table[proto]))
|
|
sock_load_diag_module(AF_INET, proto);
|
|
|
|
mutex_lock(&inet_diag_table_mutex);
|
|
@@ -1419,7 +1419,7 @@ int inet_diag_register(const struct inet_diag_handler *h)
|
|
mutex_lock(&inet_diag_table_mutex);
|
|
err = -EEXIST;
|
|
if (!inet_diag_table[type]) {
|
|
- inet_diag_table[type] = h;
|
|
+ WRITE_ONCE(inet_diag_table[type], h);
|
|
err = 0;
|
|
}
|
|
mutex_unlock(&inet_diag_table_mutex);
|
|
@@ -1436,7 +1436,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
|
|
return;
|
|
|
|
mutex_lock(&inet_diag_table_mutex);
|
|
- inet_diag_table[type] = NULL;
|
|
+ WRITE_ONCE(inet_diag_table[type], NULL);
|
|
mutex_unlock(&inet_diag_table_mutex);
|
|
}
|
|
EXPORT_SYMBOL_GPL(inet_diag_unregister);
|
|
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
|
|
index 56776e1b1de52..0ad25e6783ac7 100644
|
|
--- a/net/ipv4/inet_hashtables.c
|
|
+++ b/net/ipv4/inet_hashtables.c
|
|
@@ -1117,7 +1117,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
|
|
sock_prot_inuse_add(net, sk->sk_prot, -1);
|
|
|
|
spin_lock(lock);
|
|
- sk_nulls_del_node_init_rcu(sk);
|
|
+ __sk_nulls_del_node_init_rcu(sk);
|
|
spin_unlock(lock);
|
|
|
|
sk->sk_hash = 0;
|
|
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
|
|
index 1d77d992e6e77..340a8f0c29800 100644
|
|
--- a/net/ipv4/inet_timewait_sock.c
|
|
+++ b/net/ipv4/inet_timewait_sock.c
|
|
@@ -281,12 +281,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
|
|
}
|
|
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
|
|
|
|
+/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
|
|
void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
|
|
{
|
|
- struct inet_timewait_sock *tw;
|
|
- struct sock *sk;
|
|
struct hlist_nulls_node *node;
|
|
unsigned int slot;
|
|
+ struct sock *sk;
|
|
|
|
for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
|
|
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
|
|
@@ -295,38 +295,35 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
|
|
rcu_read_lock();
|
|
restart:
|
|
sk_nulls_for_each_rcu(sk, node, &head->chain) {
|
|
- if (sk->sk_state != TCP_TIME_WAIT) {
|
|
- /* A kernel listener socket might not hold refcnt for net,
|
|
- * so reqsk_timer_handler() could be fired after net is
|
|
- * freed. Userspace listener and reqsk never exist here.
|
|
- */
|
|
- if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
|
|
- hashinfo->pernet)) {
|
|
- struct request_sock *req = inet_reqsk(sk);
|
|
-
|
|
- inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
|
|
- }
|
|
+ int state = inet_sk_state_load(sk);
|
|
|
|
+ if ((1 << state) & ~(TCPF_TIME_WAIT |
|
|
+ TCPF_NEW_SYN_RECV))
|
|
continue;
|
|
- }
|
|
|
|
- tw = inet_twsk(sk);
|
|
- if ((tw->tw_family != family) ||
|
|
- refcount_read(&twsk_net(tw)->ns.count))
|
|
+ if (sk->sk_family != family ||
|
|
+ refcount_read(&sock_net(sk)->ns.count))
|
|
continue;
|
|
|
|
- if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
|
|
+ if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
|
|
continue;
|
|
|
|
- if (unlikely((tw->tw_family != family) ||
|
|
- refcount_read(&twsk_net(tw)->ns.count))) {
|
|
- inet_twsk_put(tw);
|
|
+ if (unlikely(sk->sk_family != family ||
|
|
+ refcount_read(&sock_net(sk)->ns.count))) {
|
|
+ sock_gen_put(sk);
|
|
goto restart;
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
local_bh_disable();
|
|
- inet_twsk_deschedule_put(tw);
|
|
+ if (state == TCP_TIME_WAIT) {
|
|
+ inet_twsk_deschedule_put(inet_twsk(sk));
|
|
+ } else {
|
|
+ struct request_sock *req = inet_reqsk(sk);
|
|
+
|
|
+ inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
|
|
+ req);
|
|
+ }
|
|
local_bh_enable();
|
|
goto restart_rcu;
|
|
}
|
|
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
|
|
index 328f9068c6a43..3445e576b05bc 100644
|
|
--- a/net/ipv4/ip_tunnel.c
|
|
+++ b/net/ipv4/ip_tunnel.c
|
|
@@ -364,7 +364,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
|
|
bool log_ecn_error)
|
|
{
|
|
const struct iphdr *iph = ip_hdr(skb);
|
|
- int err;
|
|
+ int nh, err;
|
|
|
|
#ifdef CONFIG_NET_IPGRE_BROADCAST
|
|
if (ipv4_is_multicast(iph->daddr)) {
|
|
@@ -390,8 +390,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
|
|
tunnel->i_seqno = ntohl(tpi->seq) + 1;
|
|
}
|
|
|
|
+ /* Save offset of outer header relative to skb->head,
|
|
+ * because we are going to reset the network header to the inner header
|
|
+ * and might change skb->head.
|
|
+ */
|
|
+ nh = skb_network_header(skb) - skb->head;
|
|
+
|
|
skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
|
|
|
|
+ if (!pskb_inet_may_pull(skb)) {
|
|
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
|
|
+ DEV_STATS_INC(tunnel->dev, rx_errors);
|
|
+ goto drop;
|
|
+ }
|
|
+ iph = (struct iphdr *)(skb->head + nh);
|
|
+
|
|
err = IP_ECN_decapsulate(iph, skb);
|
|
if (unlikely(err)) {
|
|
if (log_ecn_error)
|
|
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
|
|
index d5421c38c2aae..3ed9ed2bffd29 100644
|
|
--- a/net/ipv4/ipmr.c
|
|
+++ b/net/ipv4/ipmr.c
|
|
@@ -1581,9 +1581,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
|
|
|
|
if (copy_from_sockptr(&olr, optlen, sizeof(int)))
|
|
return -EFAULT;
|
|
- olr = min_t(unsigned int, olr, sizeof(int));
|
|
if (olr < 0)
|
|
return -EINVAL;
|
|
+
|
|
+ olr = min_t(unsigned int, olr, sizeof(int));
|
|
+
|
|
if (copy_to_sockptr(optlen, &olr, sizeof(int)))
|
|
return -EFAULT;
|
|
if (copy_to_sockptr(optval, &val, olr))
|
|
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
|
|
index 7c63b91edbf7a..ee0efd0efec40 100644
|
|
--- a/net/ipv4/raw.c
|
|
+++ b/net/ipv4/raw.c
|
|
@@ -348,6 +348,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
|
|
goto error;
|
|
skb_reserve(skb, hlen);
|
|
|
|
+ skb->protocol = htons(ETH_P_IP);
|
|
skb->priority = READ_ONCE(sk->sk_priority);
|
|
skb->mark = sockc->mark;
|
|
skb->tstamp = sockc->transmit_time;
|
|
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
|
|
index 86e7695d91adf..5a165e29f7be4 100644
|
|
--- a/net/ipv4/tcp.c
|
|
+++ b/net/ipv4/tcp.c
|
|
@@ -4102,11 +4102,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
|
|
if (copy_from_sockptr(&len, optlen, sizeof(int)))
|
|
return -EFAULT;
|
|
|
|
- len = min_t(unsigned int, len, sizeof(int));
|
|
-
|
|
if (len < 0)
|
|
return -EINVAL;
|
|
|
|
+ len = min_t(unsigned int, len, sizeof(int));
|
|
+
|
|
switch (optname) {
|
|
case TCP_MAXSEG:
|
|
val = tp->mss_cache;
|
|
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
|
|
index 42844d20da020..b3bfa1a09df68 100644
|
|
--- a/net/ipv4/tcp_minisocks.c
|
|
+++ b/net/ipv4/tcp_minisocks.c
|
|
@@ -357,10 +357,6 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
|
|
/* Even if tw_refcount == 1, we must clean up kernel reqsk */
|
|
inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
|
|
} else if (!purged_once) {
|
|
- /* The last refcount is decremented in tcp_sk_exit_batch() */
|
|
- if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
|
|
- continue;
|
|
-
|
|
inet_twsk_purge(&tcp_hashinfo, family);
|
|
purged_once = true;
|
|
}
|
|
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
|
|
index 87d759bab0012..7856b7a3e0ee9 100644
|
|
--- a/net/ipv4/udp.c
|
|
+++ b/net/ipv4/udp.c
|
|
@@ -2790,11 +2790,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
|
|
if (get_user(len, optlen))
|
|
return -EFAULT;
|
|
|
|
- len = min_t(unsigned int, len, sizeof(int));
|
|
-
|
|
if (len < 0)
|
|
return -EINVAL;
|
|
|
|
+ len = min_t(unsigned int, len, sizeof(int));
|
|
+
|
|
switch (optname) {
|
|
case UDP_CORK:
|
|
val = udp_test_bit(CORK, sk);
|
|
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
|
|
index 7c20038330104..be52b18e08a6b 100644
|
|
--- a/net/ipv6/fib6_rules.c
|
|
+++ b/net/ipv6/fib6_rules.c
|
|
@@ -449,6 +449,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
|
|
+ nla_total_size(16); /* src */
|
|
}
|
|
|
|
+static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
|
|
+{
|
|
+ rt_genid_bump_ipv6(ops->fro_net);
|
|
+}
|
|
+
|
|
static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
|
|
.family = AF_INET6,
|
|
.rule_size = sizeof(struct fib6_rule),
|
|
@@ -461,6 +466,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
|
|
.compare = fib6_rule_compare,
|
|
.fill = fib6_rule_fill,
|
|
.nlmsg_payload = fib6_rule_nlmsg_payload,
|
|
+ .flush_cache = fib6_rule_flush_cache,
|
|
.nlgroup = RTNLGRP_IPV6_RULE,
|
|
.owner = THIS_MODULE,
|
|
.fro_net = &init_net,
|
|
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
|
|
index 566f3b7b957e9..a777695389403 100644
|
|
--- a/net/ipv6/mcast.c
|
|
+++ b/net/ipv6/mcast.c
|
|
@@ -2722,7 +2722,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
|
|
/* Should stop work after group drop. or we will
|
|
* start work again in mld_ifc_event()
|
|
*/
|
|
- synchronize_net();
|
|
mld_query_stop_work(idev);
|
|
mld_report_stop_work(idev);
|
|
|
|
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
|
|
index fc3fddeb6f36d..f66b5f74cd83a 100644
|
|
--- a/net/iucv/iucv.c
|
|
+++ b/net/iucv/iucv.c
|
|
@@ -156,7 +156,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
|
|
static LIST_HEAD(iucv_handler_list);
|
|
|
|
/*
|
|
- * iucv_path_table: an array of iucv_path structures.
|
|
+ * iucv_path_table: array of pointers to iucv_path structures.
|
|
*/
|
|
static struct iucv_path **iucv_path_table;
|
|
static unsigned long iucv_max_pathid;
|
|
@@ -544,7 +544,7 @@ static int iucv_enable(void)
|
|
|
|
cpus_read_lock();
|
|
rc = -ENOMEM;
|
|
- alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
|
|
+ alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
|
|
iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
|
|
if (!iucv_path_table)
|
|
goto out;
|
|
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
|
|
index 65845c59c0655..7d37bf4334d26 100644
|
|
--- a/net/kcm/kcmsock.c
|
|
+++ b/net/kcm/kcmsock.c
|
|
@@ -1274,10 +1274,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
|
|
if (get_user(len, optlen))
|
|
return -EFAULT;
|
|
|
|
- len = min_t(unsigned int, len, sizeof(int));
|
|
if (len < 0)
|
|
return -EINVAL;
|
|
|
|
+ len = min_t(unsigned int, len, sizeof(int));
|
|
+
|
|
switch (optname) {
|
|
case KCM_RECV_DISABLE:
|
|
val = kcm->rx_disabled;
|
|
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
|
|
index f011af6601c9c..6146e4e67bbb5 100644
|
|
--- a/net/l2tp/l2tp_ppp.c
|
|
+++ b/net/l2tp/l2tp_ppp.c
|
|
@@ -1356,11 +1356,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
|
|
if (get_user(len, optlen))
|
|
return -EFAULT;
|
|
|
|
- len = min_t(unsigned int, len, sizeof(int));
|
|
-
|
|
if (len < 0)
|
|
return -EINVAL;
|
|
|
|
+ len = min_t(unsigned int, len, sizeof(int));
|
|
+
|
|
err = -ENOTCONN;
|
|
if (!sk->sk_user_data)
|
|
goto end;
|
|
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
|
|
index d5ea5f5bcf3a0..9d33fd2377c88 100644
|
|
--- a/net/mac80211/rate.c
|
|
+++ b/net/mac80211/rate.c
|
|
@@ -119,7 +119,8 @@ void rate_control_rate_update(struct ieee80211_local *local,
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
- drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
|
|
+ if (sta->uploaded)
|
|
+ drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
|
|
}
|
|
|
|
int ieee80211_rate_control_register(const struct rate_control_ops *ops)
|
|
diff --git a/net/mctp/route.c b/net/mctp/route.c
|
|
index 0144d8ebdaefb..05ab4fddc82e9 100644
|
|
--- a/net/mctp/route.c
|
|
+++ b/net/mctp/route.c
|
|
@@ -843,6 +843,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
|
|
/* copy message payload */
|
|
skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
|
|
|
|
+ /* we need to copy the extensions, for MCTP flow data */
|
|
+ skb_ext_copy(skb2, skb);
|
|
+
|
|
/* do route */
|
|
rc = rt->output(rt, skb2);
|
|
if (rc)
|
|
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
|
index d3ba947f43761..0a86c019a75de 100644
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -1205,7 +1205,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
|
|
if (flags & ~NFT_TABLE_F_MASK)
|
|
return -EOPNOTSUPP;
|
|
|
|
- if (flags == ctx->table->flags)
|
|
+ if (flags == (ctx->table->flags & NFT_TABLE_F_MASK))
|
|
return 0;
|
|
|
|
if ((nft_table_has_owner(ctx->table) &&
|
|
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
|
|
index e1969209b3abb..58eca26162735 100644
|
|
--- a/net/netfilter/nft_set_pipapo.c
|
|
+++ b/net/netfilter/nft_set_pipapo.c
|
|
@@ -2240,8 +2240,6 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
|
|
if (m) {
|
|
rcu_barrier();
|
|
|
|
- nft_set_pipapo_match_destroy(ctx, set, m);
|
|
-
|
|
for_each_possible_cpu(cpu)
|
|
pipapo_free_scratch(m, cpu);
|
|
free_percpu(m->scratch);
|
|
@@ -2253,8 +2251,7 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
|
|
if (priv->clone) {
|
|
m = priv->clone;
|
|
|
|
- if (priv->dirty)
|
|
- nft_set_pipapo_match_destroy(ctx, set, m);
|
|
+ nft_set_pipapo_match_destroy(ctx, set, m);
|
|
|
|
for_each_possible_cpu(cpu)
|
|
pipapo_free_scratch(priv->clone, cpu);
|
|
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
|
|
index c3117350f5fbb..7188ca8d84693 100644
|
|
--- a/net/packet/af_packet.c
|
|
+++ b/net/packet/af_packet.c
|
|
@@ -3981,7 +3981,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
|
|
if (val < 0 || val > 1)
|
|
return -EINVAL;
|
|
|
|
- po->prot_hook.ignore_outgoing = !!val;
|
|
+ WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
|
|
return 0;
|
|
}
|
|
case PACKET_TX_HAS_OFF:
|
|
@@ -4110,7 +4110,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
|
|
0);
|
|
break;
|
|
case PACKET_IGNORE_OUTGOING:
|
|
- val = po->prot_hook.ignore_outgoing;
|
|
+ val = READ_ONCE(po->prot_hook.ignore_outgoing);
|
|
break;
|
|
case PACKET_ROLLOVER_STATS:
|
|
if (!po->rollover)
|
|
diff --git a/net/rds/send.c b/net/rds/send.c
|
|
index a4ba45c430d81..0005fb43f2dfa 100644
|
|
--- a/net/rds/send.c
|
|
+++ b/net/rds/send.c
|
|
@@ -103,13 +103,12 @@ EXPORT_SYMBOL_GPL(rds_send_path_reset);
|
|
|
|
static int acquire_in_xmit(struct rds_conn_path *cp)
|
|
{
|
|
- return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
|
|
+ return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
|
|
}
|
|
|
|
static void release_in_xmit(struct rds_conn_path *cp)
|
|
{
|
|
- clear_bit(RDS_IN_XMIT, &cp->cp_flags);
|
|
- smp_mb__after_atomic();
|
|
+ clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
|
|
/*
|
|
* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
|
|
* hot path and finding waiters is very rare. We don't want to walk
|
|
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
|
|
index 8d5eebb2dd1b1..1d4638aa4254f 100644
|
|
--- a/net/sched/sch_taprio.c
|
|
+++ b/net/sched/sch_taprio.c
|
|
@@ -765,7 +765,8 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
|
|
};
|
|
|
|
static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
|
|
- [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 },
|
|
+ [TCA_TAPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
|
|
+ TC_QOPT_MAX_QUEUE),
|
|
[TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
|
|
};
|
|
|
|
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
|
|
index d435bffc61999..97ff11973c493 100644
|
|
--- a/net/sunrpc/addr.c
|
|
+++ b/net/sunrpc/addr.c
|
|
@@ -284,10 +284,10 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
|
|
}
|
|
|
|
if (snprintf(portbuf, sizeof(portbuf),
|
|
- ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
|
|
+ ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf))
|
|
return NULL;
|
|
|
|
- if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
|
|
+ if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf))
|
|
return NULL;
|
|
|
|
return kstrdup(addrbuf, gfp_flags);
|
|
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
|
|
index d79f12c2550ac..cb32ab9a83952 100644
|
|
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
|
|
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
|
|
@@ -250,8 +250,8 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
|
|
|
|
creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
|
|
if (!creds) {
|
|
- kfree(oa->data);
|
|
- return -ENOMEM;
|
|
+ err = -ENOMEM;
|
|
+ goto free_oa;
|
|
}
|
|
|
|
oa->data[0].option.data = CREDS_VALUE;
|
|
@@ -265,29 +265,40 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
|
|
|
|
/* option buffer */
|
|
p = xdr_inline_decode(xdr, 4);
|
|
- if (unlikely(p == NULL))
|
|
- return -ENOSPC;
|
|
+ if (unlikely(p == NULL)) {
|
|
+ err = -ENOSPC;
|
|
+ goto free_creds;
|
|
+ }
|
|
|
|
length = be32_to_cpup(p);
|
|
p = xdr_inline_decode(xdr, length);
|
|
- if (unlikely(p == NULL))
|
|
- return -ENOSPC;
|
|
+ if (unlikely(p == NULL)) {
|
|
+ err = -ENOSPC;
|
|
+ goto free_creds;
|
|
+ }
|
|
|
|
if (length == sizeof(CREDS_VALUE) &&
|
|
memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) {
|
|
/* We have creds here. parse them */
|
|
err = gssx_dec_linux_creds(xdr, creds);
|
|
if (err)
|
|
- return err;
|
|
+ goto free_creds;
|
|
oa->data[0].value.len = 1; /* presence */
|
|
} else {
|
|
/* consume uninteresting buffer */
|
|
err = gssx_dec_buffer(xdr, &dummy);
|
|
if (err)
|
|
- return err;
|
|
+ goto free_creds;
|
|
}
|
|
}
|
|
return 0;
|
|
+
|
|
+free_creds:
|
|
+ kfree(creds);
|
|
+free_oa:
|
|
+ kfree(oa->data);
|
|
+ oa->data = NULL;
|
|
+ return err;
|
|
}
|
|
|
|
static int gssx_dec_status(struct xdr_stream *xdr,
|
|
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
|
|
index ab2c83d58b62a..9bfffe2a7f020 100644
|
|
--- a/net/unix/garbage.c
|
|
+++ b/net/unix/garbage.c
|
|
@@ -198,7 +198,7 @@ void wait_for_unix_gc(void)
|
|
if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
|
|
!READ_ONCE(gc_in_progress))
|
|
unix_gc();
|
|
- wait_event(unix_gc_wait, gc_in_progress == false);
|
|
+ wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
|
|
}
|
|
|
|
/* The external entry point: unix_gc() */
|
|
diff --git a/net/unix/scm.c b/net/unix/scm.c
|
|
index e8e2a00bb0f58..d1048b4c2baaf 100644
|
|
--- a/net/unix/scm.c
|
|
+++ b/net/unix/scm.c
|
|
@@ -34,10 +34,8 @@ struct sock *unix_get_socket(struct file *filp)
|
|
/* PF_UNIX ? */
|
|
if (s && sock->ops && sock->ops->family == PF_UNIX)
|
|
u_sock = s;
|
|
- } else {
|
|
- /* Could be an io_uring instance */
|
|
- u_sock = io_uring_get_socket(filp);
|
|
}
|
|
+
|
|
return u_sock;
|
|
}
|
|
EXPORT_SYMBOL(unix_get_socket);
|
|
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
|
|
index 5c7ad301d742e..5a8b2ea56564e 100644
|
|
--- a/net/x25/af_x25.c
|
|
+++ b/net/x25/af_x25.c
|
|
@@ -460,12 +460,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
|
|
if (get_user(len, optlen))
|
|
goto out;
|
|
|
|
- len = min_t(unsigned int, len, sizeof(int));
|
|
-
|
|
rc = -EINVAL;
|
|
if (len < 0)
|
|
goto out;
|
|
|
|
+ len = min_t(unsigned int, len, sizeof(int));
|
|
+
|
|
rc = -EFAULT;
|
|
if (put_user(len, optlen))
|
|
goto out;
|
|
diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
|
|
index d800b2c0af977..4f414ab706bd8 100755
|
|
--- a/scripts/clang-tools/gen_compile_commands.py
|
|
+++ b/scripts/clang-tools/gen_compile_commands.py
|
|
@@ -170,7 +170,7 @@ def process_line(root_directory, command_prefix, file_path):
|
|
# escape the pound sign '#', either as '\#' or '$(pound)' (depending on the
|
|
# kernel version). The compile_commands.json file is not interepreted
|
|
# by Make, so this code replaces the escaped version with '#'.
|
|
- prefix = command_prefix.replace('\#', '#').replace('$(pound)', '#')
|
|
+ prefix = command_prefix.replace(r'\#', '#').replace('$(pound)', '#')
|
|
|
|
# Use os.path.abspath() to normalize the path resolving '.' and '..' .
|
|
abs_path = os.path.abspath(os.path.join(root_directory, file_path))
|
|
diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l
|
|
index cc386e4436834..2c2b3e6f248ca 100644
|
|
--- a/scripts/kconfig/lexer.l
|
|
+++ b/scripts/kconfig/lexer.l
|
|
@@ -302,8 +302,11 @@ static char *expand_token(const char *in, size_t n)
|
|
new_string();
|
|
append_string(in, n);
|
|
|
|
- /* get the whole line because we do not know the end of token. */
|
|
- while ((c = input()) != EOF) {
|
|
+ /*
|
|
+ * get the whole line because we do not know the end of token.
|
|
+	 * input() returns 0 (not EOF!) when it reaches the end of file.
|
|
+ */
|
|
+ while ((c = input()) != 0) {
|
|
if (c == '\n') {
|
|
unput(c);
|
|
break;
|
|
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
|
|
index 4589aac091542..b00bbf18a6f5d 100644
|
|
--- a/sound/core/seq/seq_midi.c
|
|
+++ b/sound/core/seq/seq_midi.c
|
|
@@ -112,6 +112,12 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
|
|
return 0;
|
|
}
|
|
|
|
+/* callback for snd_seq_dump_var_event(), bridging to dump_midi() */
|
|
+static int __dump_midi(void *ptr, void *buf, int count)
|
|
+{
|
|
+ return dump_midi(ptr, buf, count);
|
|
+}
|
|
+
|
|
static int event_process_midi(struct snd_seq_event *ev, int direct,
|
|
void *private_data, int atomic, int hop)
|
|
{
|
|
@@ -131,7 +137,7 @@ static int event_process_midi(struct snd_seq_event *ev, int direct,
|
|
pr_debug("ALSA: seq_midi: invalid sysex event flags = 0x%x\n", ev->flags);
|
|
return 0;
|
|
}
|
|
- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)dump_midi, substream);
|
|
+ snd_seq_dump_var_event(ev, __dump_midi, substream);
|
|
snd_midi_event_reset_decode(msynth->parser);
|
|
} else {
|
|
if (msynth->parser == NULL)
|
|
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
|
|
index f5cae49500c81..ffd8e7202c334 100644
|
|
--- a/sound/core/seq/seq_virmidi.c
|
|
+++ b/sound/core/seq/seq_virmidi.c
|
|
@@ -62,6 +62,13 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
|
|
/*
|
|
* decode input event and put to read buffer of each opened file
|
|
*/
|
|
+
|
|
+/* callback for snd_seq_dump_var_event(), bridging to snd_rawmidi_receive() */
|
|
+static int dump_to_rawmidi(void *ptr, void *buf, int count)
|
|
+{
|
|
+ return snd_rawmidi_receive(ptr, buf, count);
|
|
+}
|
|
+
|
|
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
|
|
struct snd_seq_event *ev,
|
|
bool atomic)
|
|
@@ -80,7 +87,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
|
|
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
|
|
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
|
|
continue;
|
|
- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
|
|
+ snd_seq_dump_var_event(ev, dump_to_rawmidi, vmidi->substream);
|
|
snd_midi_event_reset_decode(vmidi->parser);
|
|
} else {
|
|
len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
|
|
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
|
index 75bd7b2fa4ee6..6e759032eba2e 100644
|
|
--- a/sound/pci/hda/patch_realtek.c
|
|
+++ b/sound/pci/hda/patch_realtek.c
|
|
@@ -3681,6 +3681,7 @@ static void alc285_hp_init(struct hda_codec *codec)
|
|
int i, val;
|
|
int coef38, coef0d, coef36;
|
|
|
|
+ alc_write_coefex_idx(codec, 0x58, 0x00, 0x1888); /* write default value */
|
|
alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */
|
|
coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */
|
|
coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */
|
|
@@ -6692,6 +6693,60 @@ static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
|
|
}
|
|
}
|
|
|
|
+static void alc285_fixup_hp_envy_x360(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix,
|
|
+ int action)
|
|
+{
|
|
+ static const struct coef_fw coefs[] = {
|
|
+ WRITE_COEF(0x08, 0x6a0c), WRITE_COEF(0x0d, 0xa023),
|
|
+ WRITE_COEF(0x10, 0x0320), WRITE_COEF(0x1a, 0x8c03),
|
|
+ WRITE_COEF(0x25, 0x1800), WRITE_COEF(0x26, 0x003a),
|
|
+ WRITE_COEF(0x28, 0x1dfe), WRITE_COEF(0x29, 0xb014),
|
|
+ WRITE_COEF(0x2b, 0x1dfe), WRITE_COEF(0x37, 0xfe15),
|
|
+ WRITE_COEF(0x38, 0x7909), WRITE_COEF(0x45, 0xd489),
|
|
+ WRITE_COEF(0x46, 0x00f4), WRITE_COEF(0x4a, 0x21e0),
|
|
+ WRITE_COEF(0x66, 0x03f0), WRITE_COEF(0x67, 0x1000),
|
|
+ WRITE_COEF(0x6e, 0x1005), { }
|
|
+ };
|
|
+
|
|
+ static const struct hda_pintbl pincfgs[] = {
|
|
+ { 0x12, 0xb7a60130 }, /* Internal microphone*/
|
|
+ { 0x14, 0x90170150 }, /* B&O soundbar speakers */
|
|
+ { 0x17, 0x90170153 }, /* Side speakers */
|
|
+ { 0x19, 0x03a11040 }, /* Headset microphone */
|
|
+ { }
|
|
+ };
|
|
+
|
|
+ switch (action) {
|
|
+ case HDA_FIXUP_ACT_PRE_PROBE:
|
|
+ snd_hda_apply_pincfgs(codec, pincfgs);
|
|
+
|
|
+ /* Fixes volume control problem for side speakers */
|
|
+ alc295_fixup_disable_dac3(codec, fix, action);
|
|
+
|
|
+ /* Fixes no sound from headset speaker */
|
|
+ snd_hda_codec_amp_stereo(codec, 0x21, HDA_OUTPUT, 0, -1, 0);
|
|
+
|
|
+ /* Auto-enable headset mic when plugged */
|
|
+ snd_hda_jack_set_gating_jack(codec, 0x19, 0x21);
|
|
+
|
|
+ /* Headset mic volume enhancement */
|
|
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREF50);
|
|
+ break;
|
|
+ case HDA_FIXUP_ACT_INIT:
|
|
+ alc_process_coef_fw(codec, coefs);
|
|
+ break;
|
|
+ case HDA_FIXUP_ACT_BUILD:
|
|
+ rename_ctl(codec, "Bass Speaker Playback Volume",
|
|
+ "B&O-Tuned Playback Volume");
|
|
+ rename_ctl(codec, "Front Playback Switch",
|
|
+ "B&O Soundbar Playback Switch");
|
|
+ rename_ctl(codec, "Bass Speaker Playback Switch",
|
|
+ "Side Speaker Playback Switch");
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
/* for hda_fixup_thinkpad_acpi() */
|
|
#include "thinkpad_helper.c"
|
|
|
|
@@ -7130,6 +7185,7 @@ enum {
|
|
ALC280_FIXUP_HP_9480M,
|
|
ALC245_FIXUP_HP_X360_AMP,
|
|
ALC285_FIXUP_HP_SPECTRE_X360_EB1,
|
|
+ ALC285_FIXUP_HP_ENVY_X360,
|
|
ALC288_FIXUP_DELL_HEADSET_MODE,
|
|
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
|
|
ALC288_FIXUP_DELL_XPS_13,
|
|
@@ -9053,6 +9109,12 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = alc285_fixup_hp_spectre_x360_eb1
|
|
},
|
|
+ [ALC285_FIXUP_HP_ENVY_X360] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc285_fixup_hp_envy_x360,
|
|
+ .chained = true,
|
|
+ .chain_id = ALC285_FIXUP_HP_GPIO_AMP_INIT,
|
|
+ },
|
|
[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = alc285_fixup_ideapad_s740_coef,
|
|
@@ -9594,6 +9656,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
|
|
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
|
|
SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
|
|
+ SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
|
|
SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
|
|
SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
|
|
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
|
|
@@ -10248,6 +10311,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
|
|
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
|
|
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
|
|
{.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
|
|
+ {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"},
|
|
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
|
|
{.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
|
|
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
|
|
diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
|
|
index f19f064a75272..972600d271586 100644
|
|
--- a/sound/soc/amd/acp/acp-sof-mach.c
|
|
+++ b/sound/soc/amd/acp/acp-sof-mach.c
|
|
@@ -114,16 +114,14 @@ static int acp_sof_probe(struct platform_device *pdev)
|
|
card->num_controls = ARRAY_SIZE(acp_controls);
|
|
card->drvdata = (struct acp_card_drvdata *)pdev->id_entry->driver_data;
|
|
|
|
- acp_sofdsp_dai_links_create(card);
|
|
+ ret = acp_sofdsp_dai_links_create(card);
|
|
+ if (ret)
|
|
+ return dev_err_probe(&pdev->dev, ret, "Failed to create DAI links\n");
|
|
|
|
ret = devm_snd_soc_register_card(&pdev->dev, card);
|
|
- if (ret) {
|
|
- dev_err(&pdev->dev,
|
|
- "devm_snd_soc_register_card(%s) failed: %d\n",
|
|
- card->name, ret);
|
|
- return ret;
|
|
- }
|
|
-
|
|
+ if (ret)
|
|
+ return dev_err_probe(&pdev->dev, ret,
|
|
+ "Failed to register card(%s)\n", card->name);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
|
|
index 28da4e1858d7e..e0f406b6646ba 100644
|
|
--- a/sound/soc/amd/yc/acp6x-mach.c
|
|
+++ b/sound/soc/amd/yc/acp6x-mach.c
|
|
@@ -199,6 +199,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "21HY"),
|
|
}
|
|
},
|
|
+ {
|
|
+ .driver_data = &acp6x_card,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J2"),
|
|
+ }
|
|
+ },
|
|
+ {
|
|
+ .driver_data = &acp6x_card,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J0"),
|
|
+ }
|
|
+ },
|
|
{
|
|
.driver_data = &acp6x_card,
|
|
.matches = {
|
|
@@ -227,6 +241,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
|
|
}
|
|
},
|
|
+ {
|
|
+ .driver_data = &acp6x_card,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UU"),
|
|
+ }
|
|
+ },
|
|
{
|
|
.driver_data = &acp6x_card,
|
|
.matches = {
|
|
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
|
|
index 844d14d4c9a51..aac9140749968 100644
|
|
--- a/sound/soc/codecs/rt5645.c
|
|
+++ b/sound/soc/codecs/rt5645.c
|
|
@@ -3802,6 +3802,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
|
|
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
|
|
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
|
|
DMI_EXACT_MATCH(DMI_BOARD_VERSION, "Default string"),
|
|
+ /*
|
|
+ * Above strings are too generic, LattePanda BIOS versions for
|
|
+ * all 4 hw revisions are:
|
|
+ * DF-BI-7-S70CR100-*
|
|
+ * DF-BI-7-S70CR110-*
|
|
+ * DF-BI-7-S70CR200-*
|
|
+ * LP-BS-7-S70CR700-*
|
|
+ * Do a partial match for S70CR to avoid false positive matches.
|
|
+ */
|
|
+ DMI_MATCH(DMI_BIOS_VERSION, "S70CR"),
|
|
},
|
|
.driver_data = (void *)&lattepanda_board_platform_data,
|
|
},
|
|
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
|
|
index b901e4c65e8a5..d215e58c4a7b3 100644
|
|
--- a/sound/soc/codecs/wm8962.c
|
|
+++ b/sound/soc/codecs/wm8962.c
|
|
@@ -2229,6 +2229,9 @@ SND_SOC_DAPM_PGA_E("HPOUT", SND_SOC_NOPM, 0, 0, NULL, 0, hp_event,
|
|
|
|
SND_SOC_DAPM_OUTPUT("HPOUTL"),
|
|
SND_SOC_DAPM_OUTPUT("HPOUTR"),
|
|
+
|
|
+SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
|
|
+SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
|
|
};
|
|
|
|
static const struct snd_soc_dapm_widget wm8962_dapm_spk_mono_widgets[] = {
|
|
@@ -2236,7 +2239,6 @@ SND_SOC_DAPM_MIXER("Speaker Mixer", WM8962_MIXER_ENABLES, 1, 0,
|
|
spkmixl, ARRAY_SIZE(spkmixl)),
|
|
SND_SOC_DAPM_MUX_E("Speaker PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
|
|
out_pga_event, SND_SOC_DAPM_POST_PMU),
|
|
-SND_SOC_DAPM_PGA("Speaker Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
|
|
SND_SOC_DAPM_OUTPUT("SPKOUT"),
|
|
};
|
|
|
|
@@ -2251,9 +2253,6 @@ SND_SOC_DAPM_MUX_E("SPKOUTL PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
|
|
SND_SOC_DAPM_MUX_E("SPKOUTR PGA", WM8962_PWR_MGMT_2, 3, 0, &spkoutr_mux,
|
|
out_pga_event, SND_SOC_DAPM_POST_PMU),
|
|
|
|
-SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
|
|
-SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
|
|
-
|
|
SND_SOC_DAPM_OUTPUT("SPKOUTL"),
|
|
SND_SOC_DAPM_OUTPUT("SPKOUTR"),
|
|
};
|
|
@@ -2366,12 +2365,18 @@ static const struct snd_soc_dapm_route wm8962_spk_mono_intercon[] = {
|
|
{ "Speaker PGA", "Mixer", "Speaker Mixer" },
|
|
{ "Speaker PGA", "DAC", "DACL" },
|
|
|
|
- { "Speaker Output", NULL, "Speaker PGA" },
|
|
- { "Speaker Output", NULL, "SYSCLK" },
|
|
- { "Speaker Output", NULL, "TOCLK" },
|
|
- { "Speaker Output", NULL, "TEMP_SPK" },
|
|
+ { "SPKOUTL Output", NULL, "Speaker PGA" },
|
|
+ { "SPKOUTL Output", NULL, "SYSCLK" },
|
|
+ { "SPKOUTL Output", NULL, "TOCLK" },
|
|
+ { "SPKOUTL Output", NULL, "TEMP_SPK" },
|
|
|
|
- { "SPKOUT", NULL, "Speaker Output" },
|
|
+ { "SPKOUTR Output", NULL, "Speaker PGA" },
|
|
+ { "SPKOUTR Output", NULL, "SYSCLK" },
|
|
+ { "SPKOUTR Output", NULL, "TOCLK" },
|
|
+ { "SPKOUTR Output", NULL, "TEMP_SPK" },
|
|
+
|
|
+ { "SPKOUT", NULL, "SPKOUTL Output" },
|
|
+ { "SPKOUT", NULL, "SPKOUTR Output" },
|
|
};
|
|
|
|
static const struct snd_soc_dapm_route wm8962_spk_stereo_intercon[] = {
|
|
@@ -2914,8 +2919,12 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
|
|
switch (fll_id) {
|
|
case WM8962_FLL_MCLK:
|
|
case WM8962_FLL_BCLK:
|
|
+ fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
|
|
+ break;
|
|
case WM8962_FLL_OSC:
|
|
fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
|
|
+ snd_soc_component_update_bits(component, WM8962_PLL2,
|
|
+ WM8962_OSC_ENA, WM8962_OSC_ENA);
|
|
break;
|
|
case WM8962_FLL_INT:
|
|
snd_soc_component_update_bits(component, WM8962_FLL_CONTROL_1,
|
|
@@ -2924,7 +2933,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
|
|
WM8962_FLL_FRC_NCO, WM8962_FLL_FRC_NCO);
|
|
break;
|
|
default:
|
|
- dev_err(component->dev, "Unknown FLL source %d\n", ret);
|
|
+ dev_err(component->dev, "Unknown FLL source %d\n", source);
|
|
return -EINVAL;
|
|
}
|
|
|
|
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
|
|
index 797d0a48d6066..094445036c20f 100644
|
|
--- a/sound/soc/intel/boards/bytcr_rt5640.c
|
|
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
|
|
@@ -685,6 +685,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
|
|
BYT_RT5640_SSP0_AIF1 |
|
|
BYT_RT5640_MCLK_EN),
|
|
},
|
|
+ { /* Chuwi Vi8 dual-boot (CWI506) */
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "i86"),
|
|
+ /* The above are too generic, also match BIOS info */
|
|
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
|
|
+ },
|
|
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
|
|
+ BYT_RT5640_MONO_SPEAKER |
|
|
+ BYT_RT5640_SSP0_AIF1 |
|
|
+ BYT_RT5640_MCLK_EN),
|
|
+ },
|
|
{
|
|
/* Chuwi Vi10 (CWI505) */
|
|
.matches = {
|
|
diff --git a/sound/soc/meson/aiu.c b/sound/soc/meson/aiu.c
|
|
index 88e611e64d14f..077b9c0b6c4ca 100644
|
|
--- a/sound/soc/meson/aiu.c
|
|
+++ b/sound/soc/meson/aiu.c
|
|
@@ -218,11 +218,12 @@ static const char * const aiu_spdif_ids[] = {
|
|
static int aiu_clk_get(struct device *dev)
|
|
{
|
|
struct aiu *aiu = dev_get_drvdata(dev);
|
|
+ struct clk *pclk;
|
|
int ret;
|
|
|
|
- aiu->pclk = devm_clk_get(dev, "pclk");
|
|
- if (IS_ERR(aiu->pclk))
|
|
- return dev_err_probe(dev, PTR_ERR(aiu->pclk), "Can't get the aiu pclk\n");
|
|
+ pclk = devm_clk_get_enabled(dev, "pclk");
|
|
+ if (IS_ERR(pclk))
|
|
+ return dev_err_probe(dev, PTR_ERR(pclk), "Can't get the aiu pclk\n");
|
|
|
|
aiu->spdif_mclk = devm_clk_get(dev, "spdif_mclk");
|
|
if (IS_ERR(aiu->spdif_mclk))
|
|
@@ -239,18 +240,6 @@ static int aiu_clk_get(struct device *dev)
|
|
if (ret)
|
|
return dev_err_probe(dev, ret, "Can't get the spdif clocks\n");
|
|
|
|
- ret = clk_prepare_enable(aiu->pclk);
|
|
- if (ret) {
|
|
- dev_err(dev, "peripheral clock enable failed\n");
|
|
- return ret;
|
|
- }
|
|
-
|
|
- ret = devm_add_action_or_reset(dev,
|
|
- (void(*)(void *))clk_disable_unprepare,
|
|
- aiu->pclk);
|
|
- if (ret)
|
|
- dev_err(dev, "failed to add reset action on pclk");
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/sound/soc/meson/aiu.h b/sound/soc/meson/aiu.h
|
|
index 393b6c2307e49..0f94c8bf60818 100644
|
|
--- a/sound/soc/meson/aiu.h
|
|
+++ b/sound/soc/meson/aiu.h
|
|
@@ -33,7 +33,6 @@ struct aiu_platform_data {
|
|
};
|
|
|
|
struct aiu {
|
|
- struct clk *pclk;
|
|
struct clk *spdif_mclk;
|
|
struct aiu_interface i2s;
|
|
struct aiu_interface spdif;
|
|
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
|
|
index c040c83637e02..028383f949efd 100644
|
|
--- a/sound/soc/meson/axg-tdm-interface.c
|
|
+++ b/sound/soc/meson/axg-tdm-interface.c
|
|
@@ -12,6 +12,9 @@
|
|
|
|
#include "axg-tdm.h"
|
|
|
|
+/* Maximum bit clock frequency according to the datasheets */
|
|
+#define MAX_SCLK 100000000 /* Hz */
|
|
+
|
|
enum {
|
|
TDM_IFACE_PAD,
|
|
TDM_IFACE_LOOPBACK,
|
|
@@ -155,19 +158,27 @@ static int axg_tdm_iface_startup(struct snd_pcm_substream *substream,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- /* Apply component wide rate symmetry */
|
|
if (snd_soc_component_active(dai->component)) {
|
|
+ /* Apply component wide rate symmetry */
|
|
ret = snd_pcm_hw_constraint_single(substream->runtime,
|
|
SNDRV_PCM_HW_PARAM_RATE,
|
|
iface->rate);
|
|
- if (ret < 0) {
|
|
- dev_err(dai->dev,
|
|
- "can't set iface rate constraint\n");
|
|
- return ret;
|
|
- }
|
|
+
|
|
+ } else {
|
|
+ /* Limit rate according to the slot number and width */
|
|
+ unsigned int max_rate =
|
|
+ MAX_SCLK / (iface->slots * iface->slot_width);
|
|
+ ret = snd_pcm_hw_constraint_minmax(substream->runtime,
|
|
+ SNDRV_PCM_HW_PARAM_RATE,
|
|
+ 0, max_rate);
|
|
}
|
|
|
|
- return 0;
|
|
+ if (ret < 0)
|
|
+ dev_err(dai->dev, "can't set iface rate constraint\n");
|
|
+ else
|
|
+ ret = 0;
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static int axg_tdm_iface_set_stream(struct snd_pcm_substream *substream,
|
|
@@ -266,8 +277,8 @@ static int axg_tdm_iface_set_sclk(struct snd_soc_dai *dai,
|
|
srate = iface->slots * iface->slot_width * params_rate(params);
|
|
|
|
if (!iface->mclk_rate) {
|
|
- /* If no specific mclk is requested, default to bit clock * 4 */
|
|
- clk_set_rate(iface->mclk, 4 * srate);
|
|
+ /* If no specific mclk is requested, default to bit clock * 2 */
|
|
+ clk_set_rate(iface->mclk, 2 * srate);
|
|
} else {
|
|
/* Check if we can actually get the bit clock from mclk */
|
|
if (iface->mclk_rate % srate) {
|
|
diff --git a/sound/soc/meson/t9015.c b/sound/soc/meson/t9015.c
|
|
index 9c6b4dac68932..571f65788c592 100644
|
|
--- a/sound/soc/meson/t9015.c
|
|
+++ b/sound/soc/meson/t9015.c
|
|
@@ -48,7 +48,6 @@
|
|
#define POWER_CFG 0x10
|
|
|
|
struct t9015 {
|
|
- struct clk *pclk;
|
|
struct regulator *avdd;
|
|
};
|
|
|
|
@@ -249,6 +248,7 @@ static int t9015_probe(struct platform_device *pdev)
|
|
struct t9015 *priv;
|
|
void __iomem *regs;
|
|
struct regmap *regmap;
|
|
+ struct clk *pclk;
|
|
int ret;
|
|
|
|
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
|
@@ -256,26 +256,14 @@ static int t9015_probe(struct platform_device *pdev)
|
|
return -ENOMEM;
|
|
platform_set_drvdata(pdev, priv);
|
|
|
|
- priv->pclk = devm_clk_get(dev, "pclk");
|
|
- if (IS_ERR(priv->pclk))
|
|
- return dev_err_probe(dev, PTR_ERR(priv->pclk), "failed to get core clock\n");
|
|
+ pclk = devm_clk_get_enabled(dev, "pclk");
|
|
+ if (IS_ERR(pclk))
|
|
+ return dev_err_probe(dev, PTR_ERR(pclk), "failed to get core clock\n");
|
|
|
|
priv->avdd = devm_regulator_get(dev, "AVDD");
|
|
if (IS_ERR(priv->avdd))
|
|
return dev_err_probe(dev, PTR_ERR(priv->avdd), "failed to AVDD\n");
|
|
|
|
- ret = clk_prepare_enable(priv->pclk);
|
|
- if (ret) {
|
|
- dev_err(dev, "core clock enable failed\n");
|
|
- return ret;
|
|
- }
|
|
-
|
|
- ret = devm_add_action_or_reset(dev,
|
|
- (void(*)(void *))clk_disable_unprepare,
|
|
- priv->pclk);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
ret = device_reset(dev);
|
|
if (ret) {
|
|
dev_err(dev, "reset failed\n");
|
|
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
|
|
index 2550bd2a5e78c..2e36a97077b99 100644
|
|
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
|
|
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
|
|
@@ -27,8 +27,6 @@
|
|
#define DEFAULT_MCLK_FS 256
|
|
#define CH_GRP_MAX 4 /* The max channel 8 / 2 */
|
|
#define MULTIPLEX_CH_MAX 10
|
|
-#define CLK_PPM_MIN -1000
|
|
-#define CLK_PPM_MAX 1000
|
|
|
|
#define TRCM_TXRX 0
|
|
#define TRCM_TX 1
|
|
@@ -55,20 +53,6 @@ struct rk_i2s_tdm_dev {
|
|
struct clk *hclk;
|
|
struct clk *mclk_tx;
|
|
struct clk *mclk_rx;
|
|
- /* The mclk_tx_src is parent of mclk_tx */
|
|
- struct clk *mclk_tx_src;
|
|
- /* The mclk_rx_src is parent of mclk_rx */
|
|
- struct clk *mclk_rx_src;
|
|
- /*
|
|
- * The mclk_root0 and mclk_root1 are root parent and supplies for
|
|
- * the different FS.
|
|
- *
|
|
- * e.g:
|
|
- * mclk_root0 is VPLL0, used for FS=48000Hz
|
|
- * mclk_root1 is VPLL1, used for FS=44100Hz
|
|
- */
|
|
- struct clk *mclk_root0;
|
|
- struct clk *mclk_root1;
|
|
struct regmap *regmap;
|
|
struct regmap *grf;
|
|
struct snd_dmaengine_dai_dma_data capture_dma_data;
|
|
@@ -78,19 +62,11 @@ struct rk_i2s_tdm_dev {
|
|
struct rk_i2s_soc_data *soc_data;
|
|
bool is_master_mode;
|
|
bool io_multiplex;
|
|
- bool mclk_calibrate;
|
|
bool tdm_mode;
|
|
- unsigned int mclk_rx_freq;
|
|
- unsigned int mclk_tx_freq;
|
|
- unsigned int mclk_root0_freq;
|
|
- unsigned int mclk_root1_freq;
|
|
- unsigned int mclk_root0_initial_freq;
|
|
- unsigned int mclk_root1_initial_freq;
|
|
unsigned int frame_width;
|
|
unsigned int clk_trcm;
|
|
unsigned int i2s_sdis[CH_GRP_MAX];
|
|
unsigned int i2s_sdos[CH_GRP_MAX];
|
|
- int clk_ppm;
|
|
int refcount;
|
|
spinlock_t lock; /* xfer lock */
|
|
bool has_playback;
|
|
@@ -116,12 +92,6 @@ static void i2s_tdm_disable_unprepare_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
|
|
{
|
|
clk_disable_unprepare(i2s_tdm->mclk_tx);
|
|
clk_disable_unprepare(i2s_tdm->mclk_rx);
|
|
- if (i2s_tdm->mclk_calibrate) {
|
|
- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
|
|
- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
|
|
- clk_disable_unprepare(i2s_tdm->mclk_root0);
|
|
- clk_disable_unprepare(i2s_tdm->mclk_root1);
|
|
- }
|
|
}
|
|
|
|
/**
|
|
@@ -144,29 +114,9 @@ static int i2s_tdm_prepare_enable_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
|
|
ret = clk_prepare_enable(i2s_tdm->mclk_rx);
|
|
if (ret)
|
|
goto err_mclk_rx;
|
|
- if (i2s_tdm->mclk_calibrate) {
|
|
- ret = clk_prepare_enable(i2s_tdm->mclk_tx_src);
|
|
- if (ret)
|
|
- goto err_mclk_rx;
|
|
- ret = clk_prepare_enable(i2s_tdm->mclk_rx_src);
|
|
- if (ret)
|
|
- goto err_mclk_rx_src;
|
|
- ret = clk_prepare_enable(i2s_tdm->mclk_root0);
|
|
- if (ret)
|
|
- goto err_mclk_root0;
|
|
- ret = clk_prepare_enable(i2s_tdm->mclk_root1);
|
|
- if (ret)
|
|
- goto err_mclk_root1;
|
|
- }
|
|
|
|
return 0;
|
|
|
|
-err_mclk_root1:
|
|
- clk_disable_unprepare(i2s_tdm->mclk_root0);
|
|
-err_mclk_root0:
|
|
- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
|
|
-err_mclk_rx_src:
|
|
- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
|
|
err_mclk_rx:
|
|
clk_disable_unprepare(i2s_tdm->mclk_tx);
|
|
err_mclk_tx:
|
|
@@ -566,159 +516,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
|
|
I2S_XFER_RXS_START);
|
|
}
|
|
|
|
-static int rockchip_i2s_tdm_clk_set_rate(struct rk_i2s_tdm_dev *i2s_tdm,
|
|
- struct clk *clk, unsigned long rate,
|
|
- int ppm)
|
|
-{
|
|
- unsigned long rate_target;
|
|
- int delta, ret;
|
|
-
|
|
- if (ppm == i2s_tdm->clk_ppm)
|
|
- return 0;
|
|
-
|
|
- if (ppm < 0)
|
|
- delta = -1;
|
|
- else
|
|
- delta = 1;
|
|
-
|
|
- delta *= (int)div64_u64((u64)rate * (u64)abs(ppm) + 500000,
|
|
- 1000000);
|
|
-
|
|
- rate_target = rate + delta;
|
|
-
|
|
- if (!rate_target)
|
|
- return -EINVAL;
|
|
-
|
|
- ret = clk_set_rate(clk, rate_target);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- i2s_tdm->clk_ppm = ppm;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int rockchip_i2s_tdm_calibrate_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
|
|
- struct snd_pcm_substream *substream,
|
|
- unsigned int lrck_freq)
|
|
-{
|
|
- struct clk *mclk_root;
|
|
- struct clk *mclk_parent;
|
|
- unsigned int mclk_root_freq;
|
|
- unsigned int mclk_root_initial_freq;
|
|
- unsigned int mclk_parent_freq;
|
|
- unsigned int div, delta;
|
|
- u64 ppm;
|
|
- int ret;
|
|
-
|
|
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
|
|
- mclk_parent = i2s_tdm->mclk_tx_src;
|
|
- else
|
|
- mclk_parent = i2s_tdm->mclk_rx_src;
|
|
-
|
|
- switch (lrck_freq) {
|
|
- case 8000:
|
|
- case 16000:
|
|
- case 24000:
|
|
- case 32000:
|
|
- case 48000:
|
|
- case 64000:
|
|
- case 96000:
|
|
- case 192000:
|
|
- mclk_root = i2s_tdm->mclk_root0;
|
|
- mclk_root_freq = i2s_tdm->mclk_root0_freq;
|
|
- mclk_root_initial_freq = i2s_tdm->mclk_root0_initial_freq;
|
|
- mclk_parent_freq = DEFAULT_MCLK_FS * 192000;
|
|
- break;
|
|
- case 11025:
|
|
- case 22050:
|
|
- case 44100:
|
|
- case 88200:
|
|
- case 176400:
|
|
- mclk_root = i2s_tdm->mclk_root1;
|
|
- mclk_root_freq = i2s_tdm->mclk_root1_freq;
|
|
- mclk_root_initial_freq = i2s_tdm->mclk_root1_initial_freq;
|
|
- mclk_parent_freq = DEFAULT_MCLK_FS * 176400;
|
|
- break;
|
|
- default:
|
|
- dev_err(i2s_tdm->dev, "Invalid LRCK frequency: %u Hz\n",
|
|
- lrck_freq);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- ret = clk_set_parent(mclk_parent, mclk_root);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, mclk_root,
|
|
- mclk_root_freq, 0);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- delta = abs(mclk_root_freq % mclk_parent_freq - mclk_parent_freq);
|
|
- ppm = div64_u64((uint64_t)delta * 1000000, (uint64_t)mclk_root_freq);
|
|
-
|
|
- if (ppm) {
|
|
- div = DIV_ROUND_CLOSEST(mclk_root_initial_freq, mclk_parent_freq);
|
|
- if (!div)
|
|
- return -EINVAL;
|
|
-
|
|
- mclk_root_freq = mclk_parent_freq * round_up(div, 2);
|
|
-
|
|
- ret = clk_set_rate(mclk_root, mclk_root_freq);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- i2s_tdm->mclk_root0_freq = clk_get_rate(i2s_tdm->mclk_root0);
|
|
- i2s_tdm->mclk_root1_freq = clk_get_rate(i2s_tdm->mclk_root1);
|
|
- }
|
|
-
|
|
- return clk_set_rate(mclk_parent, mclk_parent_freq);
|
|
-}
|
|
-
|
|
-static int rockchip_i2s_tdm_set_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
|
|
- struct snd_pcm_substream *substream,
|
|
- struct clk **mclk)
|
|
-{
|
|
- unsigned int mclk_freq;
|
|
- int ret;
|
|
-
|
|
- if (i2s_tdm->clk_trcm) {
|
|
- if (i2s_tdm->mclk_tx_freq != i2s_tdm->mclk_rx_freq) {
|
|
- dev_err(i2s_tdm->dev,
|
|
- "clk_trcm, tx: %d and rx: %d should be the same\n",
|
|
- i2s_tdm->mclk_tx_freq,
|
|
- i2s_tdm->mclk_rx_freq);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- ret = clk_set_rate(i2s_tdm->mclk_tx, i2s_tdm->mclk_tx_freq);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- ret = clk_set_rate(i2s_tdm->mclk_rx, i2s_tdm->mclk_rx_freq);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- /* mclk_rx is also ok. */
|
|
- *mclk = i2s_tdm->mclk_tx;
|
|
- } else {
|
|
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
|
|
- *mclk = i2s_tdm->mclk_tx;
|
|
- mclk_freq = i2s_tdm->mclk_tx_freq;
|
|
- } else {
|
|
- *mclk = i2s_tdm->mclk_rx;
|
|
- mclk_freq = i2s_tdm->mclk_rx_freq;
|
|
- }
|
|
-
|
|
- ret = clk_set_rate(*mclk, mclk_freq);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
|
|
{
|
|
if (substream_capture) {
|
|
@@ -849,19 +646,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
|
|
struct snd_soc_dai *dai)
|
|
{
|
|
struct rk_i2s_tdm_dev *i2s_tdm = to_info(dai);
|
|
- struct clk *mclk;
|
|
- int ret = 0;
|
|
unsigned int val = 0;
|
|
unsigned int mclk_rate, bclk_rate, div_bclk = 4, div_lrck = 64;
|
|
+ int err;
|
|
|
|
if (i2s_tdm->is_master_mode) {
|
|
- if (i2s_tdm->mclk_calibrate)
|
|
- rockchip_i2s_tdm_calibrate_mclk(i2s_tdm, substream,
|
|
- params_rate(params));
|
|
+ struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
|
|
+ i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
|
|
|
|
- ret = rockchip_i2s_tdm_set_mclk(i2s_tdm, substream, &mclk);
|
|
- if (ret)
|
|
- return ret;
|
|
+ err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
mclk_rate = clk_get_rate(mclk);
|
|
bclk_rate = i2s_tdm->frame_width * params_rate(params);
|
|
@@ -969,96 +764,6 @@ static int rockchip_i2s_tdm_trigger(struct snd_pcm_substream *substream,
|
|
return 0;
|
|
}
|
|
|
|
-static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
|
|
- unsigned int freq, int dir)
|
|
-{
|
|
- struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
|
|
-
|
|
- /* Put set mclk rate into rockchip_i2s_tdm_set_mclk() */
|
|
- if (i2s_tdm->clk_trcm) {
|
|
- i2s_tdm->mclk_tx_freq = freq;
|
|
- i2s_tdm->mclk_rx_freq = freq;
|
|
- } else {
|
|
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
|
|
- i2s_tdm->mclk_tx_freq = freq;
|
|
- else
|
|
- i2s_tdm->mclk_rx_freq = freq;
|
|
- }
|
|
-
|
|
- dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
|
|
- stream ? "rx" : "tx", freq);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int rockchip_i2s_tdm_clk_compensation_info(struct snd_kcontrol *kcontrol,
|
|
- struct snd_ctl_elem_info *uinfo)
|
|
-{
|
|
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
|
|
- uinfo->count = 1;
|
|
- uinfo->value.integer.min = CLK_PPM_MIN;
|
|
- uinfo->value.integer.max = CLK_PPM_MAX;
|
|
- uinfo->value.integer.step = 1;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int rockchip_i2s_tdm_clk_compensation_get(struct snd_kcontrol *kcontrol,
|
|
- struct snd_ctl_elem_value *ucontrol)
|
|
-{
|
|
- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
|
|
- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
|
|
-
|
|
- ucontrol->value.integer.value[0] = i2s_tdm->clk_ppm;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int rockchip_i2s_tdm_clk_compensation_put(struct snd_kcontrol *kcontrol,
|
|
- struct snd_ctl_elem_value *ucontrol)
|
|
-{
|
|
- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
|
|
- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
|
|
- int ret = 0, ppm = 0;
|
|
- int changed = 0;
|
|
- unsigned long old_rate;
|
|
-
|
|
- if (ucontrol->value.integer.value[0] < CLK_PPM_MIN ||
|
|
- ucontrol->value.integer.value[0] > CLK_PPM_MAX)
|
|
- return -EINVAL;
|
|
-
|
|
- ppm = ucontrol->value.integer.value[0];
|
|
-
|
|
- old_rate = clk_get_rate(i2s_tdm->mclk_root0);
|
|
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root0,
|
|
- i2s_tdm->mclk_root0_freq, ppm);
|
|
- if (ret)
|
|
- return ret;
|
|
- if (old_rate != clk_get_rate(i2s_tdm->mclk_root0))
|
|
- changed = 1;
|
|
-
|
|
- if (clk_is_match(i2s_tdm->mclk_root0, i2s_tdm->mclk_root1))
|
|
- return changed;
|
|
-
|
|
- old_rate = clk_get_rate(i2s_tdm->mclk_root1);
|
|
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root1,
|
|
- i2s_tdm->mclk_root1_freq, ppm);
|
|
- if (ret)
|
|
- return ret;
|
|
- if (old_rate != clk_get_rate(i2s_tdm->mclk_root1))
|
|
- changed = 1;
|
|
-
|
|
- return changed;
|
|
-}
|
|
-
|
|
-static struct snd_kcontrol_new rockchip_i2s_tdm_compensation_control = {
|
|
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
|
|
- .name = "PCM Clock Compensation in PPM",
|
|
- .info = rockchip_i2s_tdm_clk_compensation_info,
|
|
- .get = rockchip_i2s_tdm_clk_compensation_get,
|
|
- .put = rockchip_i2s_tdm_clk_compensation_put,
|
|
-};
|
|
-
|
|
static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
|
|
{
|
|
struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
|
|
@@ -1068,9 +773,6 @@ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
|
|
if (i2s_tdm->has_playback)
|
|
dai->playback_dma_data = &i2s_tdm->playback_dma_data;
|
|
|
|
- if (i2s_tdm->mclk_calibrate)
|
|
- snd_soc_add_dai_controls(dai, &rockchip_i2s_tdm_compensation_control, 1);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -1110,7 +812,6 @@ static int rockchip_i2s_tdm_set_bclk_ratio(struct snd_soc_dai *dai,
|
|
static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
|
|
.hw_params = rockchip_i2s_tdm_hw_params,
|
|
.set_bclk_ratio = rockchip_i2s_tdm_set_bclk_ratio,
|
|
- .set_sysclk = rockchip_i2s_tdm_set_sysclk,
|
|
.set_fmt = rockchip_i2s_tdm_set_fmt,
|
|
.set_tdm_slot = rockchip_dai_tdm_slot,
|
|
.trigger = rockchip_i2s_tdm_trigger,
|
|
@@ -1433,35 +1134,6 @@ static void rockchip_i2s_tdm_path_config(struct rk_i2s_tdm_dev *i2s_tdm,
|
|
rockchip_i2s_tdm_tx_path_config(i2s_tdm, num);
|
|
}
|
|
|
|
-static int rockchip_i2s_tdm_get_calibrate_mclks(struct rk_i2s_tdm_dev *i2s_tdm)
|
|
-{
|
|
- int num_mclks = 0;
|
|
-
|
|
- i2s_tdm->mclk_tx_src = devm_clk_get(i2s_tdm->dev, "mclk_tx_src");
|
|
- if (!IS_ERR(i2s_tdm->mclk_tx_src))
|
|
- num_mclks++;
|
|
-
|
|
- i2s_tdm->mclk_rx_src = devm_clk_get(i2s_tdm->dev, "mclk_rx_src");
|
|
- if (!IS_ERR(i2s_tdm->mclk_rx_src))
|
|
- num_mclks++;
|
|
-
|
|
- i2s_tdm->mclk_root0 = devm_clk_get(i2s_tdm->dev, "mclk_root0");
|
|
- if (!IS_ERR(i2s_tdm->mclk_root0))
|
|
- num_mclks++;
|
|
-
|
|
- i2s_tdm->mclk_root1 = devm_clk_get(i2s_tdm->dev, "mclk_root1");
|
|
- if (!IS_ERR(i2s_tdm->mclk_root1))
|
|
- num_mclks++;
|
|
-
|
|
- if (num_mclks < 4 && num_mclks != 0)
|
|
- return -ENOENT;
|
|
-
|
|
- if (num_mclks == 4)
|
|
- i2s_tdm->mclk_calibrate = 1;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static int rockchip_i2s_tdm_path_prepare(struct rk_i2s_tdm_dev *i2s_tdm,
|
|
struct device_node *np,
|
|
bool is_rx_path)
|
|
@@ -1609,11 +1281,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
|
|
i2s_tdm->io_multiplex =
|
|
of_property_read_bool(node, "rockchip,io-multiplex");
|
|
|
|
- ret = rockchip_i2s_tdm_get_calibrate_mclks(i2s_tdm);
|
|
- if (ret)
|
|
- return dev_err_probe(i2s_tdm->dev, ret,
|
|
- "mclk-calibrate clocks missing");
|
|
-
|
|
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
|
|
if (IS_ERR(regs)) {
|
|
return dev_err_probe(i2s_tdm->dev, PTR_ERR(regs),
|
|
@@ -1666,13 +1333,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
|
|
goto err_disable_hclk;
|
|
}
|
|
|
|
- if (i2s_tdm->mclk_calibrate) {
|
|
- i2s_tdm->mclk_root0_initial_freq = clk_get_rate(i2s_tdm->mclk_root0);
|
|
- i2s_tdm->mclk_root1_initial_freq = clk_get_rate(i2s_tdm->mclk_root1);
|
|
- i2s_tdm->mclk_root0_freq = i2s_tdm->mclk_root0_initial_freq;
|
|
- i2s_tdm->mclk_root1_freq = i2s_tdm->mclk_root1_initial_freq;
|
|
- }
|
|
-
|
|
pm_runtime_enable(&pdev->dev);
|
|
|
|
regmap_update_bits(i2s_tdm->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
|
|
diff --git a/sound/soc/sof/ipc3-loader.c b/sound/soc/sof/ipc3-loader.c
|
|
index bf423ca4e97bb..6e3ef06721106 100644
|
|
--- a/sound/soc/sof/ipc3-loader.c
|
|
+++ b/sound/soc/sof/ipc3-loader.c
|
|
@@ -138,8 +138,7 @@ static ssize_t ipc3_fw_ext_man_size(struct snd_sof_dev *sdev, const struct firmw
|
|
|
|
static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
|
|
{
|
|
- struct snd_sof_pdata *plat_data = sdev->pdata;
|
|
- const struct firmware *fw = plat_data->fw;
|
|
+ const struct firmware *fw = sdev->basefw.fw;
|
|
const struct sof_ext_man_elem_header *elem_hdr;
|
|
const struct sof_ext_man_header *head;
|
|
ssize_t ext_man_size;
|
|
@@ -149,6 +148,8 @@ static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
|
|
|
|
head = (struct sof_ext_man_header *)fw->data;
|
|
remaining = head->full_size - head->header_size;
|
|
+ if (remaining < 0 || remaining > sdev->basefw.fw->size)
|
|
+ return -EINVAL;
|
|
ext_man_size = ipc3_fw_ext_man_size(sdev, fw);
|
|
|
|
/* Assert firmware starts with extended manifest */
|
|
@@ -310,18 +311,18 @@ static int sof_ipc3_parse_module_memcpy(struct snd_sof_dev *sdev,
|
|
|
|
static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
|
|
{
|
|
- struct snd_sof_pdata *plat_data = sdev->pdata;
|
|
- const struct firmware *fw = plat_data->fw;
|
|
+ u32 payload_offset = sdev->basefw.payload_offset;
|
|
+ const struct firmware *fw = sdev->basefw.fw;
|
|
struct snd_sof_fw_header *header;
|
|
struct snd_sof_mod_hdr *module;
|
|
int (*load_module)(struct snd_sof_dev *sof_dev, struct snd_sof_mod_hdr *hdr);
|
|
size_t remaining;
|
|
int ret, count;
|
|
|
|
- if (!plat_data->fw)
|
|
+ if (!fw)
|
|
return -EINVAL;
|
|
|
|
- header = (struct snd_sof_fw_header *)(fw->data + plat_data->fw_offset);
|
|
+ header = (struct snd_sof_fw_header *)(fw->data + payload_offset);
|
|
load_module = sof_ops(sdev)->load_module;
|
|
if (!load_module) {
|
|
dev_dbg(sdev->dev, "Using generic module loading\n");
|
|
@@ -331,9 +332,8 @@ static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
|
|
}
|
|
|
|
/* parse each module */
|
|
- module = (struct snd_sof_mod_hdr *)(fw->data + plat_data->fw_offset +
|
|
- sizeof(*header));
|
|
- remaining = fw->size - sizeof(*header) - plat_data->fw_offset;
|
|
+ module = (struct snd_sof_mod_hdr *)(fw->data + payload_offset + sizeof(*header));
|
|
+ remaining = fw->size - sizeof(*header) - payload_offset;
|
|
/* check for wrap */
|
|
if (remaining > fw->size) {
|
|
dev_err(sdev->dev, "%s: fw size smaller than header size\n", __func__);
|
|
@@ -374,19 +374,19 @@ static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
|
|
|
|
static int sof_ipc3_validate_firmware(struct snd_sof_dev *sdev)
|
|
{
|
|
- struct snd_sof_pdata *plat_data = sdev->pdata;
|
|
- const struct firmware *fw = plat_data->fw;
|
|
+ u32 payload_offset = sdev->basefw.payload_offset;
|
|
+ const struct firmware *fw = sdev->basefw.fw;
|
|
struct snd_sof_fw_header *header;
|
|
- size_t fw_size = fw->size - plat_data->fw_offset;
|
|
+ size_t fw_size = fw->size - payload_offset;
|
|
|
|
- if (fw->size <= plat_data->fw_offset) {
|
|
+ if (fw->size <= payload_offset) {
|
|
dev_err(sdev->dev,
|
|
"firmware size must be greater than firmware offset\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
/* Read the header information from the data pointer */
|
|
- header = (struct snd_sof_fw_header *)(fw->data + plat_data->fw_offset);
|
|
+ header = (struct snd_sof_fw_header *)(fw->data + payload_offset);
|
|
|
|
/* verify FW sig */
|
|
if (strncmp(header->sig, SND_SOF_FW_SIG, SND_SOF_FW_SIG_SIZE) != 0) {
|
|
diff --git a/sound/soc/sof/ipc4-loader.c b/sound/soc/sof/ipc4-loader.c
|
|
index e635ae515fa9f..9f433e9b4cd37 100644
|
|
--- a/sound/soc/sof/ipc4-loader.c
|
|
+++ b/sound/soc/sof/ipc4-loader.c
|
|
@@ -17,9 +17,8 @@
|
|
static size_t sof_ipc4_fw_parse_ext_man(struct snd_sof_dev *sdev)
|
|
{
|
|
struct sof_ipc4_fw_data *ipc4_data = sdev->private;
|
|
- struct snd_sof_pdata *plat_data = sdev->pdata;
|
|
struct sof_man4_fw_binary_header *fw_header;
|
|
- const struct firmware *fw = plat_data->fw;
|
|
+ const struct firmware *fw = sdev->basefw.fw;
|
|
struct sof_ext_manifest4_hdr *ext_man_hdr;
|
|
struct sof_man4_module_config *fm_config;
|
|
struct sof_ipc4_fw_module *fw_module;
|
|
@@ -138,9 +137,8 @@ static int sof_ipc4_validate_firmware(struct snd_sof_dev *sdev)
|
|
{
|
|
struct sof_ipc4_fw_data *ipc4_data = sdev->private;
|
|
u32 fw_hdr_offset = ipc4_data->manifest_fw_hdr_offset;
|
|
- struct snd_sof_pdata *plat_data = sdev->pdata;
|
|
struct sof_man4_fw_binary_header *fw_header;
|
|
- const struct firmware *fw = plat_data->fw;
|
|
+ const struct firmware *fw = sdev->basefw.fw;
|
|
struct sof_ext_manifest4_hdr *ext_man_hdr;
|
|
|
|
ext_man_hdr = (struct sof_ext_manifest4_hdr *)fw->data;
|
|
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
|
|
index 5f51d936b3067..ba8e3aae0a5cb 100644
|
|
--- a/sound/soc/sof/loader.c
|
|
+++ b/sound/soc/sof/loader.c
|
|
@@ -22,7 +22,7 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
|
|
int ret;
|
|
|
|
/* Don't request firmware again if firmware is already requested */
|
|
- if (plat_data->fw)
|
|
+ if (sdev->basefw.fw)
|
|
return 0;
|
|
|
|
fw_filename = kasprintf(GFP_KERNEL, "%s/%s",
|
|
@@ -31,7 +31,7 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
|
|
if (!fw_filename)
|
|
return -ENOMEM;
|
|
|
|
- ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
|
|
+ ret = request_firmware(&sdev->basefw.fw, fw_filename, sdev->dev);
|
|
|
|
if (ret < 0) {
|
|
dev_err(sdev->dev,
|
|
@@ -48,7 +48,7 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
|
|
ext_man_size = sdev->ipc->ops->fw_loader->parse_ext_manifest(sdev);
|
|
if (ext_man_size > 0) {
|
|
/* when no error occurred, drop extended manifest */
|
|
- plat_data->fw_offset = ext_man_size;
|
|
+ sdev->basefw.payload_offset = ext_man_size;
|
|
} else if (!ext_man_size) {
|
|
/* No extended manifest, so nothing to skip during FW load */
|
|
dev_dbg(sdev->dev, "firmware doesn't contain extended manifest\n");
|
|
@@ -58,6 +58,12 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
|
|
fw_filename, ret);
|
|
}
|
|
|
|
+ /*
|
|
+ * Until the platform code is switched to use the new container the fw
|
|
+ * and payload offset must be set in plat_data
|
|
+ */
|
|
+ plat_data->fw = sdev->basefw.fw;
|
|
+ plat_data->fw_offset = sdev->basefw.payload_offset;
|
|
err:
|
|
kfree(fw_filename);
|
|
|
|
@@ -100,7 +106,8 @@ int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev)
|
|
return 0;
|
|
|
|
error:
|
|
- release_firmware(plat_data->fw);
|
|
+ release_firmware(sdev->basefw.fw);
|
|
+ sdev->basefw.fw = NULL;
|
|
plat_data->fw = NULL;
|
|
return ret;
|
|
|
|
@@ -185,7 +192,8 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
|
|
void snd_sof_fw_unload(struct snd_sof_dev *sdev)
|
|
{
|
|
/* TODO: support module unloading at runtime */
|
|
- release_firmware(sdev->pdata->fw);
|
|
+ release_firmware(sdev->basefw.fw);
|
|
+ sdev->basefw.fw = NULL;
|
|
sdev->pdata->fw = NULL;
|
|
}
|
|
EXPORT_SYMBOL(snd_sof_fw_unload);
|
|
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
|
|
index de08825915b35..3d70b57e4864d 100644
|
|
--- a/sound/soc/sof/sof-priv.h
|
|
+++ b/sound/soc/sof/sof-priv.h
|
|
@@ -136,6 +136,17 @@ struct snd_sof_platform_stream_params {
|
|
bool cont_update_posn;
|
|
};
|
|
|
|
+/**
|
|
+ * struct sof_firmware - Container struct for SOF firmware
|
|
+ * @fw: Pointer to the firmware
|
|
+ * @payload_offset: Offset of the data within the loaded firmware image to be
|
|
+ * loaded to the DSP (skipping for example ext_manifest section)
|
|
+ */
|
|
+struct sof_firmware {
|
|
+ const struct firmware *fw;
|
|
+ u32 payload_offset;
|
|
+};
|
|
+
|
|
/*
|
|
* SOF DSP HW abstraction operations.
|
|
* Used to abstract DSP HW architecture and any IO busses between host CPU
|
|
@@ -487,6 +498,9 @@ struct snd_sof_dev {
|
|
spinlock_t ipc_lock; /* lock for IPC users */
|
|
spinlock_t hw_lock; /* lock for HW IO access */
|
|
|
|
+ /* Main, Base firmware image */
|
|
+ struct sof_firmware basefw;
|
|
+
|
|
/*
|
|
* ASoC components. plat_drv fields are set dynamically so
|
|
* can't use const
|
|
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
|
|
index 3d4add94e367d..d5409f3879455 100644
|
|
--- a/sound/usb/stream.c
|
|
+++ b/sound/usb/stream.c
|
|
@@ -300,9 +300,12 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
|
|
c = 0;
|
|
|
|
if (bits) {
|
|
- for (; bits && *maps; maps++, bits >>= 1)
|
|
+ for (; bits && *maps; maps++, bits >>= 1) {
|
|
if (bits & 1)
|
|
chmap->map[c++] = *maps;
|
|
+ if (c == chmap->channels)
|
|
+ break;
|
|
+ }
|
|
} else {
|
|
/* If we're missing wChannelConfig, then guess something
|
|
to make sure the channel map is not skipped entirely */
|
|
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
|
|
index 41c02b6f6f043..7e0b846e17eef 100644
|
|
--- a/tools/bpf/bpftool/prog.c
|
|
+++ b/tools/bpf/bpftool/prog.c
|
|
@@ -2200,7 +2200,7 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
|
|
int map_fd;
|
|
|
|
profile_perf_events = calloc(
|
|
- sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
|
|
+ obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
|
|
if (!profile_perf_events) {
|
|
p_err("failed to allocate memory for perf_event array: %s",
|
|
strerror(errno));
|
|
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
|
|
index 77058174082d7..ef0764d6891e4 100644
|
|
--- a/tools/bpf/resolve_btfids/main.c
|
|
+++ b/tools/bpf/resolve_btfids/main.c
|
|
@@ -70,6 +70,7 @@
|
|
#include <sys/stat.h>
|
|
#include <fcntl.h>
|
|
#include <errno.h>
|
|
+#include <linux/btf_ids.h>
|
|
#include <linux/rbtree.h>
|
|
#include <linux/zalloc.h>
|
|
#include <linux/err.h>
|
|
@@ -78,7 +79,7 @@
|
|
#include <subcmd/parse-options.h>
|
|
|
|
#define BTF_IDS_SECTION ".BTF_ids"
|
|
-#define BTF_ID "__BTF_ID__"
|
|
+#define BTF_ID_PREFIX "__BTF_ID__"
|
|
|
|
#define BTF_STRUCT "struct"
|
|
#define BTF_UNION "union"
|
|
@@ -89,6 +90,14 @@
|
|
|
|
#define ADDR_CNT 100
|
|
|
|
+#if __BYTE_ORDER == __LITTLE_ENDIAN
|
|
+# define ELFDATANATIVE ELFDATA2LSB
|
|
+#elif __BYTE_ORDER == __BIG_ENDIAN
|
|
+# define ELFDATANATIVE ELFDATA2MSB
|
|
+#else
|
|
+# error "Unknown machine endianness!"
|
|
+#endif
|
|
+
|
|
struct btf_id {
|
|
struct rb_node rb_node;
|
|
char *name;
|
|
@@ -116,6 +125,7 @@ struct object {
|
|
int idlist_shndx;
|
|
size_t strtabidx;
|
|
unsigned long idlist_addr;
|
|
+ int encoding;
|
|
} efile;
|
|
|
|
struct rb_root sets;
|
|
@@ -161,7 +171,7 @@ static int eprintf(int level, int var, const char *fmt, ...)
|
|
|
|
static bool is_btf_id(const char *name)
|
|
{
|
|
- return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
|
|
+ return name && !strncmp(name, BTF_ID_PREFIX, sizeof(BTF_ID_PREFIX) - 1);
|
|
}
|
|
|
|
static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
|
|
@@ -319,6 +329,7 @@ static int elf_collect(struct object *obj)
|
|
{
|
|
Elf_Scn *scn = NULL;
|
|
size_t shdrstrndx;
|
|
+ GElf_Ehdr ehdr;
|
|
int idx = 0;
|
|
Elf *elf;
|
|
int fd;
|
|
@@ -350,6 +361,13 @@ static int elf_collect(struct object *obj)
|
|
return -1;
|
|
}
|
|
|
|
+ if (gelf_getehdr(obj->efile.elf, &ehdr) == NULL) {
|
|
+ pr_err("FAILED cannot get ELF header: %s\n",
|
|
+ elf_errmsg(-1));
|
|
+ return -1;
|
|
+ }
|
|
+ obj->efile.encoding = ehdr.e_ident[EI_DATA];
|
|
+
|
|
/*
|
|
* Scan all the elf sections and look for save data
|
|
* from .BTF_ids section and symbols.
|
|
@@ -441,7 +459,7 @@ static int symbols_collect(struct object *obj)
|
|
* __BTF_ID__TYPE__vfs_truncate__0
|
|
* prefix = ^
|
|
*/
|
|
- prefix = name + sizeof(BTF_ID) - 1;
|
|
+ prefix = name + sizeof(BTF_ID_PREFIX) - 1;
|
|
|
|
/* struct */
|
|
if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
|
|
@@ -649,19 +667,18 @@ static int cmp_id(const void *pa, const void *pb)
|
|
static int sets_patch(struct object *obj)
|
|
{
|
|
Elf_Data *data = obj->efile.idlist;
|
|
- int *ptr = data->d_buf;
|
|
struct rb_node *next;
|
|
|
|
next = rb_first(&obj->sets);
|
|
while (next) {
|
|
- unsigned long addr, idx;
|
|
+ struct btf_id_set8 *set8;
|
|
+ struct btf_id_set *set;
|
|
+ unsigned long addr, off;
|
|
struct btf_id *id;
|
|
- int *base;
|
|
- int cnt;
|
|
|
|
id = rb_entry(next, struct btf_id, rb_node);
|
|
addr = id->addr[0];
|
|
- idx = addr - obj->efile.idlist_addr;
|
|
+ off = addr - obj->efile.idlist_addr;
|
|
|
|
/* sets are unique */
|
|
if (id->addr_cnt != 1) {
|
|
@@ -670,14 +687,39 @@ static int sets_patch(struct object *obj)
|
|
return -1;
|
|
}
|
|
|
|
- idx = idx / sizeof(int);
|
|
- base = &ptr[idx] + (id->is_set8 ? 2 : 1);
|
|
- cnt = ptr[idx];
|
|
+ if (id->is_set) {
|
|
+ set = data->d_buf + off;
|
|
+ qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
|
|
+ } else {
|
|
+ set8 = data->d_buf + off;
|
|
+ /*
|
|
+ * Make sure id is at the beginning of the pairs
|
|
+ * struct, otherwise the below qsort would not work.
|
|
+ */
|
|
+ BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id);
|
|
+ qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
|
|
|
|
- pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
|
|
- (idx + 1) * sizeof(int), cnt, id->name);
|
|
+ /*
|
|
+ * When ELF endianness does not match endianness of the
|
|
+ * host, libelf will do the translation when updating
|
|
+ * the ELF. This, however, corrupts SET8 flags which are
|
|
+ * already in the target endianness. So, let's bswap
|
|
+ * them to the host endianness and libelf will then
|
|
+ * correctly translate everything.
|
|
+ */
|
|
+ if (obj->efile.encoding != ELFDATANATIVE) {
|
|
+ int i;
|
|
+
|
|
+ set8->flags = bswap_32(set8->flags);
|
|
+ for (i = 0; i < set8->cnt; i++) {
|
|
+ set8->pairs[i].flags =
|
|
+ bswap_32(set8->pairs[i].flags);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
|
|
- qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
|
|
+ pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
|
|
+ off, id->is_set ? set->cnt : set8->cnt, id->name);
|
|
|
|
next = rb_next(next);
|
|
}
|
|
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
|
|
index 2f882d5cb30f5..72535f00572f6 100644
|
|
--- a/tools/include/linux/btf_ids.h
|
|
+++ b/tools/include/linux/btf_ids.h
|
|
@@ -8,6 +8,15 @@ struct btf_id_set {
|
|
u32 ids[];
|
|
};
|
|
|
|
+struct btf_id_set8 {
|
|
+ u32 cnt;
|
|
+ u32 flags;
|
|
+ struct {
|
|
+ u32 id;
|
|
+ u32 flags;
|
|
+ } pairs[];
|
|
+};
|
|
+
|
|
#ifdef CONFIG_DEBUG_INFO_BTF
|
|
|
|
#include <linux/compiler.h> /* for __PASTE */
|
|
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
|
|
index fddc05c667b5d..874fe362375de 100644
|
|
--- a/tools/lib/bpf/bpf.h
|
|
+++ b/tools/lib/bpf/bpf.h
|
|
@@ -35,7 +35,7 @@
|
|
extern "C" {
|
|
#endif
|
|
|
|
-int libbpf_set_memlock_rlim(size_t memlock_bytes);
|
|
+LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes);
|
|
|
|
struct bpf_map_create_opts {
|
|
size_t sz; /* size of this struct for forward/backward compatibility */
|
|
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
|
|
index e2014b1250ea2..c71d4d0f5c6f3 100644
|
|
--- a/tools/lib/bpf/libbpf.c
|
|
+++ b/tools/lib/bpf/libbpf.c
|
|
@@ -70,6 +70,7 @@
|
|
|
|
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
|
|
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
|
|
+static int map_set_def_max_entries(struct bpf_map *map);
|
|
|
|
static const char * const attach_type_name[] = {
|
|
[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
|
|
@@ -4992,6 +4993,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
|
|
|
|
if (bpf_map_type__is_map_in_map(def->type)) {
|
|
if (map->inner_map) {
|
|
+ err = map_set_def_max_entries(map->inner_map);
|
|
+ if (err)
|
|
+ return err;
|
|
err = bpf_object__create_map(obj, map->inner_map, true);
|
|
if (err) {
|
|
pr_warn("map '%s': failed to create inner map: %d\n",
|
|
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
|
|
index 377642ff51fce..8669f6e0f6e2f 100644
|
|
--- a/tools/lib/bpf/libbpf_internal.h
|
|
+++ b/tools/lib/bpf/libbpf_internal.h
|
|
@@ -17,6 +17,20 @@
|
|
#include <unistd.h>
|
|
#include "relo_core.h"
|
|
|
|
+/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
|
|
+ * ([0]), and just returns -EINVAL even if file exists and is accessible.
|
|
+ * See [1] for issues caused by this.
|
|
+ *
|
|
+ * So just redefine it to 0 on Android.
|
|
+ *
|
|
+ * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
|
|
+ * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
|
|
+ */
|
|
+#ifdef __ANDROID__
|
|
+#undef AT_EACCESS
|
|
+#define AT_EACCESS 0
|
|
+#endif
|
|
+
|
|
/* make sure libbpf doesn't use kernel-only integer typedefs */
|
|
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
|
|
|
|
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7314183cdcb6c..b9b0fda8374e2 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1785,8 +1785,8 @@ static int
 record__switch_output(struct record *rec, bool at_exit)
 {
 	struct perf_data *data = &rec->data;
+	char *new_filename = NULL;
 	int fd, err;
-	char *new_filename;
 
 	/* Same Size: "2015122520103046"*/
 	char timestamp[] = "InvalidTimestamp";
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 76605fde35078..7db35dbdfcefe 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2375,7 +2375,6 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
 	data->period = evsel->core.attr.sample_period;
 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 	data->misc = event->header.misc;
-	data->id = -1ULL;
 	data->data_src = PERF_MEM_DATA_SRC_NONE;
 	data->vcpu = -1;
 
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index bc866d18973e4..ef9a3df459657 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -366,7 +366,7 @@ static void print_metric_only(struct perf_stat_config *config,
 	if (color)
 		mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
 
-	color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+	color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val);
 	fprintf(out, "%*s ", mlen, str);
 }
 
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index c9bfe4696943b..cee7fc3b5bb0c 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -279,13 +279,13 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
 		threads->nr = ntasks;
 	}
 out:
+	strlist__delete(slist);
 	if (threads)
 		refcount_set(&threads->refcnt, 1);
 	return threads;
 
 out_free_threads:
 	zfree(&threads);
-	strlist__delete(slist);
 	goto out;
 }
 
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index f416032ba858b..b295f9b721bf8 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -21,6 +21,32 @@ struct {
 	__type(value, __u32);
 } mim_hash SEC(".maps");
 
+/* The following three maps are used to test
+ * perf_event_array map can be an inner
+ * map of hash/array_of_maps.
+ */
+struct perf_event_array {
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__type(key, __u32);
+	__type(value, __u32);
+} inner_map0 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__array(values, struct perf_event_array);
+} mim_array_pe SEC(".maps") = {
+	.values = {&inner_map0}};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__array(values, struct perf_event_array);
+} mim_hash_pe SEC(".maps") = {
+	.values = {&inner_map0}};
+
 SEC("xdp")
 int xdp_mimtest0(struct xdp_md *ctx)
 {
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index b73152822aa28..81cd48cc80c23 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1190,7 +1190,11 @@ static void test_map_in_map(void)
 		goto out_map_in_map;
 	}
 
-	bpf_object__load(obj);
+	err = bpf_object__load(obj);
+	if (err) {
+		printf("Failed to load test prog\n");
+		goto out_map_in_map;
+	}
 
 	map = bpf_object__find_map_by_name(obj, "mim_array");
 	if (!map) {
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 697994a9278bb..8d7a1a004b7c3 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -6,14 +6,49 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NET_VRF=m
 CONFIG_BPF_SYSCALL=y
 CONFIG_CGROUP_BPF=y
+CONFIG_DUMMY=m
+CONFIG_IPV6=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_MACVLAN=m
 CONFIG_NET_ACT_CT=m
 CONFIG_NET_ACT_MIRRED=m
 CONFIG_NET_ACT_MPLS=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
 CONFIG_NET_ACT_VLAN=m
 CONFIG_NET_CLS_FLOWER=m
 CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPIP=m
+CONFIG_NET_SCH_ETS=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_ACT_GACT=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_TEAM=y
+CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_TABLES=m
 CONFIG_VETH=m
 CONFIG_NAMESPACES=y
 CONFIG_NET_NS=y
+CONFIG_VXLAN=m
+CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
index ac97f07e5ce82..bd3f7d492af2b 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
@@ -354,7 +354,7 @@ __ping_ipv4()
 
 	# Send 100 packets and verify that at least 100 packets hit the rule,
 	# to overcome ARP noise.
-	PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
+	PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
 	check_err $? "Ping failed"
 
 	tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
@@ -410,7 +410,7 @@ __ping_ipv6()
 
 	# Send 100 packets and verify that at least 100 packets hit the rule,
 	# to overcome neighbor discovery noise.
-	PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
+	PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
 	check_err $? "Ping failed"
 
 	tc_check_at_least_x_packets "dev $rp1 egress" 101 100
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
index d880df89bc8bd..e83fde79f40d0 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
@@ -457,7 +457,7 @@ __ping_ipv4()
 
 	# Send 100 packets and verify that at least 100 packets hit the rule,
 	# to overcome ARP noise.
-	PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
+	PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
 	check_err $? "Ping failed"
 
 	tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
@@ -522,7 +522,7 @@ __ping_ipv6()
 
 	# Send 100 packets and verify that at least 100 packets hit the rule,
 	# to overcome neighbor discovery noise.
-	PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
+	PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
 	check_err $? "Ping failed"
 
 	tc_check_at_least_x_packets "dev $rp1 egress" 101 100
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 5b80fb155d549..d89ee6e1926c7 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -926,12 +926,12 @@ TEST_F(tls, recv_partial)
 
 	memset(recv_mem, 0, sizeof(recv_mem));
 	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
-	EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
-		       MSG_WAITALL), -1);
+	EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first),
+		       MSG_WAITALL), strlen(test_str_first));
 	EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
 	memset(recv_mem, 0, sizeof(recv_mem));
-	EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
-		       MSG_WAITALL), -1);
+	EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second),
+		       MSG_WAITALL), strlen(test_str_second));
 	EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
 		  0);
 }