diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt
index c8656dd029a9..958fff945304 100644
--- a/Documentation/hid/uhid.txt
+++ b/Documentation/hid/uhid.txt
@@ -160,7 +160,7 @@ them but you should handle them according to your needs.
UHID_OUTPUT:
This is sent if the HID device driver wants to send raw data to the I/O
device on the interrupt channel. You should read the payload and forward it to
- the device. The payload is of type "struct uhid_data_req".
+ the device. The payload is of type "struct uhid_output_req".
This may be received even though you haven't received UHID_OPEN, yet.

UHID_GET_REPORT:
diff --git a/Makefile b/Makefile
index dad90f53faeb..d97288c0754f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 157
+SUBLEVEL = 158
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 954ba8b81052..fd4b679945d3 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1376,21 +1376,21 @@ config DEBUG_OMAP2PLUS_UART
depends on ARCH_OMAP2PLUS

config DEBUG_IMX_UART_PORT
- int "i.MX Debug UART Port Selection" if DEBUG_IMX1_UART || \
- DEBUG_IMX25_UART || \
- DEBUG_IMX21_IMX27_UART || \
- DEBUG_IMX31_UART || \
- DEBUG_IMX35_UART || \
- DEBUG_IMX50_UART || \
- DEBUG_IMX51_UART || \
- DEBUG_IMX53_UART || \
- DEBUG_IMX6Q_UART || \
- DEBUG_IMX6SL_UART || \
- DEBUG_IMX6SX_UART || \
- DEBUG_IMX6UL_UART || \
- DEBUG_IMX7D_UART
+ int "i.MX Debug UART Port Selection"
+ depends on DEBUG_IMX1_UART || \
+ DEBUG_IMX25_UART || \
+ DEBUG_IMX21_IMX27_UART || \
+ DEBUG_IMX31_UART || \
+ DEBUG_IMX35_UART || \
+ DEBUG_IMX50_UART || \
+ DEBUG_IMX51_UART || \
+ DEBUG_IMX53_UART || \
+ DEBUG_IMX6Q_UART || \
+ DEBUG_IMX6SL_UART || \
+ DEBUG_IMX6SX_UART || \
+ DEBUG_IMX6UL_UART || \
+ DEBUG_IMX7D_UART
default 1
- depends on ARCH_MXC
help
Choose UART port on which kernel low-level debug messages
should be output.
diff --git a/arch/arm/boot/dts/gemini-sq201.dts b/arch/arm/boot/dts/gemini-sq201.dts
index 63c02ca9513c..e9e2f6ff0c58 100644
--- a/arch/arm/boot/dts/gemini-sq201.dts
+++ b/arch/arm/boot/dts/gemini-sq201.dts
@@ -20,7 +20,7 @@
};

chosen {
- bootargs = "console=ttyS0,115200n8";
+ bootargs = "console=ttyS0,115200n8 root=/dev/mtdblock2 rw rootfstype=squashfs,jffs2 rootwait";
stdout-path = &uart0;
};

@@ -71,37 +71,10 @@
/* 16MB of flash */
reg = <0x30000000 0x01000000>;

- partition@0 {
- label = "RedBoot";
- reg = <0x00000000 0x00120000>;
- read-only;
- };
- partition@120000 {
- label = "Kernel";
- reg = <0x00120000 0x00200000>;
- };
- partition@320000 {
- label = "Ramdisk";
- reg = <0x00320000 0x00600000>;
- };
- partition@920000 {
- label = "Application";
- reg = <0x00920000 0x00600000>;
- };
- partition@f20000 {
- label = "VCTL";
- reg = <0x00f20000 0x00020000>;
- read-only;
- };
- partition@f40000 {
- label = "CurConf";
- reg = <0x00f40000 0x000a0000>;
- read-only;
- };
- partition@fe0000 {
- label = "FIS directory";
- reg = <0x00fe0000 0x00020000>;
- read-only;
+ partitions {
+ compatible = "redboot-fis";
+ /* Eraseblock at 0xfe0000 */
+ fis-index-block = <0x1fc>;
};
};

diff --git a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
index df8dafe2564d..2297ed90ee89 100644
--- a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
+++ b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
@@ -17,12 +17,8 @@

memory@70000000 {
device_type = "memory";
- reg = <0x70000000 0x20000000>;
- };
-
- memory@b0000000 {
- device_type = "memory";
- reg = <0xb0000000 0x20000000>;
+ reg = <0x70000000 0x20000000>,
+ <0xb0000000 0x20000000>;
};

regulators {
diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c
index e4d709c8ed32..76d3083f1f63 100644
--- a/arch/arm/mach-ks8695/board-acs5k.c
+++ b/arch/arm/mach-ks8695/board-acs5k.c
@@ -92,7 +92,7 @@ static struct i2c_board_info acs5k_i2c_devs[] __initdata = {
},
};

-static void acs5k_i2c_init(void)
+static void __init acs5k_i2c_init(void)
{
/* The gpio interface */
platform_device_register(&acs5k_i2c_device);
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index e8ccf51c6f29..ec0235899de2 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -25,7 +25,7 @@ obj-y += $(i2c-omap-m) $(i2c-omap-y)

led-y := leds.o

-usb-fs-$(CONFIG_USB) := usb.o
+usb-fs-$(CONFIG_USB_SUPPORT) := usb.o
obj-y += $(usb-fs-m) $(usb-fs-y)

# Specific board support
diff --git a/arch/arm/mach-omap1/include/mach/usb.h b/arch/arm/mach-omap1/include/mach/usb.h
index 77867778d4ec..5429d86c7190 100644
--- a/arch/arm/mach-omap1/include/mach/usb.h
+++ b/arch/arm/mach-omap1/include/mach/usb.h
@@ -11,7 +11,7 @@

#include <linux/platform_data/usb-omap1.h>

-#if IS_ENABLED(CONFIG_USB)
+#if IS_ENABLED(CONFIG_USB_SUPPORT)
void omap1_usb_init(struct omap_usb_config *pdata);
#else
static inline void omap1_usb_init(struct omap_usb_config *pdata)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 92cc7b51f100..9c00fd2acc2a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -594,6 +594,7 @@ secondary_startup:
/*
* Common entry point for secondary CPUs.
*/
+ bl __cpu_secondary_check52bitva
bl __cpu_setup // initialise processor
bl __enable_mmu
ldr x8, =__secondary_switched
@@ -668,6 +669,31 @@ ENTRY(__enable_mmu)
ret
ENDPROC(__enable_mmu)

+ENTRY(__cpu_secondary_check52bitva)
+#ifdef CONFIG_ARM64_52BIT_VA
+ ldr_l x0, vabits_user
+ cmp x0, #52
+ b.ne 2f
+
+ mrs_s x0, SYS_ID_AA64MMFR2_EL1
+ and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz x0, 2f
+
+ adr_l x0, va52mismatch
+ mov w1, #1
+ strb w1, [x0]
+ dmb sy
+ dc ivac, x0 // Invalidate potentially stale cache line
+
+ update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x0, x1
+1: wfe
+ wfi
+ b 1b
+
+#endif
+2: ret
+ENDPROC(__cpu_secondary_check52bitva)
+
__no_granule_support:
/* Indicate that this CPU can't boot and is stuck in the kernel */
update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a683cd499515..909bf3926fd2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -106,6 +106,7 @@ static int boot_secondary(unsigned int cpu, struct task_struct *idle)
}

static DECLARE_COMPLETION(cpu_running);
+bool va52mismatch __ro_after_init;

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
@@ -135,10 +136,15 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)

if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
+
+ if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+
ret = -EIO;
}
} else {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+ return ret;
}

secondary_data.task = NULL;
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d269dd4b8279..fe5e48184c3c 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -83,19 +83,21 @@ archclean:

linux.bin linux.bin.gz linux.bin.ub: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'

simpleImage.%: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'

define archhelp
echo '* linux.bin - Create raw binary'
echo ' linux.bin.gz - Create compressed raw binary'
echo ' linux.bin.ub - Create U-Boot wrapped raw binary'
- echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
- echo ' - stripped elf with fdt blob'
- echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
- echo ' *_defconfig - Select default config from arch/microblaze/configs'
- echo ''
+ echo ' simpleImage.<dt> - Create the following images with <dt>.dtb linked in'
+ echo ' simpleImage.<dt> : raw image'
+ echo ' simpleImage.<dt>.ub : raw image with U-Boot header'
+ echo ' simpleImage.<dt>.unstrip: ELF (identical to vmlinux)'
+ echo ' simpleImage.<dt>.strip : stripped ELF'
echo ' Targets with <dt> embed a device tree blob inside the image'
echo ' These targets support board with firmware that does not'
echo ' support passing a device tree directly. Replace <dt> with the'
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 7c2f52d4a0e4..49dbd1063d71 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -9,15 +9,12 @@ OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary

$(obj)/linux.bin: vmlinux FORCE
$(call if_changed,objcopy)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'

$(obj)/linux.bin.ub: $(obj)/linux.bin FORCE
$(call if_changed,uimage)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'

$(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
$(call if_changed,gzip)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'

quiet_cmd_cp = CP $< $@$2
cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
@@ -35,6 +32,5 @@ $(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,objcopy)
$(call if_changed,uimage)
$(call if_changed,strip,.strip)
- @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'

clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index b16e95a4e875..1107d34e45bf 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -184,7 +184,7 @@ handler: ;\
* occured. in fact they never do. if you need them use
* values saved on stack (for SPR_EPC, SPR_ESR) or content
* of r4 (for SPR_EEAR). for details look at EXCEPTION_HANDLE()
- * in 'arch/or32/kernel/head.S'
+ * in 'arch/openrisc/kernel/head.S'
*/

/* =====================================================[ exceptions] === */
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 90979acdf165..4d878d13b860 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -1551,7 +1551,7 @@ _string_nl:

/*
* .data section should be page aligned
- * (look into arch/or32/kernel/vmlinux.lds)
+ * (look into arch/openrisc/kernel/vmlinux.lds.S)
*/
.section .data,"aw"
.align 8192
diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
index aa68911f6560..084b82ba7493 100644
--- a/arch/powerpc/boot/dts/bamboo.dts
+++ b/arch/powerpc/boot/dts/bamboo.dts
@@ -268,8 +268,10 @@
/* Outbound ranges, one memory and one IO,
* later cannot be changed. Chip supports a second
* IO range but we don't use it for now
+ * The chip also supports a larger memory range but
+ * it's not naturally aligned, so our code will break
*/
- ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000
+ ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000
0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;

diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 7e3ccf21830e..e4451b30d7e3 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -45,6 +45,7 @@ extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
int machine_check_8xx(struct pt_regs *regs);
+int machine_check_83xx(struct pt_regs *regs);

extern void cpu_down_flush_e500v2(void);
extern void cpu_down_flush_e500mc(void);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index b779f3ccd412..05f3c2b3aa0e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -733,6 +733,8 @@
#define SRR1_PROGTRAP 0x00020000 /* Trap */
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */

+#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */
+
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
#define HSRR1_DENORM 0x00100000 /* Denorm exception */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 760872916013..da4b0e379238 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1185,6 +1185,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc603",
},
+#ifdef CONFIG_PPC_83xx
{ /* e300c1 (a 603e core, plus some) on 83xx */
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00830000,
@@ -1195,7 +1196,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.platform = "ppc603",
},
{ /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
@@ -1209,7 +1210,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.platform = "ppc603",
},
{ /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
@@ -1223,7 +1224,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
@@ -1240,12 +1241,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
+#endif
{ /* default match, we assume split I/D cache & TB (non-601)... */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
|
|
index f83056297441..d96b28415090 100644
|
|
--- a/arch/powerpc/kernel/prom.c
|
|
+++ b/arch/powerpc/kernel/prom.c
|
|
@@ -128,7 +128,7 @@ static void __init move_device_tree(void)
|
|
p = __va(memblock_alloc(size, PAGE_SIZE));
|
|
memcpy(p, initial_boot_params, size);
|
|
initial_boot_params = p;
|
|
- DBG("Moved device tree to 0x%p\n", p);
|
|
+ DBG("Moved device tree to 0x%px\n", p);
|
|
}
|
|
|
|
DBG("<- move_device_tree\n");
|
|
@@ -662,7 +662,7 @@ void __init early_init_devtree(void *params)
|
|
{
|
|
phys_addr_t limit;
|
|
|
|
- DBG(" -> early_init_devtree(%p)\n", params);
|
|
+ DBG(" -> early_init_devtree(%px)\n", params);
|
|
|
|
/* Too early to BUG_ON(), do it by hand */
|
|
if (!early_init_dt_verify(params))
|
|
@@ -722,7 +722,7 @@ void __init early_init_devtree(void *params)
|
|
memblock_allow_resize();
|
|
memblock_dump_all();
|
|
|
|
- DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
|
|
+ DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());
|
|
|
|
/* We may need to relocate the flat tree, do it now.
|
|
* FIXME .. and the initrd too? */
|
|
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
|
|
index 52863deed65d..5fc8a010fdf0 100644
|
|
--- a/arch/powerpc/mm/fault.c
|
|
+++ b/arch/powerpc/mm/fault.c
|
|
@@ -581,21 +581,22 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
|
|
switch (regs->trap) {
|
|
case 0x300:
|
|
case 0x380:
|
|
- printk(KERN_ALERT "Unable to handle kernel paging request for "
|
|
- "data at address 0x%08lx\n", regs->dar);
|
|
+ pr_alert("BUG: %s at 0x%08lx\n",
|
|
+ regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
|
|
+ "Unable to handle kernel data access", regs->dar);
|
|
break;
|
|
case 0x400:
|
|
case 0x480:
|
|
- printk(KERN_ALERT "Unable to handle kernel paging request for "
|
|
- "instruction fetch\n");
|
|
+ pr_alert("BUG: Unable to handle kernel instruction fetch%s",
|
|
+ regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
|
|
break;
|
|
case 0x600:
|
|
- printk(KERN_ALERT "Unable to handle kernel paging request for "
|
|
- "unaligned access at address 0x%08lx\n", regs->dar);
|
|
+ pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
|
|
+ regs->dar);
|
|
break;
|
|
default:
|
|
- printk(KERN_ALERT "Unable to handle kernel paging request for "
|
|
- "unknown fault\n");
|
|
+ pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
|
|
+ regs->dar);
|
|
break;
|
|
}
|
|
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
|
|
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
|
|
index 2a049fb8523d..96c52271e9c2 100644
|
|
--- a/arch/powerpc/mm/ppc_mmu_32.c
|
|
+++ b/arch/powerpc/mm/ppc_mmu_32.c
|
|
@@ -52,7 +52,7 @@ struct batrange { /* stores address ranges mapped by BATs */
|
|
phys_addr_t v_block_mapped(unsigned long va)
|
|
{
|
|
int b;
|
|
- for (b = 0; b < 4; ++b)
|
|
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
|
|
if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
|
|
return bat_addrs[b].phys + (va - bat_addrs[b].start);
|
|
return 0;
|
|
@@ -64,7 +64,7 @@ phys_addr_t v_block_mapped(unsigned long va)
|
|
unsigned long p_block_mapped(phys_addr_t pa)
|
|
{
|
|
int b;
|
|
- for (b = 0; b < 4; ++b)
|
|
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
|
|
if (pa >= bat_addrs[b].phys
|
|
&& pa < (bat_addrs[b].limit-bat_addrs[b].start)
|
|
+bat_addrs[b].phys)
|
|
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
|
|
index cf9c35aa0cf4..7ecea7143e58 100644
|
|
--- a/arch/powerpc/perf/isa207-common.c
|
|
+++ b/arch/powerpc/perf/isa207-common.c
|
|
@@ -150,6 +150,14 @@ static bool is_thresh_cmp_valid(u64 event)
|
|
return true;
|
|
}
|
|
|
|
+static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
|
|
+{
|
|
+ unsigned int cache;
|
|
+
|
|
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
|
|
+ return cache;
|
|
+}
|
|
+
|
|
static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
|
|
{
|
|
u64 ret = PERF_MEM_NA;
|
|
@@ -290,10 +298,10 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
|
* have a cache selector of zero. The bank selector (bit 3) is
|
|
* irrelevant, as long as the rest of the value is 0.
|
|
*/
|
|
- if (cache & 0x7)
|
|
+ if (!cpu_has_feature(CPU_FTR_ARCH_300) && (cache & 0x7))
|
|
return -1;
|
|
|
|
- } else if (event & EVENT_IS_L1) {
|
|
+ } else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
|
|
mask |= CNST_L1_QUAL_MASK;
|
|
value |= CNST_L1_QUAL_VAL(cache);
|
|
}
|
|
@@ -396,11 +404,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
|
|
/* In continuous sampling mode, update SDAR on TLB miss */
|
|
mmcra_sdar_mode(event[i], &mmcra);
|
|
|
|
- if (event[i] & EVENT_IS_L1) {
|
|
- cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
|
|
- mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
|
|
- cache >>= 1;
|
|
- mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
|
|
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
|
|
+ cache = dc_ic_rld_quad_l1_sel(event[i]);
|
|
+ mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
|
|
+ } else {
|
|
+ if (event[i] & EVENT_IS_L1) {
|
|
+ cache = dc_ic_rld_quad_l1_sel(event[i]);
|
|
+ mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
|
|
+ }
|
|
}
|
|
|
|
if (is_event_marked(event[i])) {
|
|
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
|
|
index 6c737d675792..493e5cc5fa8a 100644
|
|
--- a/arch/powerpc/perf/isa207-common.h
|
|
+++ b/arch/powerpc/perf/isa207-common.h
|
|
@@ -232,8 +232,8 @@
|
|
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
|
|
#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
|
|
#define MMCR1_FAB_SHIFT 36
|
|
-#define MMCR1_DC_QUAL_SHIFT 47
|
|
-#define MMCR1_IC_QUAL_SHIFT 46
|
|
+#define MMCR1_DC_IC_QUAL_MASK 0x3
|
|
+#define MMCR1_DC_IC_QUAL_SHIFT 46
|
|
|
|
/* MMCR1 Combine bits macro for power9 */
|
|
#define p9_MMCR1_COMBINE_SHIFT(pmc) (38 - ((pmc - 1) * 2))
|
|
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
|
|
index d75c9816a5c9..2b6589fe812d 100644
|
|
--- a/arch/powerpc/platforms/83xx/misc.c
|
|
+++ b/arch/powerpc/platforms/83xx/misc.c
|
|
@@ -14,6 +14,7 @@
|
|
#include <linux/of_platform.h>
|
|
#include <linux/pci.h>
|
|
|
|
+#include <asm/debug.h>
|
|
#include <asm/io.h>
|
|
#include <asm/hw_irq.h>
|
|
#include <asm/ipic.h>
|
|
@@ -150,3 +151,19 @@ void __init mpc83xx_setup_arch(void)
|
|
|
|
mpc83xx_setup_pci();
|
|
}
|
|
+
|
|
+int machine_check_83xx(struct pt_regs *regs)
|
|
+{
|
|
+ u32 mask = 1 << (31 - IPIC_MCP_WDT);
|
|
+
|
|
+ if (!(regs->msr & SRR1_MCE_MCP) || !(ipic_get_mcp_status() & mask))
|
|
+ return machine_check_generic(regs);
|
|
+ ipic_clear_mcp_status(mask);
|
|
+
|
|
+ if (debugger_fault_handler(regs))
|
|
+ return 1;
|
|
+
|
|
+ die("Watchdog NMI Reset", regs, 0);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
|
|
index 8864065eba22..fa2965c96155 100644
|
|
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
|
|
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
|
|
@@ -548,8 +548,8 @@ static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
|
|
static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
|
|
{
|
|
struct pnv_phb *phb = pe->phb->private_data;
|
|
- u8 fstate;
|
|
- __be16 pcierr;
|
|
+ u8 fstate = 0;
|
|
+ __be16 pcierr = 0;
|
|
s64 rc;
|
|
int result = 0;
|
|
|
|
@@ -587,8 +587,8 @@ static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
|
|
static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
|
|
{
|
|
struct pnv_phb *phb = pe->phb->private_data;
|
|
- u8 fstate;
|
|
- __be16 pcierr;
|
|
+ u8 fstate = 0;
|
|
+ __be16 pcierr = 0;
|
|
s64 rc;
|
|
int result;
|
|
|
|
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
index ddef22e00ddd..d3d5796f7df6 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
@@ -598,8 +598,8 @@ static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
|
|
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
|
|
{
|
|
struct pnv_ioda_pe *slave, *pe;
|
|
- u8 fstate, state;
|
|
- __be16 pcierr;
|
|
+ u8 fstate = 0, state;
|
|
+ __be16 pcierr = 0;
|
|
s64 rc;
|
|
|
|
/* Sanity check on PE number */
|
|
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
|
|
index 5422f4a6317c..e2d031a3ec15 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci.c
|
|
@@ -600,8 +600,8 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
|
|
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
|
|
{
|
|
struct pnv_phb *phb = pdn->phb->private_data;
|
|
- u8 fstate;
|
|
- __be16 pcierr;
|
|
+ u8 fstate = 0;
|
|
+ __be16 pcierr = 0;
|
|
unsigned int pe_no;
|
|
s64 rc;
|
|
|
|
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
|
|
index f4e6565dd7a9..fb2876a84fbe 100644
|
|
--- a/arch/powerpc/platforms/pseries/dlpar.c
|
|
+++ b/arch/powerpc/platforms/pseries/dlpar.c
|
|
@@ -63,6 +63,10 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
|
|
|
|
name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
|
|
prop->name = kstrdup(name, GFP_KERNEL);
|
|
+ if (!prop->name) {
|
|
+ dlpar_free_cc_property(prop);
|
|
+ return NULL;
|
|
+ }
|
|
|
|
prop->length = be32_to_cpu(ccwa->prop_length);
|
|
value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
|
|
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
index 99a3cf51c5ba..fdfce7a46d73 100644
|
|
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
@@ -295,6 +295,7 @@ static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
|
|
|
|
aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
|
|
|
|
+ of_node_put(dr_node);
|
|
dlpar_free_cc_nodes(lmb_node);
|
|
return aa_index;
|
|
}
|
|
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
|
|
index 5a739588aa50..51a53fd51722 100644
|
|
--- a/arch/powerpc/xmon/xmon.c
|
|
+++ b/arch/powerpc/xmon/xmon.c
|
|
@@ -3293,7 +3293,7 @@ void dump_segments(void)
|
|
|
|
printf("sr0-15 =");
|
|
for (i = 0; i < 16; ++i)
|
|
- printf(" %x", mfsrin(i));
|
|
+ printf(" %x", mfsrin(i << 28));
|
|
printf("\n");
|
|
}
|
|
#endif
|
|
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
|
|
index ff62a4fe2159..91c24e87fe10 100644
|
|
--- a/arch/s390/kvm/kvm-s390.c
|
|
+++ b/arch/s390/kvm/kvm-s390.c
|
|
@@ -361,19 +361,30 @@ static void kvm_s390_cpu_feat_init(void)
|
|
|
|
int kvm_arch_init(void *opaque)
|
|
{
|
|
+ int rc;
|
|
+
|
|
kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
|
|
if (!kvm_s390_dbf)
|
|
return -ENOMEM;
|
|
|
|
if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
|
|
- debug_unregister(kvm_s390_dbf);
|
|
- return -ENOMEM;
|
|
+ rc = -ENOMEM;
|
|
+ goto out_debug_unreg;
|
|
}
|
|
|
|
kvm_s390_cpu_feat_init();
|
|
|
|
/* Register floating interrupt controller interface. */
|
|
- return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
|
|
+ rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
|
|
+ if (rc) {
|
|
+ pr_err("Failed to register FLIC rc=%d\n", rc);
|
|
+ goto out_debug_unreg;
|
|
+ }
|
|
+ return 0;
|
|
+
|
|
+out_debug_unreg:
|
|
+ debug_unregister(kvm_s390_dbf);
|
|
+ return rc;
|
|
}
|
|
|
|
void kvm_arch_exit(void)
|
|
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
|
|
index 05c8abd864f1..9bce54eac0b0 100644
|
|
--- a/arch/s390/mm/gup.c
|
|
+++ b/arch/s390/mm/gup.c
|
|
@@ -39,7 +39,8 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
|
|
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
|
|
page = pte_page(pte);
|
|
head = compound_head(page);
|
|
- if (!page_cache_get_speculative(head))
|
|
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
|
|
+ || !page_cache_get_speculative(head)))
|
|
return 0;
|
|
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
|
|
put_page(head);
|
|
@@ -77,7 +78,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
|
|
refs++;
|
|
} while (addr += PAGE_SIZE, addr != end);
|
|
|
|
- if (!page_cache_add_speculative(head, refs)) {
|
|
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
|
|
+ || !page_cache_add_speculative(head, refs))) {
|
|
*nr -= refs;
|
|
return 0;
|
|
}
|
|
@@ -151,7 +153,8 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
|
|
refs++;
|
|
} while (addr += PAGE_SIZE, addr != end);
|
|
|
|
- if (!page_cache_add_speculative(head, refs)) {
|
|
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
|
|
+ || !page_cache_add_speculative(head, refs))) {
|
|
*nr -= refs;
|
|
return 0;
|
|
}
|
|
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
|
|
index 967d3109689f..39d44bfb241d 100644
|
|
--- a/arch/um/Kconfig.debug
|
|
+++ b/arch/um/Kconfig.debug
|
|
@@ -19,6 +19,7 @@ config GPROF
|
|
config GCOV
|
|
bool "Enable gcov support"
|
|
depends on DEBUG_INFO
|
|
+ depends on !KCOV
|
|
help
|
|
This option allows developers to retrieve coverage data from a UML
|
|
session.
|
|
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
|
|
index f6ea94f8954a..f892cb0b485e 100644
|
|
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
|
|
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
|
|
@@ -313,6 +313,10 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
|
|
int ret = 0;
|
|
|
|
rdtgrp = rdtgroup_kn_lock_live(of->kn);
|
|
+ if (!rdtgrp) {
|
|
+ ret = -ENOENT;
|
|
+ goto out;
|
|
+ }
|
|
|
|
md.priv = of->kn->priv;
|
|
resid = md.u.rid;
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index f67fc0f359ff..c579cda1721e 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -2818,9 +2818,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
|
|
index = __find_msr_index(vmx, MSR_CSTAR);
|
|
if (index >= 0)
|
|
move_msr_up(vmx, index, save_nmsrs++);
|
|
- index = __find_msr_index(vmx, MSR_TSC_AUX);
|
|
- if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
|
|
- move_msr_up(vmx, index, save_nmsrs++);
|
|
/*
|
|
* MSR_STAR is only needed on long mode guests, and only
|
|
* if efer.sce is enabled.
|
|
@@ -2833,6 +2830,9 @@ static void setup_msrs(struct vcpu_vmx *vmx)
|
|
index = __find_msr_index(vmx, MSR_EFER);
|
|
if (index >= 0 && update_transition_efer(vmx, index))
|
|
move_msr_up(vmx, index, save_nmsrs++);
|
|
+ index = __find_msr_index(vmx, MSR_TSC_AUX);
|
|
+ if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
|
|
+ move_msr_up(vmx, index, save_nmsrs++);
|
|
|
|
vmx->save_nmsrs = save_nmsrs;
|
|
|
|
@@ -10000,10 +10000,6 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
|
|
vmx_vcpu_load(vcpu, cpu);
|
|
vcpu->cpu = cpu;
|
|
put_cpu();
|
|
-
|
|
- vm_entry_controls_reset_shadow(vmx);
|
|
- vm_exit_controls_reset_shadow(vmx);
|
|
- vmx_segment_cache_clear(vmx);
|
|
}
|
|
|
|
/*
|
|
@@ -11432,6 +11428,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
|
|
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
|
|
|
|
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
|
|
+ vmx_segment_cache_clear(vmx);
|
|
|
|
if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
|
|
leave_guest_mode(vcpu);
|
|
@@ -12175,6 +12172,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
|
|
}
|
|
|
|
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
|
|
+ vm_entry_controls_reset_shadow(vmx);
|
|
+ vm_exit_controls_reset_shadow(vmx);
|
|
+ vmx_segment_cache_clear(vmx);
|
|
|
|
/* Update any VMCS fields that might have changed while L2 ran */
|
|
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
|
|
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
|
|
index 3a6feed76dfc..a93d8a7cef26 100644
|
|
--- a/arch/x86/xen/xen-asm_64.S
|
|
+++ b/arch/x86/xen/xen-asm_64.S
|
|
@@ -12,6 +12,7 @@
|
|
#include <asm/segment.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/thread_info.h>
|
|
+#include <asm/asm.h>
|
|
|
|
#include <xen/interface/xen.h>
|
|
|
|
@@ -24,6 +25,7 @@ ENTRY(xen_\name)
|
|
pop %r11
|
|
jmp \name
|
|
END(xen_\name)
|
|
+_ASM_NOKPROBE(xen_\name)
|
|
.endm
|
|
|
|
xen_pv_trap divide_error
|
|
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
|
|
index 2b8fb8f1391e..5e457a7dd1c9 100644
|
|
--- a/crypto/crypto_user.c
|
|
+++ b/crypto/crypto_user.c
|
|
@@ -296,30 +296,33 @@ drop_alg:
|
|
|
|
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
|
|
{
|
|
- struct crypto_alg *alg;
|
|
+ const size_t start_pos = cb->args[0];
|
|
+ size_t pos = 0;
|
|
struct crypto_dump_info info;
|
|
- int err;
|
|
-
|
|
- if (cb->args[0])
|
|
- goto out;
|
|
-
|
|
- cb->args[0] = 1;
|
|
+ struct crypto_alg *alg;
|
|
+ int res;
|
|
|
|
info.in_skb = cb->skb;
|
|
info.out_skb = skb;
|
|
info.nlmsg_seq = cb->nlh->nlmsg_seq;
|
|
info.nlmsg_flags = NLM_F_MULTI;
|
|
|
|
+ down_read(&crypto_alg_sem);
|
|
list_for_each_entry(alg, &crypto_alg_list, cra_list) {
|
|
- err = crypto_report_alg(alg, &info);
|
|
- if (err)
|
|
- goto out_err;
|
|
+ if (pos >= start_pos) {
|
|
+ res = crypto_report_alg(alg, &info);
|
|
+ if (res == -EMSGSIZE)
|
|
+ break;
|
|
+ if (res)
|
|
+ goto out;
|
|
+ }
|
|
+ pos++;
|
|
}
|
|
-
|
|
+ cb->args[0] = pos;
|
|
+ res = skb->len;
|
|
out:
|
|
- return skb->len;
|
|
-out_err:
|
|
- return err;
|
|
+ up_read(&crypto_alg_sem);
|
|
+ return res;
|
|
}
|
|
|
|
static int crypto_dump_report_done(struct netlink_callback *cb)
|
|
@@ -503,7 +506,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
|
|
(nlh->nlmsg_flags & NLM_F_DUMP))) {
|
|
struct crypto_alg *alg;
|
|
- u16 dump_alloc = 0;
|
|
+ unsigned long dump_alloc = 0;
|
|
|
|
if (link->dump == NULL)
|
|
return -EINVAL;
|
|
@@ -511,16 +514,16 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
down_read(&crypto_alg_sem);
|
|
list_for_each_entry(alg, &crypto_alg_list, cra_list)
|
|
dump_alloc += CRYPTO_REPORT_MAXSIZE;
|
|
+ up_read(&crypto_alg_sem);
|
|
|
|
{
|
|
struct netlink_dump_control c = {
|
|
.dump = link->dump,
|
|
.done = link->done,
|
|
- .min_dump_alloc = dump_alloc,
|
|
+ .min_dump_alloc = min(dump_alloc, 65535UL),
|
|
};
|
|
err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
|
|
}
|
|
- up_read(&crypto_alg_sem);
|
|
|
|
return err;
|
|
}
|
|
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
|
|
index 1ab8d7223b25..84b1d30f699c 100644
|
|
--- a/drivers/acpi/acpi_lpss.c
|
|
+++ b/drivers/acpi/acpi_lpss.c
|
|
@@ -518,12 +518,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
|
|
* have _PS0 and _PS3 without _PSC (and no power resources), so
|
|
* acpi_bus_init_power() will assume that the BIOS has put them into D0.
|
|
*/
|
|
- ret = acpi_device_fix_up_power(adev);
|
|
- if (ret) {
|
|
- /* Skip the device, but continue the namespace scan. */
|
|
- ret = 0;
|
|
- goto err_out;
|
|
- }
|
|
+ acpi_device_fix_up_power(adev);
|
|
|
|
adev->driver_data = pdata;
|
|
pdev = acpi_create_platform_device(adev, dev_desc->properties);
|
|
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
|
|
index 5889f6407fea..cd6fae6ad4c2 100644
|
|
--- a/drivers/acpi/apei/ghes.c
|
|
+++ b/drivers/acpi/apei/ghes.c
|
|
@@ -33,7 +33,6 @@
|
|
#include <linux/interrupt.h>
|
|
#include <linux/timer.h>
|
|
#include <linux/cper.h>
|
|
-#include <linux/kdebug.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/mutex.h>
|
|
#include <linux/ratelimit.h>
|
|
@@ -171,40 +170,40 @@ static int ghes_estatus_pool_init(void)
|
|
return 0;
|
|
}
|
|
|
|
-static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
|
|
+static void ghes_estatus_pool_free_chunk(struct gen_pool *pool,
|
|
struct gen_pool_chunk *chunk,
|
|
void *data)
|
|
{
|
|
- free_page(chunk->start_addr);
|
|
+ vfree((void *)chunk->start_addr);
|
|
}
|
|
|
|
static void ghes_estatus_pool_exit(void)
|
|
{
|
|
gen_pool_for_each_chunk(ghes_estatus_pool,
|
|
- ghes_estatus_pool_free_chunk_page, NULL);
|
|
+ ghes_estatus_pool_free_chunk, NULL);
|
|
gen_pool_destroy(ghes_estatus_pool);
|
|
}
|
|
|
|
static int ghes_estatus_pool_expand(unsigned long len)
|
|
{
|
|
- unsigned long i, pages, size, addr;
|
|
- int ret;
|
|
+ unsigned long size, addr;
|
|
|
|
ghes_estatus_pool_size_request += PAGE_ALIGN(len);
|
|
size = gen_pool_size(ghes_estatus_pool);
|
|
if (size >= ghes_estatus_pool_size_request)
|
|
return 0;
|
|
- pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
|
|
- for (i = 0; i < pages; i++) {
|
|
- addr = __get_free_page(GFP_KERNEL);
|
|
- if (!addr)
|
|
- return -ENOMEM;
|
|
- ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
|
|
- return 0;
|
|
+ addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
|
|
+ if (!addr)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ /*
|
|
+ * New allocation must be visible in all pgd before it can be found by
|
|
+ * an NMI allocating from the pool.
|
|
+ */
|
|
+ vmalloc_sync_all();
|
|
+
|
|
+ return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
|
|
}
|
|
|
|
static int map_gen_v2(struct ghes *ghes)
|
|
@@ -936,7 +935,6 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
|
|
|
|
sev = ghes_severity(ghes->estatus->error_severity);
|
|
if (sev >= GHES_SEV_PANIC) {
|
|
- oops_begin();
|
|
ghes_print_queued_estatus();
|
|
__ghes_panic(ghes);
|
|
}
|
|
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
|
|
index 9045c5f3734e..f1105de0d9fe 100644
|
|
--- a/drivers/base/platform.c
|
|
+++ b/drivers/base/platform.c
|
|
@@ -27,6 +27,7 @@
|
|
#include <linux/clk/clk-conf.h>
|
|
#include <linux/limits.h>
|
|
#include <linux/property.h>
|
|
+#include <linux/kmemleak.h>
|
|
|
|
#include "base.h"
|
|
#include "power/power.h"
|
|
@@ -526,6 +527,8 @@ struct platform_device *platform_device_register_full(
|
|
if (!pdev->dev.dma_mask)
|
|
goto err;
|
|
|
|
+ kmemleak_ignore(pdev->dev.dma_mask);
|
|
+
|
|
*pdev->dev.dma_mask = pdevinfo->dma_mask;
|
|
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
|
|
}
|
|
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
|
|
index 8cb3791898ae..7ea13b5497fd 100644
|
|
--- a/drivers/block/drbd/drbd_main.c
|
|
+++ b/drivers/block/drbd/drbd_main.c
|
|
@@ -795,7 +795,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
|
|
|
|
if (nc->tentative && connection->agreed_pro_version < 92) {
|
|
rcu_read_unlock();
|
|
- mutex_unlock(&sock->mutex);
|
|
drbd_err(connection, "--dry-run is not supported by peer");
|
|
return -EOPNOTSUPP;
|
|
}
|
|
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
|
|
index ad13ec66c8e4..31d7fe4480af 100644
|
|
--- a/drivers/block/drbd/drbd_nl.c
|
|
+++ b/drivers/block/drbd/drbd_nl.c
|
|
@@ -1515,6 +1515,30 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
|
|
}
|
|
}
|
|
|
|
+static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
|
|
+{
|
|
+ int err = -EBUSY;
|
|
+
|
|
+ if (device->act_log &&
|
|
+ device->act_log->nr_elements == dc->al_extents)
|
|
+ return 0;
|
|
+
|
|
+ drbd_suspend_io(device);
|
|
+ /* If IO completion is currently blocked, we would likely wait
|
|
+ * "forever" for the activity log to become unused. So we don't. */
|
|
+ if (atomic_read(&device->ap_bio_cnt))
|
|
+ goto out;
|
|
+
|
|
+ wait_event(device->al_wait, lc_try_lock(device->act_log));
|
|
+ drbd_al_shrink(device);
|
|
+ err = drbd_check_al_size(device, dc);
|
|
+ lc_unlock(device->act_log);
|
|
+ wake_up(&device->al_wait);
|
|
+out:
|
|
+ drbd_resume_io(device);
|
|
+ return err;
|
|
+}
|
|
+
|
|
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
|
|
{
|
|
struct drbd_config_context adm_ctx;
|
|
@@ -1577,15 +1601,12 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
|
|
}
|
|
}
|
|
|
|
- drbd_suspend_io(device);
|
|
- wait_event(device->al_wait, lc_try_lock(device->act_log));
|
|
- drbd_al_shrink(device);
|
|
- err = drbd_check_al_size(device, new_disk_conf);
|
|
- lc_unlock(device->act_log);
|
|
- wake_up(&device->al_wait);
|
|
- drbd_resume_io(device);
|
|
-
|
|
+ err = disk_opts_check_al_size(device, new_disk_conf);
|
|
if (err) {
|
|
+ /* Could be just "busy". Ignore?
|
|
+ * Introduce dedicated error code? */
|
|
+ drbd_msg_put_info(adm_ctx.reply_skb,
|
|
+ "Try again without changing current al-extents setting");
|
|
retcode = ERR_NOMEM;
|
|
goto fail_unlock;
|
|
}
|
|
@@ -1935,9 +1956,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
|
|
}
|
|
}
|
|
|
|
- if (device->state.conn < C_CONNECTED &&
|
|
- device->state.role == R_PRIMARY && device->ed_uuid &&
|
|
- (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
|
|
+ if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
|
|
+ (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
|
|
+ (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
|
|
drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
|
|
(unsigned long long)device->ed_uuid);
|
|
retcode = ERR_DATA_NOT_CURRENT;
|
|
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
|
|
index 8fbdfaacc222..a7c180426c60 100644
|
|
--- a/drivers/block/drbd/drbd_receiver.c
|
|
+++ b/drivers/block/drbd/drbd_receiver.c
|
|
@@ -3977,6 +3977,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
|
|
struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
|
|
enum determine_dev_size dd = DS_UNCHANGED;
|
|
sector_t p_size, p_usize, p_csize, my_usize;
|
|
+ sector_t new_size, cur_size;
|
|
int ldsc = 0; /* local disk size changed */
|
|
enum dds_flags ddsf;
|
|
|
|
@@ -3984,6 +3985,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
|
|
if (!peer_device)
|
|
return config_unknown_volume(connection, pi);
|
|
device = peer_device->device;
|
|
+ cur_size = drbd_get_capacity(device->this_bdev);
|
|
|
|
p_size = be64_to_cpu(p->d_size);
|
|
p_usize = be64_to_cpu(p->u_size);
|
|
@@ -3994,7 +3996,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
|
|
device->p_size = p_size;
|
|
|
|
if (get_ldev(device)) {
|
|
- sector_t new_size, cur_size;
|
|
rcu_read_lock();
|
|
my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
|
|
rcu_read_unlock();
|
|
@@ -4012,7 +4013,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
|
|
/* Never shrink a device with usable data during connect.
|
|
But allow online shrinking if we are connected. */
|
|
new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
|
|
- cur_size = drbd_get_capacity(device->this_bdev);
|
|
if (new_size < cur_size &&
|
|
device->state.disk >= D_OUTDATED &&
|
|
device->state.conn < C_CONNECTED) {
|
|
@@ -4077,9 +4077,36 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
|
|
*
|
|
* However, if he sends a zero current size,
|
|
* take his (user-capped or) backing disk size anyways.
|
|
+ *
|
|
+ * Unless of course he does not have a disk himself.
|
|
+ * In which case we ignore this completely.
|
|
*/
|
|
+ sector_t new_size = p_csize ?: p_usize ?: p_size;
|
|
drbd_reconsider_queue_parameters(device, NULL, o);
|
|
- drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
|
|
+ if (new_size == 0) {
|
|
+ /* Ignore, peer does not know nothing. */
|
|
+ } else if (new_size == cur_size) {
|
|
+ /* nothing to do */
|
|
+ } else if (cur_size != 0 && p_size == 0) {
|
|
+ drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
|
|
+ (unsigned long long)new_size, (unsigned long long)cur_size);
|
|
+ } else if (new_size < cur_size && device->state.role == R_PRIMARY) {
|
|
+ drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
|
|
+ (unsigned long long)new_size, (unsigned long long)cur_size);
|
|
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
|
|
+ return -EIO;
|
|
+ } else {
|
|
+ /* I believe the peer, if
|
|
+ * - I don't have a current size myself
|
|
+ * - we agree on the size anyways
|
|
+ * - I do have a current size, am Secondary,
|
|
+ * and he has the only disk
|
|
+ * - I do have a current size, am Primary,
|
|
+ * and he has the only disk,
|
|
+ * which is larger than my current size
|
|
+ */
|
|
+ drbd_set_my_capacity(device, new_size);
|
|
+ }
|
|
}
|
|
|
|
if (get_ldev(device)) {
|
|
@@ -4365,6 +4392,25 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
|
|
if (peer_state.conn == C_AHEAD)
|
|
ns.conn = C_BEHIND;
|
|
|
|
+ /* TODO:
|
|
+ * if (primary and diskless and peer uuid != effective uuid)
|
|
+ * abort attach on peer;
|
|
+ *
|
|
+ * If this node does not have good data, was already connected, but
|
|
+ * the peer did a late attach only now, trying to "negotiate" with me,
|
|
+ * AND I am currently Primary, possibly frozen, with some specific
|
|
+ * "effective" uuid, this should never be reached, really, because
|
|
+ * we first send the uuids, then the current state.
|
|
+ *
|
|
+ * In this scenario, we already dropped the connection hard
|
|
+ * when we received the unsuitable uuids (receive_uuids().
|
|
+ *
|
|
+ * Should we want to change this, that is: not drop the connection in
|
|
+ * receive_uuids() already, then we would need to add a branch here
|
|
+ * that aborts the attach of "unsuitable uuids" on the peer in case
|
|
+ * this node is currently Diskless Primary.
|
|
+ */
|
|
+
|
|
if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
|
|
get_ldev_if_state(device, D_NEGOTIATING)) {
|
|
int cr; /* consider resync */
|
|
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
|
|
index ea58301d0895..b2a390ba73a0 100644
|
|
--- a/drivers/block/drbd/drbd_state.h
|
|
+++ b/drivers/block/drbd/drbd_state.h
|
|
@@ -131,7 +131,7 @@ extern enum drbd_state_rv _drbd_set_state(struct drbd_device *, union drbd_state
|
|
enum chg_state_flags,
|
|
struct completion *done);
|
|
extern void print_st_err(struct drbd_device *, union drbd_state,
|
|
- union drbd_state, int);
|
|
+ union drbd_state, enum drbd_state_rv);
|
|
|
|
enum drbd_state_rv
|
|
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
|
|
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
|
|
index 6d41b2023f09..61971ddbd231 100644
|
|
--- a/drivers/bluetooth/hci_bcm.c
|
|
+++ b/drivers/bluetooth/hci_bcm.c
|
|
@@ -50,6 +50,12 @@
|
|
#define BCM_LM_DIAG_PKT 0x07
|
|
#define BCM_LM_DIAG_SIZE 63
|
|
|
|
+#define BCM_TYPE49_PKT 0x31
|
|
+#define BCM_TYPE49_SIZE 0
|
|
+
|
|
+#define BCM_TYPE52_PKT 0x34
|
|
+#define BCM_TYPE52_SIZE 0
|
|
+
|
|
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
|
|
|
|
/* platform device driver resources */
|
|
@@ -483,12 +489,28 @@ finalize:
|
|
.lsize = 0, \
|
|
.maxlen = BCM_NULL_SIZE
|
|
|
|
+#define BCM_RECV_TYPE49 \
|
|
+ .type = BCM_TYPE49_PKT, \
|
|
+ .hlen = BCM_TYPE49_SIZE, \
|
|
+ .loff = 0, \
|
|
+ .lsize = 0, \
|
|
+ .maxlen = BCM_TYPE49_SIZE
|
|
+
|
|
+#define BCM_RECV_TYPE52 \
|
|
+ .type = BCM_TYPE52_PKT, \
|
|
+ .hlen = BCM_TYPE52_SIZE, \
|
|
+ .loff = 0, \
|
|
+ .lsize = 0, \
|
|
+ .maxlen = BCM_TYPE52_SIZE
|
|
+
|
|
static const struct h4_recv_pkt bcm_recv_pkts[] = {
|
|
{ H4_RECV_ACL, .recv = hci_recv_frame },
|
|
{ H4_RECV_SCO, .recv = hci_recv_frame },
|
|
{ H4_RECV_EVENT, .recv = hci_recv_frame },
|
|
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
|
|
{ BCM_RECV_NULL, .recv = hci_recv_diag },
|
|
+ { BCM_RECV_TYPE49, .recv = hci_recv_diag },
|
|
+ { BCM_RECV_TYPE52, .recv = hci_recv_diag },
|
|
};
|
|
|
|
static int bcm_recv(struct hci_uart *hu, const void *data, int count)
|
|
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
|
|
index 83c695938a2d..f53d47e3355d 100644
|
|
--- a/drivers/char/hw_random/stm32-rng.c
|
|
+++ b/drivers/char/hw_random/stm32-rng.c
|
|
@@ -166,6 +166,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
|
|
return devm_hwrng_register(dev, &priv->rng);
|
|
}
|
|
|
|
+static int stm32_rng_remove(struct platform_device *ofdev)
|
|
+{
|
|
+ pm_runtime_disable(&ofdev->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
#ifdef CONFIG_PM
|
|
static int stm32_rng_runtime_suspend(struct device *dev)
|
|
{
|
|
@@ -202,6 +209,7 @@ static struct platform_driver stm32_rng_driver = {
|
|
.of_match_table = stm32_rng_match,
|
|
},
|
|
.probe = stm32_rng_probe,
|
|
+ .remove = stm32_rng_remove,
|
|
};
|
|
|
|
module_platform_driver(stm32_rng_driver);
|
|
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
|
|
index 113152425a95..ea23002be4de 100644
|
|
--- a/drivers/clk/at91/clk-generated.c
|
|
+++ b/drivers/clk/at91/clk-generated.c
|
|
@@ -284,7 +284,7 @@ static void clk_generated_startup(struct clk_generated *gck)
|
|
static struct clk_hw * __init
|
|
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
|
|
const char *name, const char **parent_names,
|
|
- u8 num_parents, u8 id,
|
|
+ u8 num_parents, u8 id, bool pll_audio,
|
|
const struct clk_range *range)
|
|
{
|
|
struct clk_generated *gck;
|
|
@@ -308,6 +308,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
|
|
gck->regmap = regmap;
|
|
gck->lock = lock;
|
|
gck->range = *range;
|
|
+ gck->audio_pll_allowed = pll_audio;
|
|
|
|
clk_generated_startup(gck);
|
|
hw = &gck->hw;
|
|
@@ -333,7 +334,6 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
|
|
struct device_node *gcknp;
|
|
struct clk_range range = CLK_RANGE(0, 0);
|
|
struct regmap *regmap;
|
|
- struct clk_generated *gck;
|
|
|
|
num_parents = of_clk_get_parent_count(np);
|
|
if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
|
|
@@ -350,6 +350,8 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
|
|
return;
|
|
|
|
for_each_child_of_node(np, gcknp) {
|
|
+ bool pll_audio = false;
|
|
+
|
|
if (of_property_read_u32(gcknp, "reg", &id))
|
|
continue;
|
|
|
|
@@ -362,24 +364,14 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
|
|
of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
|
|
&range);
|
|
|
|
+ if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") &&
|
|
+ (id == GCK_ID_I2S0 || id == GCK_ID_I2S1 ||
|
|
+ id == GCK_ID_CLASSD))
|
|
+ pll_audio = true;
|
|
+
|
|
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
|
|
parent_names, num_parents,
|
|
- id, &range);
|
|
-
|
|
- gck = to_clk_generated(hw);
|
|
-
|
|
- if (of_device_is_compatible(np,
|
|
- "atmel,sama5d2-clk-generated")) {
|
|
- if (gck->id == GCK_ID_SSC0 || gck->id == GCK_ID_SSC1 ||
|
|
- gck->id == GCK_ID_I2S0 || gck->id == GCK_ID_I2S1 ||
|
|
- gck->id == GCK_ID_CLASSD)
|
|
- gck->audio_pll_allowed = true;
|
|
- else
|
|
- gck->audio_pll_allowed = false;
|
|
- } else {
|
|
- gck->audio_pll_allowed = false;
|
|
- }
|
|
-
|
|
+ id, pll_audio, &range);
|
|
if (IS_ERR(hw))
|
|
continue;
|
|
|
|
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
|
|
index 2f97a843d6d6..90988e7a5b47 100644
|
|
--- a/drivers/clk/at91/clk-main.c
|
|
+++ b/drivers/clk/at91/clk-main.c
|
|
@@ -162,7 +162,7 @@ at91_clk_register_main_osc(struct regmap *regmap,
|
|
if (bypass)
|
|
regmap_update_bits(regmap,
|
|
AT91_CKGR_MOR, MOR_KEY_MASK |
|
|
- AT91_PMC_MOSCEN,
|
|
+ AT91_PMC_OSCBYPASS,
|
|
AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
|
|
|
|
hw = &osc->hw;
|
|
@@ -354,7 +354,10 @@ static int clk_main_probe_frequency(struct regmap *regmap)
|
|
regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
|
|
if (mcfr & AT91_PMC_MAINRDY)
|
|
return 0;
|
|
- usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
|
|
+ if (system_state < SYSTEM_RUNNING)
|
|
+ udelay(MAINF_LOOP_MIN_WAIT);
|
|
+ else
|
|
+ usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
|
|
} while (time_before(prep_time, timeout));
|
|
|
|
return -ETIMEDOUT;
|
|
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
|
|
index ab6ecefc49ad..43ba2a8b03fa 100644
|
|
--- a/drivers/clk/at91/sckc.c
|
|
+++ b/drivers/clk/at91/sckc.c
|
|
@@ -74,7 +74,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw)
|
|
|
|
writel(tmp | AT91_SCKC_OSC32EN, sckcr);
|
|
|
|
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
|
|
+ if (system_state < SYSTEM_RUNNING)
|
|
+ udelay(osc->startup_usec);
|
|
+ else
|
|
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
|
|
|
|
return 0;
|
|
}
|
|
@@ -197,7 +200,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
|
|
|
|
writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr);
|
|
|
|
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
|
|
+ if (system_state < SYSTEM_RUNNING)
|
|
+ udelay(osc->startup_usec);
|
|
+ else
|
|
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
|
|
|
|
return 0;
|
|
}
|
|
@@ -310,7 +316,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
|
|
|
|
writel(tmp, sckcr);
|
|
|
|
- usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
|
|
+ if (system_state < SYSTEM_RUNNING)
|
|
+ udelay(SLOWCK_SW_TIME_USEC);
|
|
+ else
|
|
+ usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
|
|
|
|
return 0;
|
|
}
|
|
@@ -443,7 +452,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
|
|
return 0;
|
|
}
|
|
|
|
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
|
|
+ if (system_state < SYSTEM_RUNNING)
|
|
+ udelay(osc->startup_usec);
|
|
+ else
|
|
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
|
|
osc->prepared = true;
|
|
|
|
return 0;
|
|
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
|
|
index 92168348ffa6..f2d27addf485 100644
|
|
--- a/drivers/clk/meson/gxbb.c
|
|
+++ b/drivers/clk/meson/gxbb.c
|
|
@@ -687,6 +687,7 @@ static struct clk_divider gxbb_sar_adc_clk_div = {
|
|
.ops = &clk_divider_ops,
|
|
.parent_names = (const char *[]){ "sar_adc_clk_sel" },
|
|
.num_parents = 1,
|
|
+ .flags = CLK_SET_RATE_PARENT,
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
|
|
index a882f7038bce..47a14f93f869 100644
|
|
--- a/drivers/clk/samsung/clk-exynos5420.c
|
|
+++ b/drivers/clk/samsung/clk-exynos5420.c
|
|
@@ -170,12 +170,18 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
|
|
GATE_BUS_CPU,
|
|
GATE_SCLK_CPU,
|
|
CLKOUT_CMU_CPU,
|
|
+ CPLL_CON0,
|
|
+ DPLL_CON0,
|
|
EPLL_CON0,
|
|
EPLL_CON1,
|
|
EPLL_CON2,
|
|
RPLL_CON0,
|
|
RPLL_CON1,
|
|
RPLL_CON2,
|
|
+ IPLL_CON0,
|
|
+ SPLL_CON0,
|
|
+ VPLL_CON0,
|
|
+ MPLL_CON0,
|
|
SRC_TOP0,
|
|
SRC_TOP1,
|
|
SRC_TOP2,
|
|
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
|
|
index 8936ef87652c..c14bf782b2b3 100644
|
|
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
|
|
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
|
|
@@ -1231,7 +1231,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)
|
|
|
|
/* Enforce d1 = 0, d2 = 0 for Audio PLL */
|
|
val = readl(reg + SUN9I_A80_PLL_AUDIO_REG);
|
|
- val &= (BIT(16) & BIT(18));
|
|
+ val &= ~(BIT(16) | BIT(18));
|
|
writel(val, reg + SUN9I_A80_PLL_AUDIO_REG);
|
|
|
|
/* Enforce P = 1 for both CPU cluster PLLs */
|
|
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
|
|
index 148815470431..beb672a215b6 100644
|
|
--- a/drivers/clk/ti/clk-dra7-atl.c
|
|
+++ b/drivers/clk/ti/clk-dra7-atl.c
|
|
@@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
|
|
struct clk_init_data init = { NULL };
|
|
const char **parent_names = NULL;
|
|
struct clk *clk;
|
|
- int ret;
|
|
|
|
clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
|
|
if (!clk_hw) {
|
|
@@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
|
|
clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
|
|
|
|
if (!IS_ERR(clk)) {
|
|
- ret = ti_clk_add_alias(NULL, clk, node->name);
|
|
- if (ret) {
|
|
- clk_unregister(clk);
|
|
- goto cleanup;
|
|
- }
|
|
of_clk_add_provider(node, of_clk_src_simple_get, clk);
|
|
kfree(parent_names);
|
|
return;
|
|
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
|
|
index cdfe1c82f3f0..3928f3999015 100644
|
|
--- a/drivers/clocksource/timer-fttmr010.c
|
|
+++ b/drivers/clocksource/timer-fttmr010.c
|
|
@@ -21,7 +21,7 @@
|
|
#include <linux/delay.h>
|
|
|
|
/*
|
|
- * Register definitions for the timers
|
|
+ * Register definitions common for all the timer variants.
|
|
*/
|
|
#define TIMER1_COUNT (0x00)
|
|
#define TIMER1_LOAD (0x04)
|
|
@@ -36,9 +36,10 @@
|
|
#define TIMER3_MATCH1 (0x28)
|
|
#define TIMER3_MATCH2 (0x2c)
|
|
#define TIMER_CR (0x30)
|
|
-#define TIMER_INTR_STATE (0x34)
|
|
-#define TIMER_INTR_MASK (0x38)
|
|
|
|
+/*
|
|
+ * Control register (TMC30) bit fields for fttmr010/gemini/moxart timers.
|
|
+ */
|
|
#define TIMER_1_CR_ENABLE BIT(0)
|
|
#define TIMER_1_CR_CLOCK BIT(1)
|
|
#define TIMER_1_CR_INT BIT(2)
|
|
@@ -53,8 +54,9 @@
|
|
#define TIMER_3_CR_UPDOWN BIT(11)
|
|
|
|
/*
|
|
- * The Aspeed AST2400 moves bits around in the control register
|
|
- * and lacks bits for setting the timer to count upwards.
|
|
+ * Control register (TMC30) bit fields for aspeed ast2400/ast2500 timers.
|
|
+ * The aspeed timers move bits around in the control register and lacks
|
|
+ * bits for setting the timer to count upwards.
|
|
*/
|
|
#define TIMER_1_CR_ASPEED_ENABLE BIT(0)
|
|
#define TIMER_1_CR_ASPEED_CLOCK BIT(1)
|
|
@@ -66,6 +68,18 @@
|
|
#define TIMER_3_CR_ASPEED_CLOCK BIT(9)
|
|
#define TIMER_3_CR_ASPEED_INT BIT(10)
|
|
|
|
+/*
|
|
+ * Interrupt status/mask register definitions for fttmr010/gemini/moxart
|
|
+ * timers.
|
|
+ * The registers don't exist and they are not needed on aspeed timers
|
|
+ * because:
|
|
+ * - aspeed timer overflow interrupt is controlled by bits in Control
|
|
+ * Register (TMC30).
|
|
+ * - aspeed timers always generate interrupt when either one of the
|
|
+ * Match registers equals to Status register.
|
|
+ */
|
|
+#define TIMER_INTR_STATE (0x34)
|
|
+#define TIMER_INTR_MASK (0x38)
|
|
#define TIMER_1_INT_MATCH1 BIT(0)
|
|
#define TIMER_1_INT_MATCH2 BIT(1)
|
|
#define TIMER_1_INT_OVERFLOW BIT(2)
|
|
@@ -80,7 +94,7 @@
|
|
struct fttmr010 {
|
|
void __iomem *base;
|
|
unsigned int tick_rate;
|
|
- bool count_down;
|
|
+ bool is_aspeed;
|
|
u32 t1_enable_val;
|
|
struct clock_event_device clkevt;
|
|
#ifdef CONFIG_ARM
|
|
@@ -130,7 +144,7 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
|
|
cr &= ~fttmr010->t1_enable_val;
|
|
writel(cr, fttmr010->base + TIMER_CR);
|
|
|
|
- if (fttmr010->count_down) {
|
|
+ if (fttmr010->is_aspeed) {
|
|
/*
|
|
* ASPEED Timer Controller will load TIMER1_LOAD register
|
|
* into TIMER1_COUNT register when the timer is re-enabled.
|
|
@@ -175,16 +189,17 @@ static int fttmr010_timer_set_oneshot(struct clock_event_device *evt)
|
|
|
|
/* Setup counter start from 0 or ~0 */
|
|
writel(0, fttmr010->base + TIMER1_COUNT);
|
|
- if (fttmr010->count_down)
|
|
+ if (fttmr010->is_aspeed) {
|
|
writel(~0, fttmr010->base + TIMER1_LOAD);
|
|
- else
|
|
+ } else {
|
|
writel(0, fttmr010->base + TIMER1_LOAD);
|
|
|
|
- /* Enable interrupt */
|
|
- cr = readl(fttmr010->base + TIMER_INTR_MASK);
|
|
- cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
|
|
- cr |= TIMER_1_INT_MATCH1;
|
|
- writel(cr, fttmr010->base + TIMER_INTR_MASK);
|
|
+ /* Enable interrupt */
|
|
+ cr = readl(fttmr010->base + TIMER_INTR_MASK);
|
|
+ cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
|
|
+ cr |= TIMER_1_INT_MATCH1;
|
|
+ writel(cr, fttmr010->base + TIMER_INTR_MASK);
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
@@ -201,9 +216,8 @@ static int fttmr010_timer_set_periodic(struct clock_event_device *evt)
|
|
writel(cr, fttmr010->base + TIMER_CR);
|
|
|
|
/* Setup timer to fire at 1/HZ intervals. */
|
|
- if (fttmr010->count_down) {
|
|
+ if (fttmr010->is_aspeed) {
|
|
writel(period, fttmr010->base + TIMER1_LOAD);
|
|
- writel(0, fttmr010->base + TIMER1_MATCH1);
|
|
} else {
|
|
cr = 0xffffffff - (period - 1);
|
|
writel(cr, fttmr010->base + TIMER1_COUNT);
|
|
@@ -281,23 +295,21 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
|
|
}
|
|
|
|
/*
|
|
- * The Aspeed AST2400 moves bits around in the control register,
|
|
- * otherwise it works the same.
|
|
+ * The Aspeed timers move bits around in the control register.
|
|
*/
|
|
if (is_aspeed) {
|
|
fttmr010->t1_enable_val = TIMER_1_CR_ASPEED_ENABLE |
|
|
TIMER_1_CR_ASPEED_INT;
|
|
- /* Downward not available */
|
|
- fttmr010->count_down = true;
|
|
+ fttmr010->is_aspeed = true;
|
|
} else {
|
|
fttmr010->t1_enable_val = TIMER_1_CR_ENABLE | TIMER_1_CR_INT;
|
|
- }
|
|
|
|
- /*
|
|
- * Reset the interrupt mask and status
|
|
- */
|
|
- writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK);
|
|
- writel(0, fttmr010->base + TIMER_INTR_STATE);
|
|
+ /*
|
|
+ * Reset the interrupt mask and status
|
|
+ */
|
|
+ writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK);
|
|
+ writel(0, fttmr010->base + TIMER_INTR_STATE);
|
|
+ }
|
|
|
|
/*
|
|
* Enable timer 1 count up, timer 2 count up, except on Aspeed,
|
|
@@ -306,9 +318,8 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
|
|
if (is_aspeed)
|
|
val = TIMER_2_CR_ASPEED_ENABLE;
|
|
else {
|
|
- val = TIMER_2_CR_ENABLE;
|
|
- if (!fttmr010->count_down)
|
|
- val |= TIMER_1_CR_UPDOWN | TIMER_2_CR_UPDOWN;
|
|
+ val = TIMER_2_CR_ENABLE | TIMER_1_CR_UPDOWN |
|
|
+ TIMER_2_CR_UPDOWN;
|
|
}
|
|
writel(val, fttmr010->base + TIMER_CR);
|
|
|
|
@@ -321,7 +332,7 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
|
|
writel(0, fttmr010->base + TIMER2_MATCH1);
|
|
writel(0, fttmr010->base + TIMER2_MATCH2);
|
|
|
|
- if (fttmr010->count_down) {
|
|
+ if (fttmr010->is_aspeed) {
|
|
writel(~0, fttmr010->base + TIMER2_LOAD);
|
|
clocksource_mmio_init(fttmr010->base + TIMER2_COUNT,
|
|
"FTTMR010-TIMER2",
|
|
@@ -371,7 +382,7 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
|
|
|
|
#ifdef CONFIG_ARM
|
|
/* Also use this timer for delays */
|
|
- if (fttmr010->count_down)
|
|
+ if (fttmr010->is_aspeed)
|
|
fttmr010->delay_timer.read_current_timer =
|
|
fttmr010_read_current_timer_down;
|
|
else
|
|
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
|
|
index e01c46387df8..519086730791 100644
|
|
--- a/drivers/crypto/mxc-scc.c
|
|
+++ b/drivers/crypto/mxc-scc.c
|
|
@@ -178,12 +178,12 @@ static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
|
|
else
|
|
from = scc->black_memory;
|
|
|
|
- dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from,
|
|
+ dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
|
|
ctx->dst_nents * 8);
|
|
len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
|
|
from, ctx->size, ctx->offset);
|
|
if (!len) {
|
|
- dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len);
|
|
+ dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -274,7 +274,7 @@ static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
|
|
len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
|
|
to, len, ctx->offset);
|
|
if (!len) {
|
|
- dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len);
|
|
+ dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -335,9 +335,9 @@ static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
|
|
return;
|
|
}
|
|
|
|
- dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
|
|
- (void *)readl(scc->base + SCC_SCM_RED_START),
|
|
- (void *)readl(scc->base + SCC_SCM_BLACK_START));
|
|
+ dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
|
|
+ readl(scc->base + SCC_SCM_RED_START),
|
|
+ readl(scc->base + SCC_SCM_BLACK_START));
|
|
|
|
/* clear interrupt control registers */
|
|
writel(SCC_SCM_INTR_CTRL_CLR_INTR,
|
|
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
|
|
index 4835dd4a9e50..4909f820e953 100644
|
|
--- a/drivers/crypto/stm32/stm32-hash.c
|
|
+++ b/drivers/crypto/stm32/stm32-hash.c
|
|
@@ -361,7 +361,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
|
|
return -ETIMEDOUT;
|
|
|
|
if ((hdev->flags & HASH_FLAGS_HMAC) &&
|
|
- (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
|
|
+ (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
|
|
hdev->flags |= HASH_FLAGS_HMAC_KEY;
|
|
stm32_hash_write_key(hdev);
|
|
if (stm32_hash_wait_busy(hdev))
|
|
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
|
|
index 1d1612e28854..6fd4af647f59 100644
|
|
--- a/drivers/gpu/ipu-v3/ipu-pre.c
|
|
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
|
|
@@ -102,6 +102,7 @@ struct ipu_pre {
|
|
void *buffer_virt;
|
|
bool in_use;
|
|
unsigned int safe_window_end;
|
|
+ unsigned int last_bufaddr;
|
|
};
|
|
|
|
static DEFINE_MUTEX(ipu_pre_list_mutex);
|
|
@@ -177,6 +178,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
|
|
|
|
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
|
|
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
|
|
+ pre->last_bufaddr = bufaddr;
|
|
|
|
val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
|
|
IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
|
|
@@ -218,7 +220,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
|
|
unsigned short current_yblock;
|
|
u32 val;
|
|
|
|
+ if (bufaddr == pre->last_bufaddr)
|
|
+ return;
|
|
+
|
|
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
|
|
+ pre->last_bufaddr = bufaddr;
|
|
|
|
do {
|
|
if (time_after(jiffies, timeout)) {
|
|
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
|
|
index 0b0fa257299d..0c547bf841f4 100644
|
|
--- a/drivers/hid/hid-core.c
|
|
+++ b/drivers/hid/hid-core.c
|
|
@@ -195,6 +195,18 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
|
|
return 0; /* we know nothing about this usage type */
|
|
}
|
|
|
|
+/*
|
|
+ * Concatenate usage which defines 16 bits or less with the
|
|
+ * currently defined usage page to form a 32 bit usage
|
|
+ */
|
|
+
|
|
+static void complete_usage(struct hid_parser *parser, unsigned int index)
|
|
+{
|
|
+ parser->local.usage[index] &= 0xFFFF;
|
|
+ parser->local.usage[index] |=
|
|
+ (parser->global.usage_page & 0xFFFF) << 16;
|
|
+}
|
|
+
|
|
/*
|
|
* Add a usage to the temporary parser table.
|
|
*/
|
|
@@ -206,6 +218,14 @@ static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
|
|
return -1;
|
|
}
|
|
parser->local.usage[parser->local.usage_index] = usage;
|
|
+
|
|
+ /*
|
|
+ * If Usage item only includes usage id, concatenate it with
|
|
+ * currently defined usage page
|
|
+ */
|
|
+ if (size <= 2)
|
|
+ complete_usage(parser, parser->local.usage_index);
|
|
+
|
|
parser->local.usage_size[parser->local.usage_index] = size;
|
|
parser->local.collection_index[parser->local.usage_index] =
|
|
parser->collection_stack_ptr ?
|
|
@@ -522,13 +542,32 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
|
|
* usage value."
|
|
*/
|
|
|
|
-static void hid_concatenate_usage_page(struct hid_parser *parser)
|
|
+static void hid_concatenate_last_usage_page(struct hid_parser *parser)
|
|
{
|
|
int i;
|
|
+ unsigned int usage_page;
|
|
+ unsigned int current_page;
|
|
|
|
- for (i = 0; i < parser->local.usage_index; i++)
|
|
- if (parser->local.usage_size[i] <= 2)
|
|
- parser->local.usage[i] += parser->global.usage_page << 16;
|
|
+ if (!parser->local.usage_index)
|
|
+ return;
|
|
+
|
|
+ usage_page = parser->global.usage_page;
|
|
+
|
|
+ /*
|
|
+ * Concatenate usage page again only if last declared Usage Page
|
|
+ * has not been already used in previous usages concatenation
|
|
+ */
|
|
+ for (i = parser->local.usage_index - 1; i >= 0; i--) {
|
|
+ if (parser->local.usage_size[i] > 2)
|
|
+ /* Ignore extended usages */
|
|
+ continue;
|
|
+
|
|
+ current_page = parser->local.usage[i] >> 16;
|
|
+ if (current_page == usage_page)
|
|
+ break;
|
|
+
|
|
+ complete_usage(parser, i);
|
|
+ }
|
|
}
|
|
|
|
/*
|
|
@@ -540,7 +579,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
|
|
__u32 data;
|
|
int ret;
|
|
|
|
- hid_concatenate_usage_page(parser);
|
|
+ hid_concatenate_last_usage_page(parser);
|
|
|
|
data = item_udata(item);
|
|
|
|
@@ -751,7 +790,7 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
|
|
__u32 data;
|
|
int i;
|
|
|
|
- hid_concatenate_usage_page(parser);
|
|
+ hid_concatenate_last_usage_page(parser);
|
|
|
|
data = item_udata(item);
|
|
|
|
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
|
|
index cd23903ddcf1..e918d78e541c 100644
|
|
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
|
|
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
|
|
@@ -222,7 +222,7 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
|
|
err_hid_device:
|
|
kfree(hid_data);
|
|
err_hid_data:
|
|
- kfree(hid);
|
|
+ hid_destroy_device(hid);
|
|
return rv;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
|
|
index 891873b38a1e..5f3f197678b7 100644
|
|
--- a/drivers/infiniband/hw/qib/qib_sdma.c
|
|
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
|
|
@@ -600,8 +600,10 @@ retry:
|
|
dw = (len + 3) >> 2;
|
|
addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
|
|
dw << 2, DMA_TO_DEVICE);
|
|
- if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
|
|
+ if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
|
|
+ ret = -ENOMEM;
|
|
goto unmap;
|
|
+ }
|
|
sdmadesc[0] = 0;
|
|
make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
|
|
/* SDmaUseLargeBuf has to be set in every descriptor */
|
|
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
|
|
index aa533f08e017..5c7aa6ff1538 100644
|
|
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
|
|
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
|
|
@@ -550,7 +550,7 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
|
|
if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
- ah = kzalloc(sizeof(*ah), GFP_KERNEL);
|
|
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
|
|
if (!ah) {
|
|
atomic_dec(&dev->num_ahs);
|
|
return ERR_PTR(-ENOMEM);
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
|
|
index 6aeb7a165e46..ea4542a9d69e 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
|
|
@@ -59,7 +59,7 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev,
|
|
return -EINVAL;
|
|
|
|
for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_name); cnt++)
|
|
- stats->value[cnt] = dev->stats_counters[cnt];
|
|
+ stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]);
|
|
|
|
return ARRAY_SIZE(rxe_counter_name);
|
|
}
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
|
|
index b2b76a316eba..d1cc89f6f2e3 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
|
|
@@ -410,16 +410,16 @@ struct rxe_dev {
|
|
spinlock_t mmap_offset_lock; /* guard mmap_offset */
|
|
int mmap_offset;
|
|
|
|
- u64 stats_counters[RXE_NUM_OF_COUNTERS];
|
|
+ atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];
|
|
|
|
struct rxe_port port;
|
|
struct list_head list;
|
|
struct crypto_shash *tfm;
|
|
};
|
|
|
|
-static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters cnt)
|
|
+static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
|
|
{
|
|
- rxe->stats_counters[cnt]++;
|
|
+ atomic64_inc(&rxe->stats_counters[index]);
|
|
}
|
|
|
|
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
|
|
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
index 3f5b5893792c..9f7287f45d06 100644
|
|
--- a/drivers/infiniband/ulp/srp/ib_srp.c
|
|
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
@@ -2210,6 +2210,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
|
|
|
|
if (srp_post_send(ch, iu, len)) {
|
|
shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
|
|
+ scmnd->result = DID_ERROR << 16;
|
|
goto err_unmap;
|
|
}
|
|
|
|
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
|
|
index aa9f29b875de..d84e3b70215a 100644
|
|
--- a/drivers/input/serio/gscps2.c
|
|
+++ b/drivers/input/serio/gscps2.c
|
|
@@ -382,9 +382,9 @@ static int __init gscps2_probe(struct parisc_device *dev)
|
|
goto fail;
|
|
#endif
|
|
|
|
- printk(KERN_INFO "serio: %s port at 0x%p irq %d @ %s\n",
|
|
+ pr_info("serio: %s port at 0x%08lx irq %d @ %s\n",
|
|
ps2port->port->name,
|
|
- ps2port->addr,
|
|
+ hpa,
|
|
ps2port->padev->irq,
|
|
ps2port->port->phys);
|
|
|
|
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
|
|
index 8eef6849d066..5585823ced19 100644
|
|
--- a/drivers/input/serio/hp_sdc.c
|
|
+++ b/drivers/input/serio/hp_sdc.c
|
|
@@ -887,8 +887,8 @@ static int __init hp_sdc_init(void)
|
|
"HP SDC NMI", &hp_sdc))
|
|
goto err2;
|
|
|
|
- printk(KERN_INFO PREFIX "HP SDC at 0x%p, IRQ %d (NMI IRQ %d)\n",
|
|
- (void *)hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi);
|
|
+ pr_info(PREFIX "HP SDC at 0x%08lx, IRQ %d (NMI IRQ %d)\n",
|
|
+ hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi);
|
|
|
|
hp_sdc_status_in8();
|
|
hp_sdc_data_in8();
|
|
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
|
|
index 07b6cf58fd99..d09c24825734 100644
|
|
--- a/drivers/iommu/amd_iommu.c
|
|
+++ b/drivers/iommu/amd_iommu.c
|
|
@@ -139,10 +139,14 @@ static struct lock_class_key reserved_rbtree_key;
|
|
static inline int match_hid_uid(struct device *dev,
|
|
struct acpihid_map_entry *entry)
|
|
{
|
|
+ struct acpi_device *adev = ACPI_COMPANION(dev);
|
|
const char *hid, *uid;
|
|
|
|
- hid = acpi_device_hid(ACPI_COMPANION(dev));
|
|
- uid = acpi_device_uid(ACPI_COMPANION(dev));
|
|
+ if (!adev)
|
|
+ return -ENODEV;
|
|
+
|
|
+ hid = acpi_device_hid(adev);
|
|
+ uid = acpi_device_uid(adev);
|
|
|
|
if (!hid || !(*hid))
|
|
return -ENODEV;
|
|
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
|
|
index 93f3d4d61fa7..546ba140f83d 100644
|
|
--- a/drivers/mailbox/mailbox-test.c
|
|
+++ b/drivers/mailbox/mailbox-test.c
|
|
@@ -363,22 +363,24 @@ static int mbox_test_probe(struct platform_device *pdev)
|
|
|
|
/* It's okay for MMIO to be NULL */
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
- size = resource_size(res);
|
|
tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
|
|
- if (PTR_ERR(tdev->tx_mmio) == -EBUSY)
|
|
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
|
|
/* if reserved area in SRAM, try just ioremap */
|
|
+ size = resource_size(res);
|
|
tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
|
|
- else if (IS_ERR(tdev->tx_mmio))
|
|
+ } else if (IS_ERR(tdev->tx_mmio)) {
|
|
tdev->tx_mmio = NULL;
|
|
+ }
|
|
|
|
/* If specified, second reg entry is Rx MMIO */
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
|
- size = resource_size(res);
|
|
tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
|
|
- if (PTR_ERR(tdev->rx_mmio) == -EBUSY)
|
|
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
|
|
+ size = resource_size(res);
|
|
tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
|
|
- else if (IS_ERR(tdev->rx_mmio))
|
|
+ } else if (IS_ERR(tdev->rx_mmio)) {
|
|
tdev->rx_mmio = tdev->tx_mmio;
|
|
+ }
|
|
|
|
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
|
|
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
|
|
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
|
|
index 0c1ef63c3461..b1b68e01b889 100644
|
|
--- a/drivers/md/dm-flakey.c
|
|
+++ b/drivers/md/dm-flakey.c
|
|
@@ -282,20 +282,31 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
|
|
|
|
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
|
|
{
|
|
- unsigned bio_bytes = bio_cur_bytes(bio);
|
|
- char *data = bio_data(bio);
|
|
+ unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
|
|
+
|
|
+ struct bvec_iter iter;
|
|
+ struct bio_vec bvec;
|
|
+
|
|
+ if (!bio_has_data(bio))
|
|
+ return;
|
|
|
|
/*
|
|
- * Overwrite the Nth byte of the data returned.
|
|
+ * Overwrite the Nth byte of the bio's data, on whichever page
|
|
+ * it falls.
|
|
*/
|
|
- if (data && bio_bytes >= fc->corrupt_bio_byte) {
|
|
- data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
|
|
-
|
|
- DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
|
|
- "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
|
|
- bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
|
|
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
|
|
- (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
|
|
+ bio_for_each_segment(bvec, bio, iter) {
|
|
+ if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
|
|
+ char *segment = (page_address(bio_iter_page(bio, iter))
|
|
+ + bio_iter_offset(bio, iter));
|
|
+ segment[corrupt_bio_byte] = fc->corrupt_bio_value;
|
|
+ DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
|
|
+ "(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
|
|
+ bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
|
|
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
|
|
+ (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
|
|
+ break;
|
|
+ }
|
|
+ corrupt_bio_byte -= bio_iter_len(bio, iter);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
|
|
index d7103c5f92c3..0dea3cf2cb52 100644
|
|
--- a/drivers/media/platform/atmel/atmel-isc.c
|
|
+++ b/drivers/media/platform/atmel/atmel-isc.c
|
|
@@ -1555,6 +1555,8 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
|
|
struct vb2_queue *q = &isc->vb2_vidq;
|
|
int ret;
|
|
|
|
+ INIT_WORK(&isc->awb_work, isc_awb_work);
|
|
+
|
|
ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
|
|
if (ret < 0) {
|
|
v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
|
|
@@ -1614,8 +1616,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
|
|
return ret;
|
|
}
|
|
|
|
- INIT_WORK(&isc->awb_work, isc_awb_work);
|
|
-
|
|
/* Register video device */
|
|
strlcpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
|
|
vdev->release = video_device_release_empty;
|
|
@@ -1722,8 +1722,11 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
|
|
break;
|
|
}
|
|
|
|
- subdev_entity->asd = devm_kzalloc(dev,
|
|
- sizeof(*subdev_entity->asd), GFP_KERNEL);
|
|
+ /* asd will be freed by the subsystem once it's added to the
|
|
+ * notifier list
|
|
+ */
|
|
+ subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
|
|
+ GFP_KERNEL);
|
|
if (subdev_entity->asd == NULL) {
|
|
of_node_put(rem);
|
|
ret = -ENOMEM;
|
|
@@ -1859,6 +1862,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
|
|
&subdev_entity->notifier);
|
|
if (ret) {
|
|
dev_err(dev, "fail to register async notifier\n");
|
|
+ kfree(subdev_entity->asd);
|
|
goto cleanup_subdev;
|
|
}
|
|
|
|
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
|
|
index 4281f3f76ab1..f157ccbd8286 100644
|
|
--- a/drivers/media/platform/stm32/stm32-dcmi.c
|
|
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
|
|
@@ -161,6 +161,9 @@ struct stm32_dcmi {
|
|
u32 misr;
|
|
int errors_count;
|
|
int buffers_count;
|
|
+
|
|
+ /* Ensure DMA operations atomicity */
|
|
+ struct mutex dma_lock;
|
|
};
|
|
|
|
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
|
|
@@ -291,6 +294,13 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
|
|
return ret;
|
|
}
|
|
|
|
+ /*
|
|
+ * Avoid call of dmaengine_terminate_all() between
|
|
+ * dmaengine_prep_slave_single() and dmaengine_submit()
|
|
+ * by locking the whole DMA submission sequence
|
|
+ */
|
|
+ mutex_lock(&dcmi->dma_lock);
|
|
+
|
|
/* Prepare a DMA transaction */
|
|
desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
|
|
buf->size,
|
|
@@ -298,6 +308,7 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
|
|
if (!desc) {
|
|
dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer size %zu\n",
|
|
__func__, buf->size);
|
|
+ mutex_unlock(&dcmi->dma_lock);
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -309,9 +320,12 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
|
|
dcmi->dma_cookie = dmaengine_submit(desc);
|
|
if (dma_submit_error(dcmi->dma_cookie)) {
|
|
dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
|
|
+ mutex_unlock(&dcmi->dma_lock);
|
|
return -ENXIO;
|
|
}
|
|
|
|
+ mutex_unlock(&dcmi->dma_lock);
|
|
+
|
|
dma_async_issue_pending(dcmi->dma_chan);
|
|
|
|
return 0;
|
|
@@ -690,7 +704,9 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
|
|
spin_unlock_irq(&dcmi->irqlock);
|
|
|
|
/* Stop all pending DMA operations */
|
|
+ mutex_lock(&dcmi->dma_lock);
|
|
dmaengine_terminate_all(dcmi->dma_chan);
|
|
+ mutex_unlock(&dcmi->dma_lock);
|
|
|
|
clk_disable(dcmi->mclk);
|
|
|
|
@@ -1662,6 +1678,7 @@ static int dcmi_probe(struct platform_device *pdev)
|
|
|
|
spin_lock_init(&dcmi->irqlock);
|
|
mutex_init(&dcmi->lock);
|
|
+ mutex_init(&dcmi->dma_lock);
|
|
init_completion(&dcmi->complete);
|
|
INIT_LIST_HEAD(&dcmi->buffers);
|
|
|
|
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
|
|
index 1ee072e939e4..34d6ae43fc45 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
|
|
@@ -1014,6 +1014,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
|
|
case V4L2_CID_FLASH_STROBE_STOP:
|
|
case V4L2_CID_AUTO_FOCUS_START:
|
|
case V4L2_CID_AUTO_FOCUS_STOP:
|
|
+ case V4L2_CID_DO_WHITE_BALANCE:
|
|
*type = V4L2_CTRL_TYPE_BUTTON;
|
|
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
|
|
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
|
|
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
|
|
index 37b13bc5c16f..8f6ab516041b 100644
|
|
--- a/drivers/misc/mei/bus.c
|
|
+++ b/drivers/misc/mei/bus.c
|
|
@@ -850,15 +850,16 @@ static const struct device_type mei_cl_device_type = {
|
|
|
|
/**
|
|
* mei_cl_bus_set_name - set device name for me client device
|
|
+ * <controller>-<client device>
|
|
+ * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
|
|
*
|
|
* @cldev: me client device
|
|
*/
|
|
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
|
|
{
|
|
- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
|
|
- cldev->name,
|
|
- mei_me_cl_uuid(cldev->me_cl),
|
|
- mei_me_cl_ver(cldev->me_cl));
|
|
+ dev_set_name(&cldev->dev, "%s-%pUl",
|
|
+ dev_name(cldev->bus->dev),
|
|
+ mei_me_cl_uuid(cldev->me_cl));
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
|
|
index 08a55c2e96e1..53ce1bb83d2c 100644
|
|
--- a/drivers/mmc/host/meson-gx-mmc.c
|
|
+++ b/drivers/mmc/host/meson-gx-mmc.c
|
|
@@ -21,6 +21,7 @@
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/init.h>
|
|
+#include <linux/delay.h>
|
|
#include <linux/device.h>
|
|
#include <linux/of_device.h>
|
|
#include <linux/platform_device.h>
|
|
@@ -74,9 +75,11 @@
|
|
#define CFG_CLK_ALWAYS_ON BIT(18)
|
|
#define CFG_CHK_DS BIT(20)
|
|
#define CFG_AUTO_CLK BIT(23)
|
|
+#define CFG_ERR_ABORT BIT(27)
|
|
|
|
#define SD_EMMC_STATUS 0x48
|
|
#define STATUS_BUSY BIT(31)
|
|
+#define STATUS_DESC_BUSY BIT(30)
|
|
#define STATUS_DATI GENMASK(23, 16)
|
|
|
|
#define SD_EMMC_IRQ_EN 0x4c
|
|
@@ -905,6 +908,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
|
|
|
|
cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
|
|
cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
|
|
+ cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
|
|
|
|
meson_mmc_set_response_bits(cmd, &cmd_cfg);
|
|
|
|
@@ -999,6 +1003,17 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
|
|
u32 irq_en, status, raw_status;
|
|
irqreturn_t ret = IRQ_NONE;
|
|
|
|
+ irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
|
|
+ raw_status = readl(host->regs + SD_EMMC_STATUS);
|
|
+ status = raw_status & irq_en;
|
|
+
|
|
+ if (!status) {
|
|
+ dev_dbg(host->dev,
|
|
+ "Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
|
|
+ irq_en, raw_status);
|
|
+ return IRQ_NONE;
|
|
+ }
|
|
+
|
|
if (WARN_ON(!host) || WARN_ON(!host->cmd))
|
|
return IRQ_NONE;
|
|
|
|
@@ -1006,22 +1021,18 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
|
|
|
|
cmd = host->cmd;
|
|
data = cmd->data;
|
|
- irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
|
|
- raw_status = readl(host->regs + SD_EMMC_STATUS);
|
|
- status = raw_status & irq_en;
|
|
-
|
|
cmd->error = 0;
|
|
if (status & IRQ_CRC_ERR) {
|
|
dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
|
|
cmd->error = -EILSEQ;
|
|
- ret = IRQ_HANDLED;
|
|
+ ret = IRQ_WAKE_THREAD;
|
|
goto out;
|
|
}
|
|
|
|
if (status & IRQ_TIMEOUTS) {
|
|
dev_dbg(host->dev, "Timeout - status 0x%08x\n", status);
|
|
cmd->error = -ETIMEDOUT;
|
|
- ret = IRQ_HANDLED;
|
|
+ ret = IRQ_WAKE_THREAD;
|
|
goto out;
|
|
}
|
|
|
|
@@ -1046,17 +1057,49 @@ out:
|
|
/* ack all enabled interrupts */
|
|
writel(irq_en, host->regs + SD_EMMC_STATUS);
|
|
|
|
+ if (cmd->error) {
|
|
+ /* Stop desc in case of errors */
|
|
+ u32 start = readl(host->regs + SD_EMMC_START);
|
|
+
|
|
+ start &= ~START_DESC_BUSY;
|
|
+ writel(start, host->regs + SD_EMMC_START);
|
|
+ }
|
|
+
|
|
if (ret == IRQ_HANDLED)
|
|
meson_mmc_request_done(host->mmc, cmd->mrq);
|
|
- else if (ret == IRQ_NONE)
|
|
- dev_warn(host->dev,
|
|
- "Unexpected IRQ! status=0x%08x, irq_en=0x%08x\n",
|
|
- raw_status, irq_en);
|
|
|
|
spin_unlock(&host->lock);
|
|
return ret;
|
|
}
|
|
|
|
+static int meson_mmc_wait_desc_stop(struct meson_host *host)
|
|
+{
|
|
+ int loop;
|
|
+ u32 status;
|
|
+
|
|
+ /*
|
|
+ * It may sometimes take a while for it to actually halt. Here, we
|
|
+ * are giving it 5ms to comply
|
|
+ *
|
|
+ * If we don't confirm the descriptor is stopped, it might raise new
|
|
+ * IRQs after we have called mmc_request_done() which is bad.
|
|
+ */
|
|
+ for (loop = 50; loop; loop--) {
|
|
+ status = readl(host->regs + SD_EMMC_STATUS);
|
|
+ if (status & (STATUS_BUSY | STATUS_DESC_BUSY))
|
|
+ udelay(100);
|
|
+ else
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (status & (STATUS_BUSY | STATUS_DESC_BUSY)) {
|
|
+ dev_err(host->dev, "Timed out waiting for host to stop\n");
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
|
|
{
|
|
struct meson_host *host = dev_id;
|
|
@@ -1067,6 +1110,13 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
|
|
if (WARN_ON(!cmd))
|
|
return IRQ_NONE;
|
|
|
|
+ if (cmd->error) {
|
|
+ meson_mmc_wait_desc_stop(host);
|
|
+ meson_mmc_request_done(host->mmc, cmd->mrq);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+ }
|
|
+
|
|
data = cmd->data;
|
|
if (meson_mmc_bounce_buf_read(data)) {
|
|
xfer_bytes = data->blksz * data->blocks;
|
|
@@ -1107,6 +1157,9 @@ static void meson_mmc_cfg_init(struct meson_host *host)
|
|
cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
|
|
cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));
|
|
|
|
+ /* abort chain on R/W errors */
|
|
+ cfg |= CFG_ERR_ABORT;
|
|
+
|
|
writel(cfg, host->regs + SD_EMMC_CFG);
|
|
}
|
|
|
|
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
|
|
index 37accfd0400e..24480b75a88d 100644
|
|
--- a/drivers/mtd/mtdcore.h
|
|
+++ b/drivers/mtd/mtdcore.h
|
|
@@ -7,7 +7,7 @@
|
|
extern struct mutex mtd_table_mutex;
|
|
|
|
struct mtd_info *__mtd_next_device(int i);
|
|
-int add_mtd_device(struct mtd_info *mtd);
|
|
+int __must_check add_mtd_device(struct mtd_info *mtd);
|
|
int del_mtd_device(struct mtd_info *mtd);
|
|
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
|
|
int del_mtd_partitions(struct mtd_info *);
|
|
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
|
|
index a308e707392d..45626b0eed64 100644
|
|
--- a/drivers/mtd/mtdpart.c
|
|
+++ b/drivers/mtd/mtdpart.c
|
|
@@ -684,10 +684,21 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
|
|
list_add(&new->list, &mtd_partitions);
|
|
mutex_unlock(&mtd_partitions_mutex);
|
|
|
|
- add_mtd_device(&new->mtd);
|
|
+ ret = add_mtd_device(&new->mtd);
|
|
+ if (ret)
|
|
+ goto err_remove_part;
|
|
|
|
mtd_add_partition_attrs(new);
|
|
|
|
+ return 0;
|
|
+
|
|
+err_remove_part:
|
|
+ mutex_lock(&mtd_partitions_mutex);
|
|
+ list_del(&new->list);
|
|
+ mutex_unlock(&mtd_partitions_mutex);
|
|
+
|
|
+ free_partition(new);
|
|
+
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(mtd_add_partition);
|
|
@@ -778,22 +789,31 @@ int add_mtd_partitions(struct mtd_info *master,
|
|
{
|
|
struct mtd_part *slave;
|
|
uint64_t cur_offset = 0;
|
|
- int i;
|
|
+ int i, ret;
|
|
|
|
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
|
|
|
|
for (i = 0; i < nbparts; i++) {
|
|
slave = allocate_partition(master, parts + i, i, cur_offset);
|
|
if (IS_ERR(slave)) {
|
|
- del_mtd_partitions(master);
|
|
- return PTR_ERR(slave);
|
|
+ ret = PTR_ERR(slave);
|
|
+ goto err_del_partitions;
|
|
}
|
|
|
|
mutex_lock(&mtd_partitions_mutex);
|
|
list_add(&slave->list, &mtd_partitions);
|
|
mutex_unlock(&mtd_partitions_mutex);
|
|
|
|
- add_mtd_device(&slave->mtd);
|
|
+ ret = add_mtd_device(&slave->mtd);
|
|
+ if (ret) {
|
|
+ mutex_lock(&mtd_partitions_mutex);
|
|
+ list_del(&slave->list);
|
|
+ mutex_unlock(&mtd_partitions_mutex);
|
|
+
|
|
+ free_partition(slave);
|
|
+ goto err_del_partitions;
|
|
+ }
|
|
+
|
|
mtd_add_partition_attrs(slave);
|
|
if (parts[i].types)
|
|
mtd_parse_part(slave, parts[i].types);
|
|
@@ -802,6 +822,11 @@ int add_mtd_partitions(struct mtd_info *master,
|
|
}
|
|
|
|
return 0;
|
|
+
|
|
+err_del_partitions:
|
|
+ del_mtd_partitions(master);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static DEFINE_SPINLOCK(part_parser_lock);
|
|
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
|
|
index 0b93f152d993..d5a493e8ee08 100644
|
|
--- a/drivers/mtd/nand/atmel/nand-controller.c
|
|
+++ b/drivers/mtd/nand/atmel/nand-controller.c
|
|
@@ -1888,7 +1888,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
|
|
|
|
ret = of_property_read_u32(np, "#size-cells", &val);
|
|
if (ret) {
|
|
- dev_err(dev, "missing #address-cells property\n");
|
|
+ dev_err(dev, "missing #size-cells property\n");
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
|
|
index 4124bf91bee6..8cd153974e8d 100644
|
|
--- a/drivers/mtd/nand/atmel/pmecc.c
|
|
+++ b/drivers/mtd/nand/atmel/pmecc.c
|
|
@@ -875,23 +875,32 @@ static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev,
|
|
{
|
|
struct platform_device *pdev;
|
|
struct atmel_pmecc *pmecc, **ptr;
|
|
+ int ret;
|
|
|
|
pdev = of_find_device_by_node(np);
|
|
- if (!pdev || !platform_get_drvdata(pdev))
|
|
+ if (!pdev)
|
|
return ERR_PTR(-EPROBE_DEFER);
|
|
+ pmecc = platform_get_drvdata(pdev);
|
|
+ if (!pmecc) {
|
|
+ ret = -EPROBE_DEFER;
|
|
+ goto err_put_device;
|
|
+ }
|
|
|
|
ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL);
|
|
- if (!ptr)
|
|
- return ERR_PTR(-ENOMEM);
|
|
-
|
|
- get_device(&pdev->dev);
|
|
- pmecc = platform_get_drvdata(pdev);
|
|
+ if (!ptr) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_put_device;
|
|
+ }
|
|
|
|
*ptr = pmecc;
|
|
|
|
devres_add(userdev, ptr);
|
|
|
|
return pmecc;
|
|
+
|
|
+err_put_device:
|
|
+ put_device(&pdev->dev);
|
|
+ return ERR_PTR(ret);
|
|
}
|
|
|
|
static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 };
|
|
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
|
|
index 958974821582..8e5231482397 100644
|
|
--- a/drivers/mtd/nand/sunxi_nand.c
|
|
+++ b/drivers/mtd/nand/sunxi_nand.c
|
|
@@ -1435,7 +1435,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
|
|
sunxi_nfc_randomizer_enable(mtd);
|
|
|
|
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
|
|
- nfc->regs + NFC_REG_RCMD_SET);
|
|
+ nfc->regs + NFC_REG_WCMD_SET);
|
|
|
|
dma_async_issue_pending(nfc->dmac);
|
|
|
|
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
|
|
index 6c013341ef09..d550148177a0 100644
|
|
--- a/drivers/mtd/spi-nor/spi-nor.c
|
|
+++ b/drivers/mtd/spi-nor/spi-nor.c
|
|
@@ -2382,7 +2382,7 @@ static int spi_nor_init_params(struct spi_nor *nor,
|
|
memset(params, 0, sizeof(*params));
|
|
|
|
/* Set SPI NOR sizes. */
|
|
- params->size = info->sector_size * info->n_sectors;
|
|
+ params->size = (u64)info->sector_size * info->n_sectors;
|
|
params->page_size = info->page_size;
|
|
|
|
/* (Fast) Read settings. */
|
|
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
|
|
index 6445c693d935..0104d9537329 100644
|
|
--- a/drivers/mtd/ubi/build.c
|
|
+++ b/drivers/mtd/ubi/build.c
|
|
@@ -1092,10 +1092,10 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
|
|
ubi_wl_close(ubi);
|
|
ubi_free_internal_volumes(ubi);
|
|
vfree(ubi->vtbl);
|
|
- put_mtd_device(ubi->mtd);
|
|
vfree(ubi->peb_buf);
|
|
vfree(ubi->fm_buf);
|
|
ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
|
|
+ put_mtd_device(ubi->mtd);
|
|
put_device(&ubi->dev);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
|
|
index d4b2e8744498..c2cf6bd3c162 100644
|
|
--- a/drivers/mtd/ubi/kapi.c
|
|
+++ b/drivers/mtd/ubi/kapi.c
|
|
@@ -227,9 +227,9 @@ out_unlock:
|
|
out_free:
|
|
kfree(desc);
|
|
out_put_ubi:
|
|
- ubi_put_device(ubi);
|
|
ubi_err(ubi, "cannot open device %d, volume %d, error %d",
|
|
ubi_num, vol_id, err);
|
|
+ ubi_put_device(ubi);
|
|
return ERR_PTR(err);
|
|
}
|
|
EXPORT_SYMBOL_GPL(ubi_open_volume);
|
|
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
|
|
index 9b61bfbea6cd..24c6015f6c92 100644
|
|
--- a/drivers/net/can/c_can/c_can.c
|
|
+++ b/drivers/net/can/c_can/c_can.c
|
|
@@ -52,6 +52,7 @@
|
|
#define CONTROL_EX_PDR BIT(8)
|
|
|
|
/* control register */
|
|
+#define CONTROL_SWR BIT(15)
|
|
#define CONTROL_TEST BIT(7)
|
|
#define CONTROL_CCE BIT(6)
|
|
#define CONTROL_DISABLE_AR BIT(5)
|
|
@@ -572,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
|
|
IF_MCONT_RCV_EOB);
|
|
}
|
|
|
|
+static int c_can_software_reset(struct net_device *dev)
|
|
+{
|
|
+ struct c_can_priv *priv = netdev_priv(dev);
|
|
+ int retry = 0;
|
|
+
|
|
+ if (priv->type != BOSCH_D_CAN)
|
|
+ return 0;
|
|
+
|
|
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
|
|
+ while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
|
|
+ msleep(20);
|
|
+ if (retry++ > 100) {
|
|
+ netdev_err(dev, "CCTRL: software reset failed\n");
|
|
+ return -EIO;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
|
|
* Configure C_CAN chip:
|
|
* - enable/disable auto-retransmission
|
|
@@ -581,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
|
|
static int c_can_chip_config(struct net_device *dev)
|
|
{
|
|
struct c_can_priv *priv = netdev_priv(dev);
|
|
+ int err;
|
|
+
|
|
+ err = c_can_software_reset(dev);
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
/* enable automatic retransmission */
|
|
priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
|
|
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
|
|
index 1a7c183e6678..54ffd1e91a69 100644
|
|
--- a/drivers/net/can/rx-offload.c
|
|
+++ b/drivers/net/can/rx-offload.c
|
|
@@ -116,37 +116,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
|
|
return cb_b->timestamp - cb_a->timestamp;
|
|
}
|
|
|
|
-static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
|
|
+/**
|
|
+ * can_rx_offload_offload_one() - Read one CAN frame from HW
|
|
+ * @offload: pointer to rx_offload context
|
|
+ * @n: number of mailbox to read
|
|
+ *
|
|
+ * The task of this function is to read a CAN frame from mailbox @n
|
|
+ * from the device and return the mailbox's content as a struct
|
|
+ * sk_buff.
|
|
+ *
|
|
+ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
|
|
+ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
|
|
+ * allocated, the mailbox contents is discarded by reading it into an
|
|
+ * overflow buffer. This way the mailbox is marked as free by the
|
|
+ * driver.
|
|
+ *
|
|
+ * Return: A pointer to skb containing the CAN frame on success.
|
|
+ *
|
|
+ * NULL if the mailbox @n is empty.
|
|
+ *
|
|
+ * ERR_PTR() in case of an error
|
|
+ */
|
|
+static struct sk_buff *
|
|
+can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
|
|
{
|
|
- struct sk_buff *skb = NULL;
|
|
+ struct sk_buff *skb = NULL, *skb_error = NULL;
|
|
struct can_rx_offload_cb *cb;
|
|
struct can_frame *cf;
|
|
int ret;
|
|
|
|
- /* If queue is full or skb not available, read to discard mailbox */
|
|
- if (likely(skb_queue_len(&offload->skb_queue) <=
|
|
- offload->skb_queue_len_max))
|
|
+ if (likely(skb_queue_len(&offload->skb_queue) <
|
|
+ offload->skb_queue_len_max)) {
|
|
skb = alloc_can_skb(offload->dev, &cf);
|
|
+ if (unlikely(!skb))
|
|
+ skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
|
|
+ } else {
|
|
+ skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
|
|
+ }
|
|
|
|
- if (!skb) {
|
|
+ /* If queue is full or skb not available, drop by reading into
|
|
+ * overflow buffer.
|
|
+ */
|
|
+ if (unlikely(skb_error)) {
|
|
struct can_frame cf_overflow;
|
|
u32 timestamp;
|
|
|
|
ret = offload->mailbox_read(offload, &cf_overflow,
|
|
&timestamp, n);
|
|
- if (ret)
|
|
- offload->dev->stats.rx_dropped++;
|
|
|
|
- return NULL;
|
|
+ /* Mailbox was empty. */
|
|
+ if (unlikely(!ret))
|
|
+ return NULL;
|
|
+
|
|
+ /* Mailbox has been read and we're dropping it or
|
|
+ * there was a problem reading the mailbox.
|
|
+ *
|
|
+ * Increment error counters in any case.
|
|
+ */
|
|
+ offload->dev->stats.rx_dropped++;
|
|
+ offload->dev->stats.rx_fifo_errors++;
|
|
+
|
|
+ /* There was a problem reading the mailbox, propagate
|
|
+ * error value.
|
|
+ */
|
|
+ if (unlikely(ret < 0))
|
|
+ return ERR_PTR(ret);
|
|
+
|
|
+ return skb_error;
|
|
}
|
|
|
|
cb = can_rx_offload_get_cb(skb);
|
|
ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
|
|
- if (!ret) {
|
|
+
|
|
+ /* Mailbox was empty. */
|
|
+ if (unlikely(!ret)) {
|
|
kfree_skb(skb);
|
|
return NULL;
|
|
}
|
|
|
|
+ /* There was a problem reading the mailbox, propagate error value. */
|
|
+ if (unlikely(ret < 0)) {
|
|
+ kfree_skb(skb);
|
|
+
|
|
+ offload->dev->stats.rx_dropped++;
|
|
+ offload->dev->stats.rx_fifo_errors++;
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+
|
|
+ /* Mailbox was read. */
|
|
return skb;
|
|
}
|
|
|
|
@@ -166,8 +224,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
|
|
continue;
|
|
|
|
skb = can_rx_offload_offload_one(offload, i);
|
|
- if (!skb)
|
|
- break;
|
|
+ if (IS_ERR_OR_NULL(skb))
|
|
+ continue;
|
|
|
|
__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
|
|
}
|
|
@@ -197,7 +255,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
|
|
struct sk_buff *skb;
|
|
int received = 0;
|
|
|
|
- while ((skb = can_rx_offload_offload_one(offload, 0))) {
|
|
+ while (1) {
|
|
+ skb = can_rx_offload_offload_one(offload, 0);
|
|
+ if (IS_ERR(skb))
|
|
+ continue;
|
|
+ if (!skb)
|
|
+ break;
|
|
+
|
|
skb_queue_tail(&offload->skb_queue, skb);
|
|
received++;
|
|
}
|
|
@@ -261,8 +325,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
|
|
struct sk_buff *skb)
|
|
{
|
|
if (skb_queue_len(&offload->skb_queue) >
|
|
- offload->skb_queue_len_max)
|
|
- return -ENOMEM;
|
|
+ offload->skb_queue_len_max) {
|
|
+ kfree_skb(skb);
|
|
+ return -ENOBUFS;
|
|
+ }
|
|
|
|
skb_queue_tail(&offload->skb_queue, skb);
|
|
can_rx_offload_schedule(offload);
|
|
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
|
|
index e626c2afbbb1..0e1fc6c4360e 100644
|
|
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
|
|
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
|
|
@@ -441,8 +441,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
|
|
}
|
|
if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
|
|
/* no error (back to active state) */
|
|
- mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
|
|
- return 0;
|
|
+ new_state = CAN_STATE_ERROR_ACTIVE;
|
|
+ break;
|
|
}
|
|
break;
|
|
|
|
@@ -465,9 +465,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
|
|
}
|
|
|
|
if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
|
|
- /* no error (back to active state) */
|
|
- mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
|
|
- return 0;
|
|
+ /* no error (back to warning state) */
|
|
+ new_state = CAN_STATE_ERROR_WARNING;
|
|
+ break;
|
|
}
|
|
break;
|
|
|
|
@@ -506,6 +506,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
|
|
mc->pdev->dev.can.can_stats.error_warning++;
|
|
break;
|
|
|
|
+ case CAN_STATE_ERROR_ACTIVE:
|
|
+ cf->can_id |= CAN_ERR_CRTL;
|
|
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
|
|
+ break;
|
|
+
|
|
default:
|
|
/* CAN_STATE_MAX (trick to handle other errors) */
|
|
cf->can_id |= CAN_ERR_CRTL;
|
|
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
|
|
index af666951a959..94ad2fdd6ef0 100644
|
|
--- a/drivers/net/dsa/bcm_sf2.c
|
|
+++ b/drivers/net/dsa/bcm_sf2.c
|
|
@@ -432,11 +432,10 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
|
|
* send them to our master MDIO bus controller
|
|
*/
|
|
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
|
|
- bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
|
|
+ return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
|
|
else
|
|
- mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
|
|
-
|
|
- return 0;
|
|
+ return mdiobus_write_nested(priv->master_mii_bus, addr,
|
|
+ regnum, val);
|
|
}
|
|
|
|
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
|
|
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
|
|
index 4f7e195af0bc..0d08039981b5 100644
|
|
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
|
|
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
|
|
@@ -472,7 +472,9 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
|
|
{
|
|
struct atl1e_adapter *adapter = netdev_priv(netdev);
|
|
|
|
- atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
|
|
+ if (atl1e_write_phy_reg(&adapter->hw,
|
|
+ reg_num & MDIO_REG_ADDR_MASK, val))
|
|
+ netdev_err(netdev, "write phy register failed\n");
|
|
}
|
|
|
|
static int atl1e_mii_ioctl(struct net_device *netdev,
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
|
|
index a22336fef66b..fc8e185718a1 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
|
|
@@ -1339,14 +1339,22 @@ static int bnxt_flash_nvram(struct net_device *dev,
|
|
rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
|
|
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
|
|
|
|
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
|
|
+ netdev_info(dev,
|
|
+ "PF does not have admin privileges to flash the device\n");
|
|
+ rc = -EACCES;
|
|
+ } else if (rc) {
|
|
+ rc = -EIO;
|
|
+ }
|
|
return rc;
|
|
}
|
|
|
|
static int bnxt_firmware_reset(struct net_device *dev,
|
|
u16 dir_type)
|
|
{
|
|
- struct bnxt *bp = netdev_priv(dev);
|
|
struct hwrm_fw_reset_input req = {0};
|
|
+ struct bnxt *bp = netdev_priv(dev);
|
|
+ int rc;
|
|
|
|
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
|
|
|
|
@@ -1380,7 +1388,15 @@ static int bnxt_firmware_reset(struct net_device *dev,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
|
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
|
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
|
|
+ netdev_info(dev,
|
|
+ "PF does not have admin privileges to reset the device\n");
|
|
+ rc = -EACCES;
|
|
+ } else if (rc) {
|
|
+ rc = -EIO;
|
|
+ }
|
|
+ return rc;
|
|
}
|
|
|
|
static int bnxt_flash_firmware(struct net_device *dev,
|
|
@@ -1587,9 +1603,9 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
|
|
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
|
|
struct hwrm_nvm_install_update_input install = {0};
|
|
const struct firmware *fw;
|
|
+ int rc, hwrm_err = 0;
|
|
u32 item_len;
|
|
u16 index;
|
|
- int rc;
|
|
|
|
bnxt_hwrm_fw_set_time(bp);
|
|
|
|
@@ -1632,15 +1648,16 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
|
|
memcpy(kmem, fw->data, fw->size);
|
|
modify.host_src_addr = cpu_to_le64(dma_handle);
|
|
|
|
- rc = hwrm_send_message(bp, &modify, sizeof(modify),
|
|
- FLASH_PACKAGE_TIMEOUT);
|
|
+ hwrm_err = hwrm_send_message(bp, &modify,
|
|
+ sizeof(modify),
|
|
+ FLASH_PACKAGE_TIMEOUT);
|
|
dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
|
|
dma_handle);
|
|
}
|
|
}
|
|
release_firmware(fw);
|
|
- if (rc)
|
|
- return rc;
|
|
+ if (rc || hwrm_err)
|
|
+ goto err_exit;
|
|
|
|
if ((install_type & 0xffff) == 0)
|
|
install_type >>= 16;
|
|
@@ -1648,12 +1665,10 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
|
|
install.install_type = cpu_to_le32(install_type);
|
|
|
|
mutex_lock(&bp->hwrm_cmd_lock);
|
|
- rc = _hwrm_send_message(bp, &install, sizeof(install),
|
|
- INSTALL_PACKAGE_TIMEOUT);
|
|
- if (rc) {
|
|
- rc = -EOPNOTSUPP;
|
|
+ hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
|
|
+ INSTALL_PACKAGE_TIMEOUT);
|
|
+ if (hwrm_err)
|
|
goto flash_pkg_exit;
|
|
- }
|
|
|
|
if (resp->error_code) {
|
|
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
|
|
@@ -1661,12 +1676,11 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
|
|
if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
|
|
install.flags |= cpu_to_le16(
|
|
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
|
|
- rc = _hwrm_send_message(bp, &install, sizeof(install),
|
|
- INSTALL_PACKAGE_TIMEOUT);
|
|
- if (rc) {
|
|
- rc = -EOPNOTSUPP;
|
|
+ hwrm_err = _hwrm_send_message(bp, &install,
|
|
+ sizeof(install),
|
|
+ INSTALL_PACKAGE_TIMEOUT);
|
|
+ if (hwrm_err)
|
|
goto flash_pkg_exit;
|
|
- }
|
|
}
|
|
}
|
|
|
|
@@ -1677,6 +1691,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
|
|
}
|
|
flash_pkg_exit:
|
|
mutex_unlock(&bp->hwrm_cmd_lock);
|
|
+err_exit:
|
|
+ if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
|
|
+ netdev_info(dev,
|
|
+ "PF does not have admin privileges to flash the device\n");
|
|
+ rc = -EACCES;
|
|
+ } else if (hwrm_err) {
|
|
+ rc = -EOPNOTSUPP;
|
|
+ }
|
|
return rc;
|
|
}
|
|
|
|
@@ -2236,17 +2258,37 @@ static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
|
|
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
|
}
|
|
|
|
+static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
|
|
+{
|
|
+ struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
|
|
+ struct hwrm_port_phy_qcaps_input req = {0};
|
|
+ int rc;
|
|
+
|
|
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
|
|
+ mutex_lock(&bp->hwrm_cmd_lock);
|
|
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
|
+ if (!rc)
|
|
+ *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
|
|
+
|
|
+ mutex_unlock(&bp->hwrm_cmd_lock);
|
|
+ return rc;
|
|
+}
|
|
+
|
|
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
|
|
struct hwrm_port_phy_cfg_input *req)
|
|
{
|
|
struct bnxt_link_info *link_info = &bp->link_info;
|
|
- u16 fw_advertising = link_info->advertising;
|
|
+ u16 fw_advertising;
|
|
u16 fw_speed;
|
|
int rc;
|
|
|
|
if (!link_info->autoneg)
|
|
return 0;
|
|
|
|
+ rc = bnxt_query_force_speeds(bp, &fw_advertising);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+
|
|
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
|
|
if (netif_carrier_ok(bp->dev))
|
|
fw_speed = bp->link_info.link_speed;
|
|
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
|
|
index b6af286fa5c7..3e3044fe3206 100644
|
|
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
|
|
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
|
|
@@ -2611,8 +2611,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
|
|
}
|
|
|
|
if (status & UMAC_IRQ_PHY_DET_R &&
|
|
- priv->dev->phydev->autoneg != AUTONEG_ENABLE)
|
|
+ priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
|
|
phy_init_hw(priv->dev->phydev);
|
|
+ genphy_config_aneg(priv->dev->phydev);
|
|
+ }
|
|
|
|
/* Link UP/DOWN event */
|
|
if (status & UMAC_IRQ_LINK_EVENT)
|
|
@@ -3688,6 +3690,7 @@ static int bcmgenet_resume(struct device *d)
|
|
|
|
phy_init_hw(priv->phydev);
|
|
/* Speed settings must be restored */
|
|
+ genphy_config_aneg(dev->phydev);
|
|
bcmgenet_mii_config(priv->dev, false);
|
|
|
|
/* disable ethernet MAC while updating its registers */
|
|
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
|
|
index c93f3a2dc6c1..4c0bcfd1d250 100644
|
|
--- a/drivers/net/ethernet/cadence/macb.h
|
|
+++ b/drivers/net/ethernet/cadence/macb.h
|
|
@@ -457,7 +457,11 @@
|
|
|
|
/* Bitfields in TISUBN */
|
|
#define GEM_SUBNSINCR_OFFSET 0
|
|
-#define GEM_SUBNSINCR_SIZE 16
|
|
+#define GEM_SUBNSINCRL_OFFSET 24
|
|
+#define GEM_SUBNSINCRL_SIZE 8
|
|
+#define GEM_SUBNSINCRH_OFFSET 0
|
|
+#define GEM_SUBNSINCRH_SIZE 16
|
|
+#define GEM_SUBNSINCR_SIZE 24
|
|
|
|
/* Bitfields in TI */
|
|
#define GEM_NSINCR_OFFSET 0
|
|
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
|
|
index 2287749de087..5aff1b460151 100644
|
|
--- a/drivers/net/ethernet/cadence/macb_main.c
|
|
+++ b/drivers/net/ethernet/cadence/macb_main.c
|
|
@@ -852,7 +852,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
|
|
|
|
/* First, update TX stats if needed */
|
|
if (skb) {
|
|
- if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
|
|
+ if (unlikely(skb_shinfo(skb)->tx_flags &
|
|
+ SKBTX_HW_TSTAMP) &&
|
|
+ gem_ptp_do_txstamp(queue, skb, desc) == 0) {
|
|
/* skb now belongs to timestamp buffer
|
|
* and will be removed later
|
|
*/
|
|
@@ -2822,7 +2824,7 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
|
|
if (!err)
|
|
err = -ENODEV;
|
|
|
|
- dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
|
|
+ dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
|
|
return err;
|
|
}
|
|
|
|
@@ -2831,7 +2833,7 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
|
|
if (!err)
|
|
err = -ENODEV;
|
|
|
|
- dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
|
|
+ dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
|
|
return err;
|
|
}
|
|
|
|
@@ -2845,25 +2847,25 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
|
|
|
|
err = clk_prepare_enable(*pclk);
|
|
if (err) {
|
|
- dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
|
|
+ dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
|
|
return err;
|
|
}
|
|
|
|
err = clk_prepare_enable(*hclk);
|
|
if (err) {
|
|
- dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
|
|
+ dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
|
|
goto err_disable_pclk;
|
|
}
|
|
|
|
err = clk_prepare_enable(*tx_clk);
|
|
if (err) {
|
|
- dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
|
|
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
|
|
goto err_disable_hclk;
|
|
}
|
|
|
|
err = clk_prepare_enable(*rx_clk);
|
|
if (err) {
|
|
- dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
|
|
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
|
|
goto err_disable_txclk;
|
|
}
|
|
|
|
@@ -3298,7 +3300,7 @@ static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
|
|
|
|
err = clk_prepare_enable(*pclk);
|
|
if (err) {
|
|
- dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
|
|
+ dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
|
|
index 678835136bf8..f1f07e9d53f8 100755
|
|
--- a/drivers/net/ethernet/cadence/macb_ptp.c
|
|
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
|
|
@@ -115,7 +115,10 @@ static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
|
|
* to take effect.
|
|
*/
|
|
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
|
|
- gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
|
|
+ /* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */
|
|
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) |
|
|
+ GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >>
|
|
+ GEM_SUBNSINCRL_SIZE)));
|
|
gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
|
|
spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
|
|
|
|
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
|
|
index 0ae6532b02e0..62bc19bedb06 100644
|
|
--- a/drivers/net/ethernet/freescale/fec_main.c
|
|
+++ b/drivers/net/ethernet/freescale/fec_main.c
|
|
@@ -3565,6 +3565,11 @@ fec_drv_remove(struct platform_device *pdev)
|
|
struct net_device *ndev = platform_get_drvdata(pdev);
|
|
struct fec_enet_private *fep = netdev_priv(ndev);
|
|
struct device_node *np = pdev->dev.of_node;
|
|
+ int ret;
|
|
+
|
|
+ ret = pm_runtime_get_sync(&pdev->dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
|
|
cancel_work_sync(&fep->tx_timeout_work);
|
|
fec_ptp_stop(pdev);
|
|
@@ -3572,13 +3577,17 @@ fec_drv_remove(struct platform_device *pdev)
|
|
fec_enet_mii_remove(fep);
|
|
if (fep->reg_phy)
|
|
regulator_disable(fep->reg_phy);
|
|
- pm_runtime_put(&pdev->dev);
|
|
- pm_runtime_disable(&pdev->dev);
|
|
+
|
|
if (of_phy_is_fixed_link(np))
|
|
of_phy_deregister_fixed_link(np);
|
|
of_node_put(fep->phy_node);
|
|
free_netdev(ndev);
|
|
|
|
+ clk_disable_unprepare(fep->clk_ahb);
|
|
+ clk_disable_unprepare(fep->clk_ipg);
|
|
+ pm_runtime_put_noidle(&pdev->dev);
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
|
|
index 8b511e6e0ce9..396ea0db7102 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
|
|
@@ -251,6 +251,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
|
|
if ((enum hclge_cmd_return_status)desc_ret ==
|
|
HCLGE_CMD_EXEC_SUCCESS)
|
|
retval = 0;
|
|
+ else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
|
|
+ retval = -EOPNOTSUPP;
|
|
else
|
|
retval = -EIO;
|
|
hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
|
|
index 758cf3948131..3823ae6303ad 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
|
|
@@ -52,7 +52,7 @@ struct hclge_cmq_ring {
|
|
enum hclge_cmd_return_status {
|
|
HCLGE_CMD_EXEC_SUCCESS = 0,
|
|
HCLGE_CMD_NO_AUTH = 1,
|
|
- HCLGE_CMD_NOT_EXEC = 2,
|
|
+ HCLGE_CMD_NOT_SUPPORTED = 2,
|
|
HCLGE_CMD_QUEUE_FULL = 3,
|
|
};
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
index 97874c2568fc..1ac0e173da12 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
@@ -838,11 +838,9 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
|
|
|
|
priv->numa_node = dev_to_node(&dev->pdev->dev);
|
|
|
|
- priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
|
|
- if (!priv->dbg_root) {
|
|
- dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
|
|
- return -ENOMEM;
|
|
- }
|
|
+ if (mlx5_debugfs_root)
|
|
+ priv->dbg_root =
|
|
+ debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root);
|
|
|
|
err = mlx5_pci_enable_device(dev);
|
|
if (err) {
|
|
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
|
|
index 09352ee43b55..2d92a9fe4606 100644
|
|
--- a/drivers/net/ethernet/sfc/ef10.c
|
|
+++ b/drivers/net/ethernet/sfc/ef10.c
|
|
@@ -5852,22 +5852,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
|
|
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
|
|
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
|
|
};
|
|
+#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
|
|
|
|
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
|
|
struct efx_mcdi_mtd_partition *part,
|
|
- unsigned int type)
|
|
+ unsigned int type,
|
|
+ unsigned long *found)
|
|
{
|
|
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
|
|
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
|
|
const struct efx_ef10_nvram_type_info *info;
|
|
size_t size, erase_size, outlen;
|
|
+ int type_idx = 0;
|
|
bool protected;
|
|
int rc;
|
|
|
|
- for (info = efx_ef10_nvram_types; ; info++) {
|
|
- if (info ==
|
|
- efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
|
|
+ for (type_idx = 0; ; type_idx++) {
|
|
+ if (type_idx == EF10_NVRAM_PARTITION_COUNT)
|
|
return -ENODEV;
|
|
+ info = efx_ef10_nvram_types + type_idx;
|
|
if ((type & ~info->type_mask) == info->type)
|
|
break;
|
|
}
|
|
@@ -5880,6 +5883,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
|
|
if (protected)
|
|
return -ENODEV; /* hide it */
|
|
|
|
+ /* If we've already exposed a partition of this type, hide this
|
|
+ * duplicate. All operations on MTDs are keyed by the type anyway,
|
|
+ * so we can't act on the duplicate.
|
|
+ */
|
|
+ if (__test_and_set_bit(type_idx, found))
|
|
+ return -EEXIST;
|
|
+
|
|
part->nvram_type = type;
|
|
|
|
MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
|
|
@@ -5908,6 +5918,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
|
|
static int efx_ef10_mtd_probe(struct efx_nic *efx)
|
|
{
|
|
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
|
|
+ DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
|
|
struct efx_mcdi_mtd_partition *parts;
|
|
size_t outlen, n_parts_total, i, n_parts;
|
|
unsigned int type;
|
|
@@ -5936,11 +5947,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
|
|
for (i = 0; i < n_parts_total; i++) {
|
|
type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
|
|
i);
|
|
- rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
|
|
- if (rc == 0)
|
|
- n_parts++;
|
|
- else if (rc != -ENODEV)
|
|
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
|
|
+ found);
|
|
+ if (rc == -EEXIST || rc == -ENODEV)
|
|
+ continue;
|
|
+ if (rc)
|
|
goto fail;
|
|
+ n_parts++;
|
|
}
|
|
|
|
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
|
|
index d07520fb969e..62ccbd47c1db 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
|
|
@@ -59,7 +59,9 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
|
|
gmac->clk_enabled = 1;
|
|
} else {
|
|
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
|
|
- clk_prepare(gmac->tx_clk);
|
|
+ ret = clk_prepare(gmac->tx_clk);
|
|
+ if (ret)
|
|
+ return ret;
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
|
|
index e7b76f6b4f67..7d1281d81248 100644
|
|
--- a/drivers/net/ethernet/ti/cpts.c
|
|
+++ b/drivers/net/ethernet/ti/cpts.c
|
|
@@ -567,7 +567,9 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
|
|
return ERR_PTR(PTR_ERR(cpts->refclk));
|
|
}
|
|
|
|
- clk_prepare(cpts->refclk);
|
|
+ ret = clk_prepare(cpts->refclk);
|
|
+ if (ret)
|
|
+ return ERR_PTR(ret);
|
|
|
|
cpts->cc.read = cpts_systim_read;
|
|
cpts->cc.mask = CLOCKSOURCE_MASK(32);
|
|
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
|
|
index 963a02c988e9..8d5f88a538fc 100644
|
|
--- a/drivers/net/macvlan.c
|
|
+++ b/drivers/net/macvlan.c
|
|
@@ -363,10 +363,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
|
|
}
|
|
spin_unlock(&port->bc_queue.lock);
|
|
|
|
+ schedule_work(&port->bc_work);
|
|
+
|
|
if (err)
|
|
goto free_nskb;
|
|
|
|
- schedule_work(&port->bc_work);
|
|
return;
|
|
|
|
free_nskb:
|
|
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
|
|
index 2901b7db9d2e..d6dc00b4ba55 100644
|
|
--- a/drivers/net/slip/slip.c
|
|
+++ b/drivers/net/slip/slip.c
|
|
@@ -859,6 +859,7 @@ err_free_chan:
|
|
sl->tty = NULL;
|
|
tty->disc_data = NULL;
|
|
clear_bit(SLF_INUSE, &sl->flags);
|
|
+ sl_free_netdev(sl->dev);
|
|
free_netdev(sl->dev);
|
|
|
|
err_exit:
|
|
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
|
|
index 6d26bbd190dd..153a81ece9fe 100644
|
|
--- a/drivers/net/vxlan.c
|
|
+++ b/drivers/net/vxlan.c
|
|
@@ -3217,6 +3217,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
|
|
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
|
|
struct vxlan_dev *vxlan = netdev_priv(dev);
|
|
struct vxlan_fdb *f = NULL;
|
|
+ bool unregister = false;
|
|
int err;
|
|
|
|
err = vxlan_dev_configure(net, dev, conf, false, extack);
|
|
@@ -3242,12 +3243,11 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
|
|
err = register_netdevice(dev);
|
|
if (err)
|
|
goto errout;
|
|
+ unregister = true;
|
|
|
|
err = rtnl_configure_link(dev, NULL);
|
|
- if (err) {
|
|
- unregister_netdevice(dev);
|
|
+ if (err)
|
|
goto errout;
|
|
- }
|
|
|
|
/* notify default fdb entry */
|
|
if (f)
|
|
@@ -3255,9 +3255,16 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
|
|
|
|
list_add(&vxlan->next, &vn->vxlan_list);
|
|
return 0;
|
|
+
|
|
errout:
|
|
+ /* unregister_netdevice() destroys the default FDB entry with deletion
|
|
+ * notification. But the addition notification was not sent yet, so
|
|
+ * destroy the entry by hand here.
|
|
+ */
|
|
if (f)
|
|
vxlan_fdb_destroy(vxlan, f, false);
|
|
+ if (unregister)
|
|
+ unregister_netdevice(dev);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
|
|
index 18b648648adb..289dff262948 100644
|
|
--- a/drivers/net/wan/fsl_ucc_hdlc.c
|
|
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
|
|
@@ -1114,7 +1114,6 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
|
|
if (register_hdlc_device(dev)) {
|
|
ret = -ENOBUFS;
|
|
pr_err("ucc_hdlc: unable to register hdlc device\n");
|
|
- free_netdev(dev);
|
|
goto free_dev;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
|
|
index 414b5b596efc..37deb9bae364 100644
|
|
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
|
|
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
|
|
@@ -939,7 +939,7 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
|
|
else
|
|
ssid_list[i].flag = ANY_SSID_FLAG;
|
|
|
|
- if (n_match_ssid == 0)
|
|
+ if (ar->wiphy->max_match_sets != 0 && n_match_ssid == 0)
|
|
ssid_list[i].flag |= MATCH_SSID_FLAG;
|
|
}
|
|
|
|
@@ -1093,7 +1093,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
|
|
if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
|
|
for (i = 0; i < vif->scan_req->n_ssids; i++) {
|
|
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
|
|
- i + 1, DISABLE_SSID_FLAG,
|
|
+ i, DISABLE_SSID_FLAG,
|
|
0, NULL);
|
|
}
|
|
}
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
|
|
index 2acd94da9efe..051a2fea9572 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
|
|
@@ -1229,6 +1229,23 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
|
|
return 0;
|
|
}
|
|
|
|
+static int iwl_nvm_check_version(struct iwl_nvm_data *data,
|
|
+ struct iwl_trans *trans)
|
|
+{
|
|
+ if (data->nvm_version >= trans->cfg->nvm_ver ||
|
|
+ data->calib_version >= trans->cfg->nvm_calib_ver) {
|
|
+ IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
|
|
+ data->nvm_version, data->calib_version);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ IWL_ERR(trans,
|
|
+ "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
|
|
+ data->nvm_version, trans->cfg->nvm_ver,
|
|
+ data->calib_version, trans->cfg->nvm_calib_ver);
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
|
|
const struct iwl_cfg *cfg,
|
|
const struct iwl_fw *fw,
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
|
|
index 3199d345b427..92727f7e42db 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
|
|
@@ -928,22 +928,3 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
|
|
return NULL;
|
|
}
|
|
IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
|
|
-
|
|
-/* helper functions */
|
|
-int iwl_nvm_check_version(struct iwl_nvm_data *data,
|
|
- struct iwl_trans *trans)
|
|
-{
|
|
- if (data->nvm_version >= trans->cfg->nvm_ver ||
|
|
- data->calib_version >= trans->cfg->nvm_calib_ver) {
|
|
- IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
|
|
- data->nvm_version, data->calib_version);
|
|
- return 0;
|
|
- }
|
|
-
|
|
- IWL_ERR(trans,
|
|
- "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
|
|
- data->nvm_version, trans->cfg->nvm_ver,
|
|
- data->calib_version, trans->cfg->nvm_calib_ver);
|
|
- return -EINVAL;
|
|
-}
|
|
-IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
|
|
index b33888991b94..5545210151cd 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
|
|
@@ -7,6 +7,7 @@
|
|
*
|
|
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
|
|
* Copyright(c) 2015 Intel Mobile Communications GmbH
|
|
+ * Copyright (C) 2018 Intel Corporation
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of version 2 of the GNU General Public License as
|
|
@@ -33,6 +34,7 @@
|
|
*
|
|
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
|
* Copyright(c) 2015 Intel Mobile Communications GmbH
|
|
+ * Copyright (C) 2018 Intel Corporation
|
|
* All rights reserved.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
@@ -121,9 +123,6 @@ struct iwl_nvm_data *
|
|
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
|
|
const u8 *eeprom, size_t eeprom_size);
|
|
|
|
-int iwl_nvm_check_version(struct iwl_nvm_data *data,
|
|
- struct iwl_trans *trans);
|
|
-
|
|
int iwl_init_sband_channels(struct iwl_nvm_data *data,
|
|
struct ieee80211_supported_band *sband,
|
|
int n_channels, enum nl80211_band band);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
|
|
index 534c0ea7b232..78228f870f8f 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
|
|
@@ -501,7 +501,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
|
if (mvm->nvm_file_name)
|
|
iwl_mvm_load_nvm_to_nic(mvm);
|
|
|
|
- WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans));
|
|
+ WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
|
|
+ "Too old NVM version (0x%0x, required = 0x%0x)",
|
|
+ mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
|
|
|
|
/*
|
|
* abort after reading the nvm in case RF Kill is on, we will complete
|
|
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
|
|
index 6f4239be609d..49ca84ef1a99 100644
|
|
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
|
|
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
|
|
@@ -296,15 +296,13 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
|
|
"total samples = %d\n",
|
|
atomic_read(&phist_data->num_samples));
|
|
|
|
- p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M");
|
|
- p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n");
|
|
- p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M");
|
|
- p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
|
|
+ p += sprintf(p,
|
|
+ "rx rates (in Mbps): 0=1M 1=2M 2=5.5M 3=11M 4=6M 5=9M 6=12M\n"
|
|
+ "7=18M 8=24M 9=36M 10=48M 11=54M 12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
|
|
|
|
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
|
|
- p += sprintf(p, "44-53=MCS0-9(VHT:BW20)");
|
|
- p += sprintf(p, "54-63=MCS0-9(VHT:BW40)");
|
|
- p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n");
|
|
+ p += sprintf(p,
|
|
+ "44-53=MCS0-9(VHT:BW20) 54-63=MCS0-9(VHT:BW40) 64-73=MCS0-9(VHT:BW80)\n\n");
|
|
} else {
|
|
p += sprintf(p, "\n");
|
|
}
|
|
@@ -333,7 +331,7 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
|
|
for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) {
|
|
value = atomic_read(&phist_data->noise_flr[i]);
|
|
if (value)
|
|
- p += sprintf(p, "noise_flr[-%02ddBm] = %d\n",
|
|
+ p += sprintf(p, "noise_flr[%02ddBm] = %d\n",
|
|
(int)(i-128), value);
|
|
}
|
|
for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) {
|
|
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
|
|
index 67c334221077..c013c94fbf15 100644
|
|
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
|
|
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
|
|
@@ -1901,15 +1901,17 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
|
|
ETH_ALEN))
|
|
mwifiex_update_curr_bss_params(priv,
|
|
bss);
|
|
- cfg80211_put_bss(priv->wdev.wiphy, bss);
|
|
- }
|
|
|
|
- if ((chan->flags & IEEE80211_CHAN_RADAR) ||
|
|
- (chan->flags & IEEE80211_CHAN_NO_IR)) {
|
|
- mwifiex_dbg(adapter, INFO,
|
|
- "radar or passive channel %d\n",
|
|
- channel);
|
|
- mwifiex_save_hidden_ssid_channels(priv, bss);
|
|
+ if ((chan->flags & IEEE80211_CHAN_RADAR) ||
|
|
+ (chan->flags & IEEE80211_CHAN_NO_IR)) {
|
|
+ mwifiex_dbg(adapter, INFO,
|
|
+ "radar or passive channel %d\n",
|
|
+ channel);
|
|
+ mwifiex_save_hidden_ssid_channels(priv,
|
|
+ bss);
|
|
+ }
|
|
+
|
|
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
|
|
}
|
|
}
|
|
} else {
|
|
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
|
|
index 9a1d15b3ce45..518caaaf8a98 100644
|
|
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
|
|
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
|
|
@@ -444,12 +444,13 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
|
|
skb_queue_tail(&priv->rx_queue, skb);
|
|
usb_anchor_urb(entry, &priv->anchored);
|
|
ret = usb_submit_urb(entry, GFP_KERNEL);
|
|
- usb_put_urb(entry);
|
|
if (ret) {
|
|
skb_unlink(skb, &priv->rx_queue);
|
|
usb_unanchor_urb(entry);
|
|
+ usb_put_urb(entry);
|
|
goto err;
|
|
}
|
|
+ usb_put_urb(entry);
|
|
}
|
|
return ret;
|
|
|
|
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
|
|
index 536e9a5cd2b1..d66ef88e13cf 100644
|
|
--- a/drivers/pci/msi.c
|
|
+++ b/drivers/pci/msi.c
|
|
@@ -1156,7 +1156,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
|
|
const struct irq_affinity *affd)
|
|
{
|
|
static const struct irq_affinity msi_default_affd;
|
|
- int vecs = -ENOSPC;
|
|
+ int msix_vecs = -ENOSPC;
|
|
+ int msi_vecs = -ENOSPC;
|
|
|
|
if (flags & PCI_IRQ_AFFINITY) {
|
|
if (!affd)
|
|
@@ -1167,16 +1168,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
|
|
}
|
|
|
|
if (flags & PCI_IRQ_MSIX) {
|
|
- vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
|
|
- affd);
|
|
- if (vecs > 0)
|
|
- return vecs;
|
|
+ msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
|
|
+ max_vecs, affd);
|
|
+ if (msix_vecs > 0)
|
|
+ return msix_vecs;
|
|
}
|
|
|
|
if (flags & PCI_IRQ_MSI) {
|
|
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
|
|
- if (vecs > 0)
|
|
- return vecs;
|
|
+ msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
|
|
+ affd);
|
|
+ if (msi_vecs > 0)
|
|
+ return msi_vecs;
|
|
}
|
|
|
|
/* use legacy irq if allowed */
|
|
@@ -1187,7 +1189,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
|
|
}
|
|
}
|
|
|
|
- return vecs;
|
|
+ if (msix_vecs == -ENOSPC)
|
|
+ return -ENOSPC;
|
|
+ return msi_vecs;
|
|
}
|
|
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
|
|
|
|
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
|
|
index f9e98a7d4f0c..1b0c5958c56a 100644
|
|
--- a/drivers/pinctrl/pinctrl-xway.c
|
|
+++ b/drivers/pinctrl/pinctrl-xway.c
|
|
@@ -1748,14 +1748,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
|
|
}
|
|
xway_pctrl_desc.pins = xway_info.pads;
|
|
|
|
- /* register the gpio chip */
|
|
- xway_chip.parent = &pdev->dev;
|
|
- ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL);
|
|
- if (ret) {
|
|
- dev_err(&pdev->dev, "Failed to register gpio chip\n");
|
|
- return ret;
|
|
- }
|
|
-
|
|
/* setup the data needed by pinctrl */
|
|
xway_pctrl_desc.name = dev_name(&pdev->dev);
|
|
xway_pctrl_desc.npins = xway_chip.ngpio;
|
|
@@ -1777,10 +1769,33 @@ static int pinmux_xway_probe(struct platform_device *pdev)
|
|
return ret;
|
|
}
|
|
|
|
- /* finish with registering the gpio range in pinctrl */
|
|
- xway_gpio_range.npins = xway_chip.ngpio;
|
|
- xway_gpio_range.base = xway_chip.base;
|
|
- pinctrl_add_gpio_range(xway_info.pctrl, &xway_gpio_range);
|
|
+ /* register the gpio chip */
|
|
+ xway_chip.parent = &pdev->dev;
|
|
+ xway_chip.owner = THIS_MODULE;
|
|
+ xway_chip.of_node = pdev->dev.of_node;
|
|
+ ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL);
|
|
+ if (ret) {
|
|
+ dev_err(&pdev->dev, "Failed to register gpio chip\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * For DeviceTree-supported systems, the gpio core checks the
|
|
+ * pinctrl's device node for the "gpio-ranges" property.
|
|
+ * If it is present, it takes care of adding the pin ranges
|
|
+ * for the driver. In this case the driver can skip ahead.
|
|
+ *
|
|
+ * In order to remain compatible with older, existing DeviceTree
|
|
+ * files which don't set the "gpio-ranges" property or systems that
|
|
+ * utilize ACPI the driver has to call gpiochip_add_pin_range().
|
|
+ */
|
|
+ if (!of_property_read_bool(pdev->dev.of_node, "gpio-ranges")) {
|
|
+ /* finish with registering the gpio range in pinctrl */
|
|
+ xway_gpio_range.npins = xway_chip.ngpio;
|
|
+ xway_gpio_range.base = xway_chip.base;
|
|
+ pinctrl_add_gpio_range(xway_info.pctrl, &xway_gpio_range);
|
|
+ }
|
|
+
|
|
dev_info(&pdev->dev, "Init done\n");
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
|
|
index 8070765311db..e1c34e19222e 100644
|
|
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
|
|
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
|
|
@@ -1716,6 +1716,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
|
|
},
|
|
|
|
{ PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
|
PF12MD_000, PF12MD_001, 0, PF12MD_011,
|
|
PF12MD_100, PF12MD_101, 0, 0,
|
|
0, 0, 0, 0, 0, 0, 0, 0 }
|
|
@@ -1759,8 +1762,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
|
|
0, 0, 0, 0, 0, 0, 0, 0,
|
|
PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
|
|
PF1MD_100, PF1MD_101, 0, 0,
|
|
- 0, 0, 0, 0, 0, 0, 0, 0
|
|
- }
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0,
|
|
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
|
|
+ PF0MD_100, PF0MD_101, 0, 0,
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0 }
|
|
},
|
|
|
|
{ PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
|
|
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
|
|
index 6502e676d368..33232041ee86 100644
|
|
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
|
|
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
|
|
@@ -2213,22 +2213,22 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
|
|
/* IP10_22 [1] */
|
|
FN_CAN_CLK_A, FN_RX4_D,
|
|
/* IP10_21_19 [3] */
|
|
- FN_AUDIO_CLKOUT, FN_TX1_E, FN_HRTS0_C, FN_FSE_B,
|
|
- FN_LCD_M_DISP_B, 0, 0, 0,
|
|
+ FN_AUDIO_CLKOUT, FN_TX1_E, 0, FN_HRTS0_C, FN_FSE_B,
|
|
+ FN_LCD_M_DISP_B, 0, 0,
|
|
/* IP10_18_16 [3] */
|
|
- FN_AUDIO_CLKC, FN_SCK1_E, FN_HCTS0_C, FN_FRB_B,
|
|
- FN_LCD_VEPWC_B, 0, 0, 0,
|
|
+ FN_AUDIO_CLKC, FN_SCK1_E, 0, FN_HCTS0_C, FN_FRB_B,
|
|
+ FN_LCD_VEPWC_B, 0, 0,
|
|
/* IP10_15 [1] */
|
|
FN_AUDIO_CLKB_A, FN_LCD_CLK_B,
|
|
/* IP10_14_12 [3] */
|
|
FN_AUDIO_CLKA_A, FN_VI1_CLK_B, FN_SCK1_D, FN_IECLK_B,
|
|
FN_LCD_FLM_B, 0, 0, 0,
|
|
/* IP10_11_9 [3] */
|
|
- FN_SSI_SDATA3, FN_VI1_7_B, FN_HTX0_C, FN_FWE_B,
|
|
- FN_LCD_CL2_B, 0, 0, 0,
|
|
+ FN_SSI_SDATA3, FN_VI1_7_B, 0, FN_HTX0_C, FN_FWE_B,
|
|
+ FN_LCD_CL2_B, 0, 0,
|
|
/* IP10_8_6 [3] */
|
|
- FN_SSI_SDATA2, FN_VI1_6_B, FN_HRX0_C, FN_FRE_B,
|
|
- FN_LCD_CL1_B, 0, 0, 0,
|
|
+ FN_SSI_SDATA2, FN_VI1_6_B, 0, FN_HRX0_C, FN_FRE_B,
|
|
+ FN_LCD_CL1_B, 0, 0,
|
|
/* IP10_5_3 [3] */
|
|
FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B,
|
|
FN_LCD_DON_B, 0, 0, 0,
|
|
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
|
|
index 50299ad96659..072bd11074c6 100644
|
|
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
|
|
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
|
|
@@ -403,7 +403,7 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
|
unsigned int num_configs;
|
|
bool has_config = 0;
|
|
unsigned reserve = 0;
|
|
- int num_pins, num_funcs, maps_per_pin, i, err;
|
|
+ int num_pins, num_funcs, maps_per_pin, i, err = 0;
|
|
|
|
pctl = pinctrl_dev_get_drvdata(pctldev);
|
|
|
|
@@ -430,41 +430,45 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
|
if (has_config && num_pins >= 1)
|
|
maps_per_pin++;
|
|
|
|
- if (!num_pins || !maps_per_pin)
|
|
- return -EINVAL;
|
|
+ if (!num_pins || !maps_per_pin) {
|
|
+ err = -EINVAL;
|
|
+ goto exit;
|
|
+ }
|
|
|
|
reserve = num_pins * maps_per_pin;
|
|
|
|
err = pinctrl_utils_reserve_map(pctldev, map,
|
|
reserved_maps, num_maps, reserve);
|
|
if (err)
|
|
- return err;
|
|
+ goto exit;
|
|
|
|
for (i = 0; i < num_pins; i++) {
|
|
err = of_property_read_u32_index(node, "pinmux",
|
|
i, &pinfunc);
|
|
if (err)
|
|
- return err;
|
|
+ goto exit;
|
|
|
|
pin = STM32_GET_PIN_NO(pinfunc);
|
|
func = STM32_GET_PIN_FUNC(pinfunc);
|
|
|
|
if (!stm32_pctrl_is_function_valid(pctl, pin, func)) {
|
|
dev_err(pctl->dev, "invalid function.\n");
|
|
- return -EINVAL;
|
|
+ err = -EINVAL;
|
|
+ goto exit;
|
|
}
|
|
|
|
grp = stm32_pctrl_find_group_by_pin(pctl, pin);
|
|
if (!grp) {
|
|
dev_err(pctl->dev, "unable to match pin %d to group\n",
|
|
pin);
|
|
- return -EINVAL;
|
|
+ err = -EINVAL;
|
|
+ goto exit;
|
|
}
|
|
|
|
err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
|
|
reserved_maps, num_maps);
|
|
if (err)
|
|
- return err;
|
|
+ goto exit;
|
|
|
|
if (has_config) {
|
|
err = pinctrl_utils_add_map_configs(pctldev, map,
|
|
@@ -472,11 +476,13 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
|
configs, num_configs,
|
|
PIN_MAP_TYPE_CONFIGS_GROUP);
|
|
if (err)
|
|
- return err;
|
|
+ goto exit;
|
|
}
|
|
}
|
|
|
|
- return 0;
|
|
+exit:
|
|
+ kfree(configs);
|
|
+ return err;
|
|
}
|
|
|
|
static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
|
|
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
|
|
index b4224389febe..d0ffdd5d9199 100644
|
|
--- a/drivers/platform/x86/hp-wmi.c
|
|
+++ b/drivers/platform/x86/hp-wmi.c
|
|
@@ -78,7 +78,7 @@ struct bios_args {
|
|
u32 command;
|
|
u32 commandtype;
|
|
u32 datasize;
|
|
- u32 data;
|
|
+ u8 data[128];
|
|
};
|
|
|
|
enum hp_wmi_commandtype {
|
|
@@ -229,7 +229,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
|
|
.command = command,
|
|
.commandtype = query,
|
|
.datasize = insize,
|
|
- .data = 0,
|
|
+ .data = { 0 },
|
|
};
|
|
struct acpi_buffer input = { sizeof(struct bios_args), &args };
|
|
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
|
|
@@ -241,7 +241,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
|
|
|
|
if (WARN_ON(insize > sizeof(args.data)))
|
|
return -EINVAL;
|
|
- memcpy(&args.data, buffer, insize);
|
|
+ memcpy(&args.data[0], buffer, insize);
|
|
|
|
wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
|
|
|
|
@@ -393,7 +393,7 @@ static int hp_wmi_rfkill2_refresh(void)
|
|
int err, i;
|
|
|
|
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
|
|
- 0, sizeof(state));
|
|
+ sizeof(state), sizeof(state));
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -790,7 +790,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
|
|
int err, i;
|
|
|
|
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
|
|
- 0, sizeof(state));
|
|
+ sizeof(state), sizeof(state));
|
|
if (err)
|
|
return err < 0 ? err : -EINVAL;
|
|
|
|
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
|
|
index 974fd684bab2..4b6fddc18394 100644
|
|
--- a/drivers/power/avs/smartreflex.c
|
|
+++ b/drivers/power/avs/smartreflex.c
|
|
@@ -994,8 +994,7 @@ static int omap_sr_remove(struct platform_device *pdev)
|
|
|
|
if (sr_info->autocomp_active)
|
|
sr_stop_vddautocomp(sr_info);
|
|
- if (sr_info->dbg_dir)
|
|
- debugfs_remove_recursive(sr_info->dbg_dir);
|
|
+ debugfs_remove_recursive(sr_info->dbg_dir);
|
|
|
|
pm_runtime_disable(&pdev->dev);
|
|
list_del(&sr_info->node);
|
|
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
|
|
index c45e5719ba17..b1b74cfb1571 100644
|
|
--- a/drivers/pwm/core.c
|
|
+++ b/drivers/pwm/core.c
|
|
@@ -874,6 +874,7 @@ void pwm_put(struct pwm_device *pwm)
|
|
if (pwm->chip->ops->free)
|
|
pwm->chip->ops->free(pwm->chip, pwm);
|
|
|
|
+ pwm_set_chip_data(pwm, NULL);
|
|
pwm->label = NULL;
|
|
|
|
module_put(pwm->chip->ops->owner);
|
|
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
|
|
index d961a8207b1c..31b01035d0ab 100644
|
|
--- a/drivers/pwm/pwm-bcm-iproc.c
|
|
+++ b/drivers/pwm/pwm-bcm-iproc.c
|
|
@@ -187,6 +187,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
|
|
static const struct pwm_ops iproc_pwm_ops = {
|
|
.apply = iproc_pwmc_apply,
|
|
.get_state = iproc_pwmc_get_state,
|
|
+ .owner = THIS_MODULE,
|
|
};
|
|
|
|
static int iproc_pwmc_probe(struct platform_device *pdev)
|
|
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
|
|
index 771859aca4be..7bb819e3c0c1 100644
|
|
--- a/drivers/pwm/pwm-berlin.c
|
|
+++ b/drivers/pwm/pwm-berlin.c
|
|
@@ -78,7 +78,6 @@ static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
|
|
{
|
|
struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
|
|
|
|
- pwm_set_chip_data(pwm, NULL);
|
|
kfree(channel);
|
|
}
|
|
|
|
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
|
|
index 26ec24e457b1..7e16b7def0dc 100644
|
|
--- a/drivers/pwm/pwm-clps711x.c
|
|
+++ b/drivers/pwm/pwm-clps711x.c
|
|
@@ -48,7 +48,7 @@ static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
|
|
static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
|
|
{
|
|
/* Duty cycle 0..15 max */
|
|
- return DIV_ROUND_CLOSEST(v * 0xf, pwm_get_period(pwm));
|
|
+ return DIV_ROUND_CLOSEST(v * 0xf, pwm->args.period);
|
|
}
|
|
|
|
static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
|
|
@@ -71,7 +71,7 @@ static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
|
|
struct clps711x_chip *priv = to_clps711x_chip(chip);
|
|
unsigned int duty;
|
|
|
|
- if (period_ns != pwm_get_period(pwm))
|
|
+ if (period_ns != pwm->args.period)
|
|
return -EINVAL;
|
|
|
|
duty = clps711x_get_duty(pwm, duty_ns);
|
|
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
|
|
index a7eaf962a95b..567f5e2771c4 100644
|
|
--- a/drivers/pwm/pwm-pca9685.c
|
|
+++ b/drivers/pwm/pwm-pca9685.c
|
|
@@ -176,7 +176,6 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
|
|
pm_runtime_put(pca->chip.dev);
|
|
mutex_lock(&pca->lock);
|
|
pwm = &pca->chip.pwms[offset];
|
|
- pwm_set_chip_data(pwm, NULL);
|
|
mutex_unlock(&pca->lock);
|
|
}
|
|
|
|
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
|
|
index 062f2cfc45ec..3762432dd6a7 100644
|
|
--- a/drivers/pwm/pwm-samsung.c
|
|
+++ b/drivers/pwm/pwm-samsung.c
|
|
@@ -238,7 +238,6 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
|
|
static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
|
|
{
|
|
devm_kfree(chip->dev, pwm_get_chip_data(pwm));
|
|
- pwm_set_chip_data(pwm, NULL);
|
|
}
|
|
|
|
static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
|
|
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
|
|
index bb5ab7d78895..c2cc392a27d4 100644
|
|
--- a/drivers/regulator/palmas-regulator.c
|
|
+++ b/drivers/regulator/palmas-regulator.c
|
|
@@ -443,13 +443,16 @@ static int palmas_ldo_write(struct palmas *palmas, unsigned int reg,
|
|
static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
|
|
{
|
|
int id = rdev_get_id(dev);
|
|
+ int ret;
|
|
struct palmas_pmic *pmic = rdev_get_drvdata(dev);
|
|
struct palmas_pmic_driver_data *ddata = pmic->palmas->pmic_ddata;
|
|
struct palmas_regs_info *rinfo = &ddata->palmas_regs_info[id];
|
|
unsigned int reg;
|
|
bool rail_enable = true;
|
|
|
|
- palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, ®);
|
|
+ ret = palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, ®);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
|
|
|
|
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
|
|
index 81672a58fcc2..194fa0cbbc04 100644
|
|
--- a/drivers/regulator/tps65910-regulator.c
|
|
+++ b/drivers/regulator/tps65910-regulator.c
|
|
@@ -1102,8 +1102,10 @@ static int tps65910_probe(struct platform_device *pdev)
|
|
platform_set_drvdata(pdev, pmic);
|
|
|
|
/* Give control of all register to control port */
|
|
- tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
|
|
+ err = tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
|
|
DEVCTRL_SR_CTL_I2C_SEL_MASK);
|
|
+ if (err < 0)
|
|
+ return err;
|
|
|
|
switch (tps65910_chip_id(tps65910)) {
|
|
case TPS65910:
|
|
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
|
|
index 72b96b5c75a8..7e0a14211c88 100644
|
|
--- a/drivers/reset/core.c
|
|
+++ b/drivers/reset/core.c
|
|
@@ -513,6 +513,7 @@ static void reset_control_array_put(struct reset_control_array *resets)
|
|
for (i = 0; i < resets->num_rstcs; i++)
|
|
__reset_control_put_internal(resets->rstc[i]);
|
|
mutex_unlock(&reset_list_mutex);
|
|
+ kfree(resets);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
|
|
index 28a9c7d706cb..03c7b1603dbc 100644
|
|
--- a/drivers/scsi/csiostor/csio_init.c
|
|
+++ b/drivers/scsi/csiostor/csio_init.c
|
|
@@ -649,7 +649,7 @@ csio_shost_init(struct csio_hw *hw, struct device *dev,
|
|
if (csio_lnode_init(ln, hw, pln))
|
|
goto err_shost_put;
|
|
|
|
- if (scsi_add_host(shost, dev))
|
|
+ if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
|
|
goto err_lnode_exit;
|
|
|
|
return ln;
|
|
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
|
|
index 7f2d00354a85..f77d72f01da9 100644
|
|
--- a/drivers/scsi/libsas/sas_expander.c
|
|
+++ b/drivers/scsi/libsas/sas_expander.c
|
|
@@ -614,7 +614,14 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
|
|
}
|
|
|
|
res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
|
|
-
|
|
+ if (res) {
|
|
+ pr_err("ex %016llx phy%02d PHY control failed: %d\n",
|
|
+ SAS_ADDR(dev->sas_addr), phy_id, res);
|
|
+ } else if (pc_resp[2] != SMP_RESP_FUNC_ACC) {
|
|
+ pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n",
|
|
+ SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]);
|
|
+ res = pc_resp[2];
|
|
+ }
|
|
kfree(pc_resp);
|
|
kfree(pc_req);
|
|
return res;
|
|
@@ -817,6 +824,26 @@ static struct domain_device *sas_ex_discover_end_dev(
|
|
|
|
#ifdef CONFIG_SCSI_SAS_ATA
|
|
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
|
|
+ if (child->linkrate > parent->min_linkrate) {
|
|
+ struct sas_phy_linkrates rates = {
|
|
+ .maximum_linkrate = parent->min_linkrate,
|
|
+ .minimum_linkrate = parent->min_linkrate,
|
|
+ };
|
|
+ int ret;
|
|
+
|
|
+ pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
|
|
+ SAS_ADDR(child->sas_addr), phy_id);
|
|
+ ret = sas_smp_phy_control(parent, phy_id,
|
|
+ PHY_FUNC_LINK_RESET, &rates);
|
|
+ if (ret) {
|
|
+ pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
|
|
+ SAS_ADDR(child->sas_addr), phy_id, ret);
|
|
+ goto out_free;
|
|
+ }
|
|
+ pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
|
|
+ SAS_ADDR(child->sas_addr), phy_id);
|
|
+ child->linkrate = child->min_linkrate;
|
|
+ }
|
|
res = sas_get_ata_info(child, phy);
|
|
if (res)
|
|
goto out_free;
|
|
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
|
|
index bc61cc8bc6f0..03e95a3216c8 100644
|
|
--- a/drivers/scsi/lpfc/lpfc.h
|
|
+++ b/drivers/scsi/lpfc/lpfc.h
|
|
@@ -1239,6 +1239,12 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
|
|
static inline struct lpfc_sli_ring *
|
|
lpfc_phba_elsring(struct lpfc_hba *phba)
|
|
{
|
|
+ /* Return NULL if sli_rev has become invalid due to bad fw */
|
|
+ if (phba->sli_rev != LPFC_SLI_REV4 &&
|
|
+ phba->sli_rev != LPFC_SLI_REV3 &&
|
|
+ phba->sli_rev != LPFC_SLI_REV2)
|
|
+ return NULL;
|
|
+
|
|
if (phba->sli_rev == LPFC_SLI_REV4) {
|
|
if (phba->sli4_hba.els_wq)
|
|
return phba->sli4_hba.els_wq->pring;
|
|
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
|
|
index 3da242201cb4..82ce5d193018 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_attr.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_attr.c
|
|
@@ -1178,7 +1178,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
|
|
return -EACCES;
|
|
|
|
if ((phba->sli_rev < LPFC_SLI_REV4) ||
|
|
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
|
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
|
|
LPFC_SLI_INTF_IF_TYPE_2))
|
|
return -EPERM;
|
|
|
|
@@ -4056,7 +4056,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
|
|
uint32_t prev_val, if_type;
|
|
|
|
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
|
|
- if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
|
|
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
|
|
phba->hba_flag & HBA_FORCED_LINK_SPEED)
|
|
return -EPERM;
|
|
|
|
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
|
|
index d89816222b23..6dde21dc82a3 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_bsg.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
|
|
@@ -2221,7 +2221,7 @@ lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
|
|
|
|
if (phba->sli_rev < LPFC_SLI_REV4)
|
|
rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
|
|
- else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
|
|
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
|
|
LPFC_SLI_INTF_IF_TYPE_2)
|
|
rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
|
|
else
|
|
@@ -2261,7 +2261,7 @@ lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
|
|
|
|
if (phba->sli_rev < LPFC_SLI_REV4)
|
|
return -ENODEV;
|
|
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
|
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
|
|
LPFC_SLI_INTF_IF_TYPE_2)
|
|
return -ENODEV;
|
|
|
|
@@ -2353,7 +2353,7 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
|
|
rc = -ENODEV;
|
|
goto job_error;
|
|
}
|
|
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
|
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
|
|
LPFC_SLI_INTF_IF_TYPE_2) {
|
|
rc = -ENODEV;
|
|
goto job_error;
|
|
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
|
|
index e5db20e8979d..c851fd14ff3e 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_els.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_els.c
|
|
@@ -1337,6 +1337,8 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
|
|
Fabric_DID);
|
|
|
|
pring = lpfc_phba_elsring(phba);
|
|
+ if (unlikely(!pring))
|
|
+ return -EIO;
|
|
|
|
/*
|
|
* Check the txcmplq for an iocb that matches the nport the driver is
|
|
@@ -5548,7 +5550,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
|
struct ls_rjt stat;
|
|
|
|
if (phba->sli_rev < LPFC_SLI_REV4 ||
|
|
- bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
|
+ bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
|
|
LPFC_SLI_INTF_IF_TYPE_2) {
|
|
rjt_err = LSRJT_UNABLE_TPC;
|
|
rjt_expl = LSEXP_REQ_UNSUPPORTED;
|
|
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
index d850077c5e22..3f88f3d79622 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
@@ -4769,7 +4769,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|
if (phba->sli_rev == LPFC_SLI_REV4 &&
|
|
(!(vport->load_flag & FC_UNLOADING)) &&
|
|
(bf_get(lpfc_sli_intf_if_type,
|
|
- &phba->sli4_hba.sli_intf) ==
|
|
+ &phba->sli4_hba.sli_intf) >=
|
|
LPFC_SLI_INTF_IF_TYPE_2) &&
|
|
(kref_read(&ndlp->kref) > 0)) {
|
|
mbox->context1 = lpfc_nlp_get(ndlp);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
|
|
index 15bcd00dd7a2..c69c2a2b2ead 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_init.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_init.c
|
|
@@ -1773,7 +1773,12 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
|
|
lpfc_offline(phba);
|
|
/* release interrupt for possible resource change */
|
|
lpfc_sli4_disable_intr(phba);
|
|
- lpfc_sli_brdrestart(phba);
|
|
+ rc = lpfc_sli_brdrestart(phba);
|
|
+ if (rc) {
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
+ "6309 Failed to restart board\n");
|
|
+ return rc;
|
|
+ }
|
|
/* request and enable interrupt */
|
|
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
|
|
if (intr_mode == LPFC_INTR_ERROR) {
|
|
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
index 07cb671bb855..2eba0c39ac1c 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_scsi.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
@@ -2714,6 +2714,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
|
|
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
|
|
int prot_group_type = 0;
|
|
int fcpdl;
|
|
+ struct lpfc_vport *vport = phba->pport;
|
|
|
|
/*
|
|
* Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
|
|
@@ -2819,6 +2820,14 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
|
|
*/
|
|
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
|
|
|
|
+ /*
|
|
+ * For First burst, we may need to adjust the initial transfer
|
|
+ * length for DIF
|
|
+ */
|
|
+ if (iocb_cmd->un.fcpi.fcpi_XRdy &&
|
|
+ (fcpdl < vport->cfg_first_burst_size))
|
|
+ iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
|
|
+
|
|
return 0;
|
|
err:
|
|
if (lpfc_cmd->seg_cnt)
|
|
@@ -3371,6 +3380,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
|
|
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
|
|
int prot_group_type = 0;
|
|
int fcpdl;
|
|
+ struct lpfc_vport *vport = phba->pport;
|
|
|
|
/*
|
|
* Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
|
|
@@ -3486,6 +3496,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
|
|
*/
|
|
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
|
|
|
|
+ /*
|
|
+ * For First burst, we may need to adjust the initial transfer
|
|
+ * length for DIF
|
|
+ */
|
|
+ if (iocb_cmd->un.fcpi.fcpi_XRdy &&
|
|
+ (fcpdl < vport->cfg_first_burst_size))
|
|
+ iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
|
|
+
|
|
/*
|
|
* If the OAS driver feature is enabled and the lun is enabled for
|
|
* OAS, set the oas iocb related flags.
|
|
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
|
|
index ebf7d3cda367..62bea4ffdc25 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_sli.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_sli.c
|
|
@@ -4421,6 +4421,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
|
|
hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
|
|
|
|
rc = lpfc_sli4_brdreset(phba);
|
|
+ if (rc)
|
|
+ return rc;
|
|
|
|
spin_lock_irq(&phba->hbalock);
|
|
phba->pport->stopped = 0;
|
|
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
|
|
index 2fcdaadd10fa..e08ac431bc49 100644
|
|
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
|
|
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
|
|
@@ -903,38 +903,14 @@ static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
|
|
atomic_read(&tpg->lport_tpg_enabled));
|
|
}
|
|
|
|
-static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
|
|
-{
|
|
- struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
|
|
- struct tcm_qla2xxx_tpg, tpg_base_work);
|
|
- struct se_portal_group *se_tpg = &base_tpg->se_tpg;
|
|
- struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
|
|
-
|
|
- if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
|
|
- atomic_set(&base_tpg->lport_tpg_enabled, 1);
|
|
- qlt_enable_vha(base_vha);
|
|
- }
|
|
- complete(&base_tpg->tpg_base_comp);
|
|
-}
|
|
-
|
|
-static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
|
|
-{
|
|
- struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
|
|
- struct tcm_qla2xxx_tpg, tpg_base_work);
|
|
- struct se_portal_group *se_tpg = &base_tpg->se_tpg;
|
|
- struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
|
|
-
|
|
- if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
|
|
- atomic_set(&base_tpg->lport_tpg_enabled, 0);
|
|
- target_undepend_item(&se_tpg->tpg_group.cg_item);
|
|
- }
|
|
- complete(&base_tpg->tpg_base_comp);
|
|
-}
|
|
-
|
|
static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
|
|
const char *page, size_t count)
|
|
{
|
|
struct se_portal_group *se_tpg = to_tpg(item);
|
|
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
|
|
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
|
|
+ struct tcm_qla2xxx_lport, lport_wwn);
|
|
+ struct scsi_qla_host *vha = lport->qla_vha;
|
|
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
|
|
struct tcm_qla2xxx_tpg, se_tpg);
|
|
unsigned long op;
|
|
@@ -953,24 +929,16 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
|
|
if (atomic_read(&tpg->lport_tpg_enabled))
|
|
return -EEXIST;
|
|
|
|
- INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
|
|
+ atomic_set(&tpg->lport_tpg_enabled, 1);
|
|
+ qlt_enable_vha(vha);
|
|
} else {
|
|
if (!atomic_read(&tpg->lport_tpg_enabled))
|
|
return count;
|
|
|
|
- INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
|
|
+ atomic_set(&tpg->lport_tpg_enabled, 0);
|
|
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
|
|
}
|
|
- init_completion(&tpg->tpg_base_comp);
|
|
- schedule_work(&tpg->tpg_base_work);
|
|
- wait_for_completion(&tpg->tpg_base_comp);
|
|
|
|
- if (op) {
|
|
- if (!atomic_read(&tpg->lport_tpg_enabled))
|
|
- return -ENODEV;
|
|
- } else {
|
|
- if (atomic_read(&tpg->lport_tpg_enabled))
|
|
- return -EPERM;
|
|
- }
|
|
return count;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
|
|
index 7550ba2831c3..147cf6c90366 100644
|
|
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
|
|
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
|
|
@@ -48,9 +48,6 @@ struct tcm_qla2xxx_tpg {
|
|
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
|
|
/* Returned by tcm_qla2xxx_make_tpg() */
|
|
struct se_portal_group se_tpg;
|
|
- /* Items for dealing with configfs_depend_item */
|
|
- struct completion tpg_base_comp;
|
|
- struct work_struct tpg_base_work;
|
|
};
|
|
|
|
struct tcm_qla2xxx_fc_loopid {
|
|
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
|
|
index aca52654825b..811cada301ac 100644
|
|
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
|
|
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
|
|
@@ -1630,14 +1630,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
|
|
skb_push(skb, priv->rtllib->tx_headroom);
|
|
ret = _rtl92e_tx(dev, skb);
|
|
- if (ret != 0)
|
|
- kfree_skb(skb);
|
|
|
|
if (queue_index != MGNT_QUEUE) {
|
|
priv->rtllib->stats.tx_bytes += (skb->len -
|
|
priv->rtllib->tx_headroom);
|
|
priv->rtllib->stats.tx_packets++;
|
|
}
|
|
+
|
|
+ if (ret != 0)
|
|
+ kfree_skb(skb);
|
|
}
|
|
|
|
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
|
|
index 943324877707..33e052106ce7 100644
|
|
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
|
|
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
|
|
@@ -25,18 +25,13 @@
|
|
static const struct sdio_device_id sdio_ids[] =
|
|
{
|
|
{ SDIO_DEVICE(0x024c, 0x0523), },
|
|
+ { SDIO_DEVICE(0x024c, 0x0525), },
|
|
{ SDIO_DEVICE(0x024c, 0x0623), },
|
|
{ SDIO_DEVICE(0x024c, 0x0626), },
|
|
{ SDIO_DEVICE(0x024c, 0xb723), },
|
|
{ /* end: all zeroes */ },
|
|
};
|
|
-static const struct acpi_device_id acpi_ids[] = {
|
|
- {"OBDA8723", 0x0000},
|
|
- {}
|
|
-};
|
|
-
|
|
MODULE_DEVICE_TABLE(sdio, sdio_ids);
|
|
-MODULE_DEVICE_TABLE(acpi, acpi_ids);
|
|
|
|
static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id);
|
|
static void rtw_dev_remove(struct sdio_func *func);
|
|
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
|
|
index d29b512a7d9f..c698ebab6d3b 100644
|
|
--- a/drivers/tty/serial/8250/8250_core.c
|
|
+++ b/drivers/tty/serial/8250/8250_core.c
|
|
@@ -953,6 +953,21 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
|
|
return NULL;
|
|
}
|
|
|
|
+static void serial_8250_overrun_backoff_work(struct work_struct *work)
|
|
+{
|
|
+ struct uart_8250_port *up =
|
|
+ container_of(to_delayed_work(work), struct uart_8250_port,
|
|
+ overrun_backoff);
|
|
+ struct uart_port *port = &up->port;
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&port->lock, flags);
|
|
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
|
|
+ up->port.read_status_mask |= UART_LSR_DR;
|
|
+ serial_out(up, UART_IER, up->ier);
|
|
+ spin_unlock_irqrestore(&port->lock, flags);
|
|
+}
|
|
+
|
|
/**
|
|
* serial8250_register_8250_port - register a serial port
|
|
* @up: serial port template
|
|
@@ -1062,7 +1077,18 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
|
|
|
|
ret = 0;
|
|
}
|
|
+
|
|
+ /* Initialise interrupt backoff work if required */
|
|
+ if (up->overrun_backoff_time_ms > 0) {
|
|
+ uart->overrun_backoff_time_ms =
|
|
+ up->overrun_backoff_time_ms;
|
|
+ INIT_DELAYED_WORK(&uart->overrun_backoff,
|
|
+ serial_8250_overrun_backoff_work);
|
|
+ } else {
|
|
+ uart->overrun_backoff_time_ms = 0;
|
|
+ }
|
|
}
|
|
+
|
|
mutex_unlock(&serial_mutex);
|
|
|
|
return ret;
|
|
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
|
|
index 910bfee5a88b..cc138c24ae88 100644
|
|
--- a/drivers/tty/serial/8250/8250_fsl.c
|
|
+++ b/drivers/tty/serial/8250/8250_fsl.c
|
|
@@ -48,8 +48,29 @@ int fsl8250_handle_irq(struct uart_port *port)
|
|
|
|
lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR);
|
|
|
|
- if (lsr & (UART_LSR_DR | UART_LSR_BI))
|
|
+ /* Process incoming characters first */
|
|
+ if ((lsr & (UART_LSR_DR | UART_LSR_BI)) &&
|
|
+ (up->ier & (UART_IER_RLSI | UART_IER_RDI))) {
|
|
lsr = serial8250_rx_chars(up, lsr);
|
|
+ }
|
|
+
|
|
+ /* Stop processing interrupts on input overrun */
|
|
+ if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
|
|
+ unsigned long delay;
|
|
+
|
|
+ up->ier = port->serial_in(port, UART_IER);
|
|
+ if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
|
|
+ port->ops->stop_rx(port);
|
|
+ } else {
|
|
+ /* Keep restarting the timer until
|
|
+ * the input overrun subsides.
|
|
+ */
|
|
+ cancel_delayed_work(&up->overrun_backoff);
|
|
+ }
|
|
+
|
|
+ delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
|
|
+ schedule_delayed_work(&up->overrun_backoff, delay);
|
|
+ }
|
|
|
|
serial8250_modem_status(up);
|
|
|
|
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
|
|
index ec510e342e06..c51044ba503c 100644
|
|
--- a/drivers/tty/serial/8250/8250_of.c
|
|
+++ b/drivers/tty/serial/8250/8250_of.c
|
|
@@ -232,6 +232,11 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
|
|
if (of_property_read_bool(ofdev->dev.of_node, "auto-flow-control"))
|
|
port8250.capabilities |= UART_CAP_AFE;
|
|
|
|
+ if (of_property_read_u32(ofdev->dev.of_node,
|
|
+ "overrun-throttle-ms",
|
|
+ &port8250.overrun_backoff_time_ms) != 0)
|
|
+ port8250.overrun_backoff_time_ms = 0;
|
|
+
|
|
ret = serial8250_register_8250_port(&port8250);
|
|
if (ret < 0)
|
|
goto err_dispose;
|
|
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
|
|
index 0969a0d97b2b..cec995ec11ea 100644
|
|
--- a/drivers/tty/serial/max310x.c
|
|
+++ b/drivers/tty/serial/max310x.c
|
|
@@ -769,12 +769,9 @@ static void max310x_start_tx(struct uart_port *port)
|
|
|
|
static unsigned int max310x_tx_empty(struct uart_port *port)
|
|
{
|
|
- unsigned int lvl, sts;
|
|
+ u8 lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
|
|
|
|
- lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
|
|
- sts = max310x_port_read(port, MAX310X_IRQSTS_REG);
|
|
-
|
|
- return ((sts & MAX310X_IRQ_TXEMPTY_BIT) && !lvl) ? TIOCSER_TEMT : 0;
|
|
+ return lvl ? 0 : TIOCSER_TEMT;
|
|
}
|
|
|
|
static unsigned int max310x_get_mctrl(struct uart_port *port)
|
|
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
|
|
index 8b5c99df0f2b..a962065227c4 100644
|
|
--- a/drivers/usb/serial/ftdi_sio.c
|
|
+++ b/drivers/usb/serial/ftdi_sio.c
|
|
@@ -1028,6 +1028,9 @@ static const struct usb_device_id id_table_combined[] = {
|
|
/* Sienna devices */
|
|
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
|
|
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
|
|
+ /* U-Blox devices */
|
|
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
|
|
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
|
|
{ } /* Terminating entry */
|
|
};
|
|
|
|
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
|
|
index 22d66217cb41..e8373528264c 100644
|
|
--- a/drivers/usb/serial/ftdi_sio_ids.h
|
|
+++ b/drivers/usb/serial/ftdi_sio_ids.h
|
|
@@ -1558,3 +1558,10 @@
|
|
*/
|
|
#define UNJO_VID 0x22B7
|
|
#define UNJO_ISODEBUG_V1_PID 0x150D
|
|
+
|
|
+/*
|
|
+ * U-Blox products (http://www.u-blox.com).
|
|
+ */
|
|
+#define UBLOX_VID 0x1546
|
|
+#define UBLOX_C099F9P_ZED_PID 0x0502
|
|
+#define UBLOX_C099F9P_ODIN_PID 0x0503
|
|
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
|
|
index b4c68f3b82be..eba9aaf3cc17 100644
|
|
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
|
|
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
|
|
@@ -409,6 +409,7 @@ static void tce_iommu_release(void *iommu_data)
|
|
{
|
|
struct tce_container *container = iommu_data;
|
|
struct tce_iommu_group *tcegrp;
|
|
+ struct tce_iommu_prereg *tcemem, *tmtmp;
|
|
long i;
|
|
|
|
while (tce_groups_attached(container)) {
|
|
@@ -431,13 +432,8 @@ static void tce_iommu_release(void *iommu_data)
|
|
tce_iommu_free_table(container, tbl);
|
|
}
|
|
|
|
- while (!list_empty(&container->prereg_list)) {
|
|
- struct tce_iommu_prereg *tcemem;
|
|
-
|
|
- tcemem = list_first_entry(&container->prereg_list,
|
|
- struct tce_iommu_prereg, next);
|
|
- WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
|
|
- }
|
|
+ list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
|
|
+ WARN_ON(tce_iommu_prereg_free(container, tcemem));
|
|
|
|
tce_iommu_disable(container);
|
|
if (container->mm)
|
|
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
|
|
index 69a5a57f1446..61297a6ab43a 100644
|
|
--- a/drivers/watchdog/meson_gxbb_wdt.c
|
|
+++ b/drivers/watchdog/meson_gxbb_wdt.c
|
|
@@ -137,8 +137,8 @@ static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev)
|
|
|
|
reg = readl(data->reg_base + GXBB_WDT_TCNT_REG);
|
|
|
|
- return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) -
|
|
- (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000;
|
|
+ return ((reg & GXBB_WDT_TCNT_SETUP_MASK) -
|
|
+ (reg >> GXBB_WDT_TCNT_CNT_SHIFT)) / 1000;
|
|
}
|
|
|
|
static const struct watchdog_ops meson_gxbb_wdt_ops = {
|
|
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
|
|
index 0ae947c3d7bc..d8cf2039c6a4 100644
|
|
--- a/drivers/watchdog/sama5d4_wdt.c
|
|
+++ b/drivers/watchdog/sama5d4_wdt.c
|
|
@@ -111,9 +111,7 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
|
|
u32 value = WDT_SEC2TICKS(timeout);
|
|
|
|
wdt->mr &= ~AT91_WDT_WDV;
|
|
- wdt->mr &= ~AT91_WDT_WDD;
|
|
wdt->mr |= AT91_WDT_SET_WDV(value);
|
|
- wdt->mr |= AT91_WDT_SET_WDD(value);
|
|
|
|
/*
|
|
* WDDIS has to be 0 when updating WDD/WDV. The datasheet states: When
|
|
@@ -255,7 +253,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
|
|
|
|
timeout = WDT_SEC2TICKS(wdd->timeout);
|
|
|
|
- wdt->mr |= AT91_WDT_SET_WDD(timeout);
|
|
+ wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT));
|
|
wdt->mr |= AT91_WDT_SET_WDV(timeout);
|
|
|
|
ret = sama5d4_wdt_init(wdt);
|
|
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9e480fdebe1f..8c250f4a3a97 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -106,7 +106,8 @@ static void pcistub_device_release(struct kref *kref)
* is called from "unbind" which takes a device_lock mutex.
*/
__pci_reset_function_locked(dev);
- if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+ if (dev_data &&
+ pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
dev_info(&dev->dev, "Could not reload PCI state\n");
else
pci_restore_state(dev);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 93ffa898df6d..d56bd3625468 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -195,8 +195,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
atomic_dec(&delayed_refs->num_entries);
- if (trans->delayed_ref_updates)
- trans->delayed_ref_updates--;
}

static bool merge_ref(struct btrfs_trans_handle *trans,
@@ -458,7 +456,6 @@ add_tail:
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
- trans->delayed_ref_updates++;
spin_unlock(&href->lock);
return ret;
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f0694293b31a..088c4488b449 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -232,6 +232,7 @@ static int parse_fsopt_token(char *c, void *private)
return -ENOMEM;
break;
case Opt_fscache_uniq:
+#ifdef CONFIG_CEPH_FSCACHE
kfree(fsopt->fscache_uniq);
fsopt->fscache_uniq = kstrndup(argstr[0].from,
argstr[0].to-argstr[0].from,
@@ -240,7 +241,10 @@ static int parse_fsopt_token(char *c, void *private)
return -ENOMEM;
fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
break;
- /* misc */
+#else
+ pr_err("fscache support is disabled\n");
+ return -EINVAL;
+#endif
case Opt_wsize:
if (intval < PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
return -EINVAL;
@@ -312,8 +316,13 @@ static int parse_fsopt_token(char *c, void *private)
fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
break;
case Opt_fscache:
+#ifdef CONFIG_CEPH_FSCACHE
fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
break;
+#else
+ pr_err("fscache support is disabled\n");
+ return -EINVAL;
+#endif
case Opt_nofscache:
fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
break;
diff --git a/fs/exec.c b/fs/exec.c
index 4623fc3ac86b..7def97f6aac2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1007,7 +1007,7 @@ static int exec_mmap(struct mm_struct *mm)
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
- mm_release(tsk, old_mm);
+ exec_mm_release(tsk, old_mm);

if (old_mm) {
sync_mm_rss(old_mm);
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
|
|
index c9ec652e2fcd..881d5798a181 100644
|
|
--- a/fs/exofs/super.c
|
|
+++ b/fs/exofs/super.c
|
|
@@ -702,21 +702,18 @@ out:
|
|
/*
|
|
* Read the superblock from the OSD and fill in the fields
|
|
*/
|
|
-static int exofs_fill_super(struct super_block *sb, void *data, int silent)
|
|
+static int exofs_fill_super(struct super_block *sb,
|
|
+ struct exofs_mountopt *opts,
|
|
+ struct exofs_sb_info *sbi,
|
|
+ int silent)
|
|
{
|
|
struct inode *root;
|
|
- struct exofs_mountopt *opts = data;
|
|
- struct exofs_sb_info *sbi; /*extended info */
|
|
struct osd_dev *od; /* Master device */
|
|
struct exofs_fscb fscb; /*on-disk superblock info */
|
|
struct ore_comp comp;
|
|
unsigned table_count;
|
|
int ret;
|
|
|
|
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
|
|
- if (!sbi)
|
|
- return -ENOMEM;
|
|
-
|
|
/* use mount options to fill superblock */
|
|
if (opts->is_osdname) {
|
|
struct osd_dev_info odi = {.systemid_len = 0};
|
|
@@ -860,7 +857,9 @@ static struct dentry *exofs_mount(struct file_system_type *type,
|
|
int flags, const char *dev_name,
|
|
void *data)
|
|
{
|
|
+ struct super_block *s;
|
|
struct exofs_mountopt opts;
|
|
+ struct exofs_sb_info *sbi;
|
|
int ret;
|
|
|
|
ret = parse_options(data, &opts);
|
|
@@ -869,9 +868,31 @@ static struct dentry *exofs_mount(struct file_system_type *type,
|
|
return ERR_PTR(ret);
|
|
}
|
|
|
|
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
|
|
+ if (!sbi) {
|
|
+ kfree(opts.dev_name);
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+ }
|
|
+
|
|
+ s = sget(type, NULL, set_anon_super, flags, NULL);
|
|
+
|
|
+ if (IS_ERR(s)) {
|
|
+ kfree(opts.dev_name);
|
|
+ kfree(sbi);
|
|
+ return ERR_CAST(s);
|
|
+ }
|
|
+
|
|
if (!opts.dev_name)
|
|
opts.dev_name = dev_name;
|
|
- return mount_nodev(type, flags, &opts, exofs_fill_super);
|
|
+
|
|
+
|
|
+ ret = exofs_fill_super(s, &opts, sbi, flags & SB_SILENT ? 1 : 0);
|
|
+ if (ret) {
|
|
+ deactivate_locked_super(s);
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+ s->s_flags |= SB_ACTIVE;
|
|
+ return dget(s->s_root);
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
|
index 8c987a1994d4..b3d5fd84b485 100644
|
|
--- a/fs/ext4/inode.c
|
|
+++ b/fs/ext4/inode.c
|
|
@@ -5751,8 +5751,23 @@ static int __ext4_expand_extra_isize(struct inode *inode,
|
|
{
|
|
struct ext4_inode *raw_inode;
|
|
struct ext4_xattr_ibody_header *header;
|
|
+ unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
|
|
+ struct ext4_inode_info *ei = EXT4_I(inode);
|
|
int error;
|
|
|
|
+ /* this was checked at iget time, but double check for good measure */
|
|
+ if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
|
|
+ (ei->i_extra_isize & 3)) {
|
|
+ EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
|
|
+ ei->i_extra_isize,
|
|
+ EXT4_INODE_SIZE(inode->i_sb));
|
|
+ return -EFSCORRUPTED;
|
|
+ }
|
|
+ if ((new_extra_isize < ei->i_extra_isize) ||
|
|
+ (new_extra_isize < 4) ||
|
|
+ (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
|
|
+ return -EINVAL; /* Should never happen */
|
|
+
|
|
raw_inode = ext4_raw_inode(iloc);
|
|
|
|
header = IHDR(inode, raw_inode);
|
|
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
|
index 61d07608577e..1a0a56647974 100644
|
|
--- a/fs/ext4/super.c
|
|
+++ b/fs/ext4/super.c
|
|
@@ -3458,12 +3458,15 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
|
|
{
|
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
struct ext4_super_block *es = sbi->s_es;
|
|
+ unsigned def_extra_isize = sizeof(struct ext4_inode) -
|
|
+ EXT4_GOOD_OLD_INODE_SIZE;
|
|
|
|
- /* determine the minimum size of new large inodes, if present */
|
|
- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
|
|
- sbi->s_want_extra_isize == 0) {
|
|
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
|
|
- EXT4_GOOD_OLD_INODE_SIZE;
|
|
+ if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
|
|
+ sbi->s_want_extra_isize = 0;
|
|
+ return;
|
|
+ }
|
|
+ if (sbi->s_want_extra_isize < 4) {
|
|
+ sbi->s_want_extra_isize = def_extra_isize;
|
|
if (ext4_has_feature_extra_isize(sb)) {
|
|
if (sbi->s_want_extra_isize <
|
|
le16_to_cpu(es->s_want_extra_isize))
|
|
@@ -3476,10 +3479,10 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
|
|
}
|
|
}
|
|
/* Check if enough inode space is available */
|
|
- if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
|
|
- sbi->s_inode_size) {
|
|
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
|
|
- EXT4_GOOD_OLD_INODE_SIZE;
|
|
+ if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
|
|
+ (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
|
|
+ sbi->s_inode_size)) {
|
|
+ sbi->s_want_extra_isize = def_extra_isize;
|
|
ext4_msg(sb, KERN_INFO,
|
|
"required extra inode space not available");
|
|
}
|
|
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
|
|
index 1b1792199445..d68b0132718a 100644
|
|
--- a/fs/f2fs/file.c
|
|
+++ b/fs/f2fs/file.c
|
|
@@ -1593,7 +1593,7 @@ static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
|
|
|
|
inode->i_ctime = current_time(inode);
|
|
f2fs_set_inode_flags(inode);
|
|
- f2fs_mark_inode_dirty_sync(inode, false);
|
|
+ f2fs_mark_inode_dirty_sync(inode, true);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
|
|
index 09432b25fe9b..b3a1b16d4e3e 100644
|
|
--- a/fs/gfs2/bmap.c
|
|
+++ b/fs/gfs2/bmap.c
|
|
@@ -1445,6 +1445,8 @@ static int do_grow(struct inode *inode, u64 size)
|
|
}
|
|
|
|
error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
|
|
+ (unstuff &&
|
|
+ gfs2_is_jdata(ip) ? RES_JDATA : 0) +
|
|
(sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
|
|
0 : RES_QUOTA), 0);
|
|
if (error)
|
|
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
|
|
index f7fba58618ef..2459ae9d2234 100644
|
|
--- a/fs/ocfs2/journal.c
|
|
+++ b/fs/ocfs2/journal.c
|
|
@@ -1018,7 +1018,8 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
|
|
mlog_errno(status);
|
|
}
|
|
|
|
- if (status == 0) {
|
|
+ /* Shutdown the kernel journal system */
|
|
+ if (!jbd2_journal_destroy(journal->j_journal) && !status) {
|
|
/*
|
|
* Do not toggle if flush was unsuccessful otherwise
|
|
* will leave dirty metadata in a "clean" journal
|
|
@@ -1027,9 +1028,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
|
|
if (status < 0)
|
|
mlog_errno(status);
|
|
}
|
|
-
|
|
- /* Shutdown the kernel journal system */
|
|
- jbd2_journal_destroy(journal->j_journal);
|
|
journal->j_journal = NULL;
|
|
|
|
OCFS2_I(inode)->ip_open_count--;
|
|
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
|
|
index fa0bc4d46065..5f616a6a5358 100644
|
|
--- a/fs/xfs/xfs_ioctl32.c
|
|
+++ b/fs/xfs/xfs_ioctl32.c
|
|
@@ -252,6 +252,32 @@ xfs_compat_ioc_bulkstat(
|
|
int done;
|
|
int error;
|
|
|
|
+ /*
|
|
+ * Output structure handling functions. Depending on the command,
|
|
+ * either the xfs_bstat and xfs_inogrp structures are written out
|
|
+ * to userpace memory via bulkreq.ubuffer. Normally the compat
|
|
+ * functions and structure size are the correct ones to use ...
|
|
+ */
|
|
+ inumbers_fmt_pf inumbers_func = xfs_inumbers_fmt_compat;
|
|
+ bulkstat_one_pf bs_one_func = xfs_bulkstat_one_compat;
|
|
+ size_t bs_one_size = sizeof(struct compat_xfs_bstat);
|
|
+
|
|
+#ifdef CONFIG_X86_X32
|
|
+ if (in_x32_syscall()) {
|
|
+ /*
|
|
+ * ... but on x32 the input xfs_fsop_bulkreq has pointers
|
|
+ * which must be handled in the "compat" (32-bit) way, while
|
|
+ * the xfs_bstat and xfs_inogrp structures follow native 64-
|
|
+ * bit layout convention. So adjust accordingly, otherwise
|
|
+ * the data written out in compat layout will not match what
|
|
+ * x32 userspace expects.
|
|
+ */
|
|
+ inumbers_func = xfs_inumbers_fmt;
|
|
+ bs_one_func = xfs_bulkstat_one;
|
|
+ bs_one_size = sizeof(struct xfs_bstat);
|
|
+ }
|
|
+#endif
|
|
+
|
|
/* done = 1 if there are more stats to get and if bulkstat */
|
|
/* should be called again (unused here, but used in dmapi) */
|
|
|
|
@@ -283,15 +309,15 @@ xfs_compat_ioc_bulkstat(
|
|
|
|
if (cmd == XFS_IOC_FSINUMBERS_32) {
|
|
error = xfs_inumbers(mp, &inlast, &count,
|
|
- bulkreq.ubuffer, xfs_inumbers_fmt_compat);
|
|
+ bulkreq.ubuffer, inumbers_func);
|
|
} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
|
|
int res;
|
|
|
|
- error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
|
|
- sizeof(compat_xfs_bstat_t), NULL, &res);
|
|
+ error = bs_one_func(mp, inlast, bulkreq.ubuffer,
|
|
+ bs_one_size, NULL, &res);
|
|
} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
|
|
error = xfs_bulkstat(mp, &inlast, &count,
|
|
- xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
|
|
+ bs_one_func, bs_one_size,
|
|
bulkreq.ubuffer, &done);
|
|
} else
|
|
error = -EINVAL;
|
|
@@ -347,6 +373,7 @@ xfs_compat_attrlist_by_handle(
|
|
{
|
|
int error;
|
|
attrlist_cursor_kern_t *cursor;
|
|
+ compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
|
|
compat_xfs_fsop_attrlist_handlereq_t al_hreq;
|
|
struct dentry *dentry;
|
|
char *kbuf;
|
|
@@ -381,6 +408,11 @@ xfs_compat_attrlist_by_handle(
|
|
if (error)
|
|
goto out_kfree;
|
|
|
|
+ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
|
|
+ error = -EFAULT;
|
|
+ goto out_kfree;
|
|
+ }
|
|
+
|
|
if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
|
|
error = -EFAULT;
|
|
|
|
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
|
|
index 488719d43ca8..cdcb7235e41a 100644
|
|
--- a/fs/xfs/xfs_rtalloc.c
|
|
+++ b/fs/xfs/xfs_rtalloc.c
|
|
@@ -1214,13 +1214,11 @@ xfs_rtmount_inodes(
|
|
xfs_sb_t *sbp;
|
|
|
|
sbp = &mp->m_sb;
|
|
- if (sbp->sb_rbmino == NULLFSINO)
|
|
- return 0;
|
|
error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
|
|
if (error)
|
|
return error;
|
|
ASSERT(mp->m_rbmip != NULL);
|
|
- ASSERT(sbp->sb_rsumino != NULLFSINO);
|
|
+
|
|
error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
|
|
if (error) {
|
|
IRELE(mp->m_rbmip);
|
|
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
|
|
index 8804753805ac..7bb2d8de9f30 100644
|
|
--- a/include/linux/blktrace_api.h
|
|
+++ b/include/linux/blktrace_api.h
|
|
@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
|
|
|
|
static inline sector_t blk_rq_trace_sector(struct request *rq)
|
|
{
|
|
- return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
|
|
+ /*
|
|
+ * Tracing should ignore starting sector for passthrough requests and
|
|
+ * requests where starting sector didn't get set.
|
|
+ */
|
|
+ if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
|
|
+ return 0;
|
|
+ return blk_rq_pos(rq);
|
|
}
|
|
|
|
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
|
|
diff --git a/include/linux/compat.h b/include/linux/compat.h
|
|
index 23909d12f729..cec96d4794d0 100644
|
|
--- a/include/linux/compat.h
|
|
+++ b/include/linux/compat.h
|
|
@@ -324,8 +324,6 @@ struct compat_kexec_segment;
|
|
struct compat_mq_attr;
|
|
struct compat_msgbuf;
|
|
|
|
-extern void compat_exit_robust_list(struct task_struct *curr);
|
|
-
|
|
asmlinkage long
|
|
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
|
|
compat_size_t len);
|
|
diff --git a/include/linux/futex.h b/include/linux/futex.h
|
|
index c0fb9a24bbd2..a4b6cba699bf 100644
|
|
--- a/include/linux/futex.h
|
|
+++ b/include/linux/futex.h
|
|
@@ -2,7 +2,9 @@
|
|
#ifndef _LINUX_FUTEX_H
|
|
#define _LINUX_FUTEX_H
|
|
|
|
+#include <linux/sched.h>
|
|
#include <linux/ktime.h>
|
|
+
|
|
#include <uapi/linux/futex.h>
|
|
|
|
struct inode;
|
|
@@ -12,9 +14,6 @@ struct task_struct;
|
|
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
|
|
u32 __user *uaddr2, u32 val2, u32 val3);
|
|
|
|
-extern int
|
|
-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
|
|
-
|
|
/*
|
|
* Futexes are matched on equal values of this key.
|
|
* The key type depends on whether it's a shared or private mapping.
|
|
@@ -54,24 +53,35 @@ union futex_key {
|
|
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
|
|
|
|
#ifdef CONFIG_FUTEX
|
|
-extern void exit_robust_list(struct task_struct *curr);
|
|
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
|
|
-#define futex_cmpxchg_enabled 1
|
|
-#else
|
|
-extern int futex_cmpxchg_enabled;
|
|
-#endif
|
|
-#else
|
|
-static inline void exit_robust_list(struct task_struct *curr)
|
|
+enum {
|
|
+ FUTEX_STATE_OK,
|
|
+ FUTEX_STATE_EXITING,
|
|
+ FUTEX_STATE_DEAD,
|
|
+};
|
|
+
|
|
+static inline void futex_init_task(struct task_struct *tsk)
|
|
{
|
|
-}
|
|
+ tsk->robust_list = NULL;
|
|
+#ifdef CONFIG_COMPAT
|
|
+ tsk->compat_robust_list = NULL;
|
|
#endif
|
|
+ INIT_LIST_HEAD(&tsk->pi_state_list);
|
|
+ tsk->pi_state_cache = NULL;
|
|
+ tsk->futex_state = FUTEX_STATE_OK;
|
|
+ mutex_init(&tsk->futex_exit_mutex);
|
|
+}
|
|
|
|
-#ifdef CONFIG_FUTEX_PI
|
|
-extern void exit_pi_state_list(struct task_struct *curr);
|
|
+void futex_exit_recursive(struct task_struct *tsk);
|
|
+void futex_exit_release(struct task_struct *tsk);
|
|
+void futex_exec_release(struct task_struct *tsk);
|
|
+
|
|
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
|
|
+ u32 __user *uaddr2, u32 val2, u32 val3);
|
|
#else
|
|
-static inline void exit_pi_state_list(struct task_struct *curr)
|
|
-{
|
|
-}
|
|
+static inline void futex_init_task(struct task_struct *tsk) { }
|
|
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
|
|
+static inline void futex_exit_release(struct task_struct *tsk) { }
|
|
+static inline void futex_exec_release(struct task_struct *tsk) { }
|
|
#endif
|
|
|
|
#endif
|
|
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
|
|
index 872f930f1b06..dd0a452373e7 100644
|
|
--- a/include/linux/genalloc.h
|
|
+++ b/include/linux/genalloc.h
|
|
@@ -51,7 +51,8 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
|
|
unsigned long size,
|
|
unsigned long start,
|
|
unsigned int nr,
|
|
- void *data, struct gen_pool *pool);
|
|
+ void *data, struct gen_pool *pool,
|
|
+ unsigned long start_addr);
|
|
|
|
/*
|
|
* General purpose special memory pool descriptor.
|
|
@@ -131,24 +132,24 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
|
|
|
|
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
|
|
unsigned long start, unsigned int nr, void *data,
|
|
- struct gen_pool *pool);
|
|
+ struct gen_pool *pool, unsigned long start_addr);
|
|
|
|
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
|
|
unsigned long size, unsigned long start, unsigned int nr,
|
|
- void *data, struct gen_pool *pool);
|
|
+ void *data, struct gen_pool *pool, unsigned long start_addr);
|
|
|
|
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
|
|
unsigned long size, unsigned long start, unsigned int nr,
|
|
- void *data, struct gen_pool *pool);
|
|
+ void *data, struct gen_pool *pool, unsigned long start_addr);
|
|
|
|
|
|
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
|
|
unsigned long size, unsigned long start, unsigned int nr,
|
|
- void *data, struct gen_pool *pool);
|
|
+ void *data, struct gen_pool *pool, unsigned long start_addr);
|
|
|
|
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
|
|
unsigned long start, unsigned int nr, void *data,
|
|
- struct gen_pool *pool);
|
|
+ struct gen_pool *pool, unsigned long start_addr);
|
|
|
|
|
|
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
|
|
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
|
|
index c4a350d83578..79ad4f8b889d 100644
|
|
--- a/include/linux/gpio/consumer.h
|
|
+++ b/include/linux/gpio/consumer.h
|
|
@@ -404,7 +404,7 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
|
|
|
|
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
|
|
{
|
|
- return ERR_PTR(-EINVAL);
|
|
+ return NULL;
|
|
}
|
|
|
|
static inline int desc_to_gpio(const struct gpio_desc *desc)
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index 40b830d55fe5..4725a9d9597f 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -3522,7 +3522,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
|
|
if (debug_value == 0) /* no output */
|
|
return 0;
|
|
/* set low N bits */
|
|
- return (1 << debug_value) - 1;
|
|
+ return (1U << debug_value) - 1;
|
|
}
|
|
|
|
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
|
|
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
|
|
index adb88f8cefbc..576caaf0c9af 100644
|
|
--- a/include/linux/reset-controller.h
|
|
+++ b/include/linux/reset-controller.h
|
|
@@ -7,7 +7,7 @@
|
|
struct reset_controller_dev;
|
|
|
|
/**
|
|
- * struct reset_control_ops
|
|
+ * struct reset_control_ops - reset controller driver callbacks
|
|
*
|
|
* @reset: for self-deasserting resets, does all necessary
|
|
* things to reset the device
|
|
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
|
index 866439c361a9..b06577652643 100644
|
|
--- a/include/linux/sched.h
|
|
+++ b/include/linux/sched.h
|
|
@@ -959,6 +959,8 @@ struct task_struct {
|
|
#endif
|
|
struct list_head pi_state_list;
|
|
struct futex_pi_state *pi_state_cache;
|
|
+ struct mutex futex_exit_mutex;
|
|
+ unsigned int futex_state;
|
|
#endif
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
|
|
@@ -1334,7 +1336,6 @@ extern struct pid *cad_pid;
|
|
*/
|
|
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
|
|
#define PF_EXITING 0x00000004 /* Getting shut down */
|
|
-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
|
|
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
|
|
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
|
|
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
|
|
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
|
|
index 9f7cc1d7ec4a..efb9e12e7f91 100644
|
|
--- a/include/linux/sched/mm.h
|
|
+++ b/include/linux/sched/mm.h
|
|
@@ -125,8 +125,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
|
|
* succeeds.
|
|
*/
|
|
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
|
|
-/* Remove the current tasks stale references to the old mm_struct */
|
|
-extern void mm_release(struct task_struct *, struct mm_struct *);
|
|
+/* Remove the current tasks stale references to the old mm_struct on exit() */
|
|
+extern void exit_mm_release(struct task_struct *, struct mm_struct *);
|
|
+/* Remove the current tasks stale references to the old mm_struct on exec() */
|
|
+extern void exec_mm_release(struct task_struct *, struct mm_struct *);
|
|
|
|
#ifdef CONFIG_MEMCG
|
|
extern void mm_update_next_owner(struct mm_struct *mm);
|
|
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
|
|
index a74ec619ac51..11b4fba82950 100644
|
|
--- a/include/linux/sched/task.h
|
|
+++ b/include/linux/sched/task.h
|
|
@@ -39,6 +39,8 @@ void __noreturn do_task_dead(void);
|
|
|
|
extern void proc_caches_init(void);
|
|
|
|
+extern void fork_init(void);
|
|
+
|
|
extern void release_task(struct task_struct * p);
|
|
|
|
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
|
|
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
|
|
index a27ef5f56431..791a6be0e394 100644
|
|
--- a/include/linux/serial_8250.h
|
|
+++ b/include/linux/serial_8250.h
|
|
@@ -134,6 +134,10 @@ struct uart_8250_port {
|
|
void (*dl_write)(struct uart_8250_port *, int);
|
|
|
|
struct uart_8250_em485 *em485;
|
|
+
|
|
+ /* Serial port overrun backoff */
|
|
+ struct delayed_work overrun_backoff;
|
|
+ u32 overrun_backoff_time_ms;
|
|
};
|
|
|
|
static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
|
|
diff --git a/include/linux/swap.h b/include/linux/swap.h
|
|
index e643866912b7..411953964c34 100644
|
|
--- a/include/linux/swap.h
|
|
+++ b/include/linux/swap.h
|
|
@@ -363,14 +363,8 @@ extern unsigned long vm_total_pages;
|
|
extern int node_reclaim_mode;
|
|
extern int sysctl_min_unmapped_ratio;
|
|
extern int sysctl_min_slab_ratio;
|
|
-extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
|
|
#else
|
|
#define node_reclaim_mode 0
|
|
-static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
|
|
- unsigned int order)
|
|
-{
|
|
- return 0;
|
|
-}
|
|
#endif
|
|
|
|
extern int page_evictable(struct page *page);
|
|
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
|
|
index 94c775773f58..c1f71dd464d3 100644
|
|
--- a/include/net/sctp/structs.h
|
|
+++ b/include/net/sctp/structs.h
|
|
@@ -1181,6 +1181,9 @@ struct sctp_ep_common {
|
|
/* What socket does this endpoint belong to? */
|
|
struct sock *sk;
|
|
|
|
+ /* Cache netns and it won't change once set */
|
|
+ struct net *net;
|
|
+
|
|
/* This is where we receive inbound chunks. */
|
|
struct sctp_inq inqueue;
|
|
|
|
diff --git a/include/net/sock.h b/include/net/sock.h
|
|
index 780c6c0a86f0..0af46cbd3649 100644
|
|
--- a/include/net/sock.h
|
|
+++ b/include/net/sock.h
|
|
@@ -1232,7 +1232,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
|
|
percpu_counter_inc(sk->sk_prot->sockets_allocated);
|
|
}
|
|
|
|
-static inline int
|
|
+static inline u64
|
|
sk_sockets_allocated_read_positive(struct sock *sk)
|
|
{
|
|
return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
|
|
diff --git a/init/main.c b/init/main.c
|
|
index 51067e2db509..b1ab36fe1a55 100644
|
|
--- a/init/main.c
|
|
+++ b/init/main.c
|
|
@@ -98,7 +98,6 @@
|
|
static int kernel_init(void *);
|
|
|
|
extern void init_IRQ(void);
|
|
-extern void fork_init(void);
|
|
extern void radix_tree_init(void);
|
|
|
|
/*
|
|
diff --git a/kernel/Makefile b/kernel/Makefile
|
|
index 3085141c055c..43e92e3691ec 100644
|
|
--- a/kernel/Makefile
|
|
+++ b/kernel/Makefile
|
|
@@ -49,9 +49,6 @@ obj-$(CONFIG_PROFILING) += profile.o
|
|
obj-$(CONFIG_STACKTRACE) += stacktrace.o
|
|
obj-y += time/
|
|
obj-$(CONFIG_FUTEX) += futex.o
|
|
-ifeq ($(CONFIG_COMPAT),y)
|
|
-obj-$(CONFIG_FUTEX) += futex_compat.o
|
|
-endif
|
|
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
|
|
obj-$(CONFIG_SMP) += smp.o
|
|
ifneq ($(CONFIG_SMP),y)
|
|
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
|
|
index 59d2e94ecb79..f5c1d5479ba3 100644
|
|
--- a/kernel/bpf/syscall.c
|
|
+++ b/kernel/bpf/syscall.c
|
|
@@ -348,12 +348,12 @@ static int map_create(union bpf_attr *attr)
|
|
err = bpf_map_new_fd(map);
|
|
if (err < 0) {
|
|
/* failed to allocate fd.
|
|
- * bpf_map_put() is needed because the above
|
|
+ * bpf_map_put_with_uref() is needed because the above
|
|
* bpf_map_alloc_id() has published the map
|
|
* to the userspace and the userspace may
|
|
* have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
|
|
*/
|
|
- bpf_map_put(map);
|
|
+ bpf_map_put_with_uref(map);
|
|
return err;
|
|
}
|
|
|
|
@@ -1354,7 +1354,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
|
|
|
|
fd = bpf_map_new_fd(map);
|
|
if (fd < 0)
|
|
- bpf_map_put(map);
|
|
+ bpf_map_put_with_uref(map);
|
|
|
|
return fd;
|
|
}
|
|
diff --git a/kernel/exit.c b/kernel/exit.c
|
|
index 15437cfdcd70..57cb0eb1271c 100644
|
|
--- a/kernel/exit.c
|
|
+++ b/kernel/exit.c
|
|
@@ -497,7 +497,7 @@ static void exit_mm(void)
|
|
struct mm_struct *mm = current->mm;
|
|
struct core_state *core_state;
|
|
|
|
- mm_release(current, mm);
|
|
+ exit_mm_release(current, mm);
|
|
if (!mm)
|
|
return;
|
|
sync_mm_rss(mm);
|
|
@@ -803,32 +803,12 @@ void __noreturn do_exit(long code)
|
|
*/
|
|
if (unlikely(tsk->flags & PF_EXITING)) {
|
|
pr_alert("Fixing recursive fault but reboot is needed!\n");
|
|
- /*
|
|
- * We can do this unlocked here. The futex code uses
|
|
- * this flag just to verify whether the pi state
|
|
- * cleanup has been done or not. In the worst case it
|
|
- * loops once more. We pretend that the cleanup was
|
|
- * done as there is no way to return. Either the
|
|
- * OWNER_DIED bit is set by now or we push the blocked
|
|
- * task into the wait for ever nirwana as well.
|
|
- */
|
|
- tsk->flags |= PF_EXITPIDONE;
|
|
+ futex_exit_recursive(tsk);
|
|
set_current_state(TASK_UNINTERRUPTIBLE);
|
|
schedule();
|
|
}
|
|
|
|
exit_signals(tsk); /* sets PF_EXITING */
|
|
- /*
|
|
- * Ensure that all new tsk->pi_lock acquisitions must observe
|
|
- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
|
|
- */
|
|
- smp_mb();
|
|
- /*
|
|
- * Ensure that we must observe the pi_state in exit_mm() ->
|
|
- * mm_release() -> exit_pi_state_list().
|
|
- */
|
|
- raw_spin_lock_irq(&tsk->pi_lock);
|
|
- raw_spin_unlock_irq(&tsk->pi_lock);
|
|
|
|
if (unlikely(in_atomic())) {
|
|
pr_info("note: %s[%d] exited with preempt_count %d\n",
|
|
@@ -902,12 +882,6 @@ void __noreturn do_exit(long code)
|
|
* Make sure we are holding no locks:
|
|
*/
|
|
debug_check_no_locks_held();
|
|
- /*
|
|
- * We can do this unlocked here. The futex code uses this flag
|
|
- * just to verify whether the pi state cleanup has been done
|
|
- * or not. In the worst case it loops once more.
|
|
- */
|
|
- tsk->flags |= PF_EXITPIDONE;
|
|
|
|
if (tsk->io_context)
|
|
exit_io_context(tsk);
|
|
diff --git a/kernel/fork.c b/kernel/fork.c
|
|
index 3352fdbd5e20..0a328cf0cb13 100644
|
|
--- a/kernel/fork.c
|
|
+++ b/kernel/fork.c
|
|
@@ -162,10 +162,6 @@ static inline void free_task_struct(struct task_struct *tsk)
|
|
}
|
|
#endif
|
|
|
|
-void __weak arch_release_thread_stack(unsigned long *stack)
|
|
-{
|
|
-}
|
|
-
|
|
#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
|
|
|
|
/*
|
|
@@ -348,7 +344,6 @@ static void release_task_stack(struct task_struct *tsk)
|
|
return; /* Better to leak the stack than to free prematurely */
|
|
|
|
account_kernel_stack(tsk, -1);
|
|
- arch_release_thread_stack(tsk->stack);
|
|
free_thread_stack(tsk);
|
|
tsk->stack = NULL;
|
|
#ifdef CONFIG_VMAP_STACK
|
|
@@ -1137,24 +1132,8 @@ static int wait_for_vfork_done(struct task_struct *child,
|
|
* restoring the old one. . .
|
|
* Eric Biederman 10 January 1998
|
|
*/
|
|
-void mm_release(struct task_struct *tsk, struct mm_struct *mm)
|
|
+static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
|
|
{
|
|
- /* Get rid of any futexes when releasing the mm */
|
|
-#ifdef CONFIG_FUTEX
|
|
- if (unlikely(tsk->robust_list)) {
|
|
- exit_robust_list(tsk);
|
|
- tsk->robust_list = NULL;
|
|
- }
|
|
-#ifdef CONFIG_COMPAT
|
|
- if (unlikely(tsk->compat_robust_list)) {
|
|
- compat_exit_robust_list(tsk);
|
|
- tsk->compat_robust_list = NULL;
|
|
- }
|
|
-#endif
|
|
- if (unlikely(!list_empty(&tsk->pi_state_list)))
|
|
- exit_pi_state_list(tsk);
|
|
-#endif
|
|
-
|
|
uprobe_free_utask(tsk);
|
|
|
|
/* Get rid of any cached register state */
|
|
@@ -1187,6 +1166,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
|
|
complete_vfork_done(tsk);
|
|
}
|
|
|
|
+void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
|
|
+{
|
|
+ futex_exit_release(tsk);
|
|
+ mm_release(tsk, mm);
|
|
+}
|
|
+
|
|
+void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
|
|
+{
|
|
+ futex_exec_release(tsk);
|
|
+ mm_release(tsk, mm);
|
|
+}
|
|
+
|
|
/*
|
|
* Allocate a new mm structure and copy contents from the
|
|
* mm structure of the passed in task structure.
|
|
@@ -1801,14 +1792,8 @@ static __latent_entropy struct task_struct *copy_process(
|
|
#ifdef CONFIG_BLOCK
|
|
p->plug = NULL;
|
|
#endif
|
|
-#ifdef CONFIG_FUTEX
|
|
- p->robust_list = NULL;
|
|
-#ifdef CONFIG_COMPAT
|
|
- p->compat_robust_list = NULL;
|
|
-#endif
|
|
- INIT_LIST_HEAD(&p->pi_state_list);
|
|
- p->pi_state_cache = NULL;
|
|
-#endif
|
|
+ futex_init_task(p);
|
|
+
|
|
/*
|
|
* sigaltstack should be cleared when sharing the same VM
|
|
*/
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index afe6a81584c9..f5aae14c247b 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -44,6 +44,7 @@
|
|
* along with this program; if not, write to the Free Software
|
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
+#include <linux/compat.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/poll.h>
|
|
#include <linux/fs.h>
|
|
@@ -173,8 +174,10 @@
|
|
* double_lock_hb() and double_unlock_hb(), respectively.
|
|
*/
|
|
|
|
-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
|
|
-int __read_mostly futex_cmpxchg_enabled;
|
|
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
|
|
+#define futex_cmpxchg_enabled 1
|
|
+#else
|
|
+static int __read_mostly futex_cmpxchg_enabled;
|
|
#endif
|
|
|
|
/*
|
|
@@ -338,6 +341,12 @@ static inline bool should_fail_futex(bool fshared)
|
|
}
|
|
#endif /* CONFIG_FAIL_FUTEX */
|
|
|
|
+#ifdef CONFIG_COMPAT
|
|
+static void compat_exit_robust_list(struct task_struct *curr);
|
|
+#else
|
|
+static inline void compat_exit_robust_list(struct task_struct *curr) { }
|
|
+#endif
|
|
+
|
|
static inline void futex_get_mm(union futex_key *key)
|
|
{
|
|
mmgrab(key->private.mm);
|
|
@@ -887,7 +896,7 @@ static struct task_struct *futex_find_get_task(pid_t pid)
|
|
* Kernel cleans up PI-state, but userspace is likely hosed.
|
|
* (Robust-futex cleanup is separate and might save the day for userspace.)
|
|
*/
|
|
-void exit_pi_state_list(struct task_struct *curr)
|
|
+static void exit_pi_state_list(struct task_struct *curr)
|
|
{
|
|
struct list_head *next, *head = &curr->pi_state_list;
|
|
struct futex_pi_state *pi_state;
|
|
@@ -957,7 +966,8 @@ void exit_pi_state_list(struct task_struct *curr)
|
|
}
|
|
raw_spin_unlock_irq(&curr->pi_lock);
|
|
}
|
|
-
|
|
+#else
|
|
+static inline void exit_pi_state_list(struct task_struct *curr) { }
|
|
#endif
|
|
|
|
/*
|
|
@@ -1166,16 +1176,47 @@ out_error:
|
|
return ret;
|
|
}
|
|
|
|
+/**
|
|
+ * wait_for_owner_exiting - Block until the owner has exited
|
|
+ * @exiting: Pointer to the exiting task
|
|
+ *
|
|
+ * Caller must hold a refcount on @exiting.
|
|
+ */
|
|
+static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
|
|
+{
|
|
+ if (ret != -EBUSY) {
|
|
+ WARN_ON_ONCE(exiting);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
|
|
+ return;
|
|
+
|
|
+ mutex_lock(&exiting->futex_exit_mutex);
|
|
+ /*
|
|
+ * No point in doing state checking here. If the waiter got here
|
|
+ * while the task was in exec()->exec_futex_release() then it can
|
|
+ * have any FUTEX_STATE_* value when the waiter has acquired the
|
|
+ * mutex. OK, if running, EXITING or DEAD if it reached exit()
|
|
+ * already. Highly unlikely and not a problem. Just one more round
|
|
+ * through the futex maze.
|
|
+ */
|
|
+ mutex_unlock(&exiting->futex_exit_mutex);
|
|
+
|
|
+ put_task_struct(exiting);
|
|
+}
|
|
+
|
|
static int handle_exit_race(u32 __user *uaddr, u32 uval,
|
|
struct task_struct *tsk)
|
|
{
|
|
u32 uval2;
|
|
|
|
/*
|
|
- * If PF_EXITPIDONE is not yet set, then try again.
|
|
+ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
|
|
+ * caller that the alleged owner is busy.
|
|
*/
|
|
- if (tsk && !(tsk->flags & PF_EXITPIDONE))
|
|
- return -EAGAIN;
|
|
+ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
|
|
+ return -EBUSY;
|
|
|
|
/*
|
|
* Reread the user space value to handle the following situation:
|
|
@@ -1193,8 +1234,9 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
|
|
* *uaddr = 0xC0000000; tsk = get_task(PID);
|
|
* } if (!tsk->flags & PF_EXITING) {
|
|
* ... attach();
|
|
- * tsk->flags |= PF_EXITPIDONE; } else {
|
|
- * if (!(tsk->flags & PF_EXITPIDONE))
|
|
+ * tsk->futex_state = } else {
|
|
+ * FUTEX_STATE_DEAD; if (tsk->futex_state !=
|
|
+ * FUTEX_STATE_DEAD)
|
|
* return -EAGAIN;
|
|
* return -ESRCH; <--- FAIL
|
|
* }
|
|
@@ -1225,7 +1267,8 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
|
|
* it after doing proper sanity checks.
|
|
*/
|
|
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
|
|
- struct futex_pi_state **ps)
|
|
+ struct futex_pi_state **ps,
|
|
+ struct task_struct **exiting)
|
|
{
|
|
pid_t pid = uval & FUTEX_TID_MASK;
|
|
struct futex_pi_state *pi_state;
|
|
@@ -1250,22 +1293,33 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
|
|
}
|
|
|
|
/*
|
|
- * We need to look at the task state flags to figure out,
|
|
- * whether the task is exiting. To protect against the do_exit
|
|
- * change of the task flags, we do this protected by
|
|
- * p->pi_lock:
|
|
+ * We need to look at the task state to figure out, whether the
|
|
+ * task is exiting. To protect against the change of the task state
|
|
+ * in futex_exit_release(), we do this protected by p->pi_lock:
|
|
*/
|
|
raw_spin_lock_irq(&p->pi_lock);
|
|
- if (unlikely(p->flags & PF_EXITING)) {
|
|
+ if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
|
|
/*
|
|
- * The task is on the way out. When PF_EXITPIDONE is
|
|
- * set, we know that the task has finished the
|
|
- * cleanup:
|
|
+ * The task is on the way out. When the futex state is
|
|
+ * FUTEX_STATE_DEAD, we know that the task has finished
|
|
+ * the cleanup:
|
|
*/
|
|
int ret = handle_exit_race(uaddr, uval, p);
|
|
|
|
raw_spin_unlock_irq(&p->pi_lock);
|
|
- put_task_struct(p);
|
|
+ /*
|
|
+ * If the owner task is between FUTEX_STATE_EXITING and
|
|
+ * FUTEX_STATE_DEAD then store the task pointer and keep
|
|
+ * the reference on the task struct. The calling code will
|
|
+ * drop all locks, wait for the task to reach
|
|
+ * FUTEX_STATE_DEAD and then drop the refcount. This is
|
|
+ * required to prevent a live lock when the current task
|
|
+ * preempted the exiting task between the two states.
|
|
+ */
|
|
+ if (ret == -EBUSY)
|
|
+ *exiting = p;
|
|
+ else
|
|
+ put_task_struct(p);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1304,7 +1358,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
|
|
|
|
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
|
|
struct futex_hash_bucket *hb,
|
|
- union futex_key *key, struct futex_pi_state **ps)
|
|
+ union futex_key *key, struct futex_pi_state **ps,
|
|
+ struct task_struct **exiting)
|
|
{
|
|
struct futex_q *top_waiter = futex_top_waiter(hb, key);
|
|
|
|
@@ -1319,7 +1374,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
|
|
* We are the first waiter - try to look up the owner based on
|
|
* @uval and attach to it.
|
|
*/
|
|
- return attach_to_pi_owner(uaddr, uval, key, ps);
|
|
+ return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
|
|
}
|
|
|
|
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
|
|
@@ -1347,6 +1402,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
|
|
* lookup
|
|
* @task: the task to perform the atomic lock work for. This will
|
|
* be "current" except in the case of requeue pi.
|
|
+ * @exiting: Pointer to store the task pointer of the owner task
|
|
+ * which is in the middle of exiting
|
|
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
|
|
*
|
|
* Return:
|
|
@@ -1355,11 +1412,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
|
|
* - <0 - error
|
|
*
|
|
* The hb->lock and futex_key refs shall be held by the caller.
|
|
+ *
|
|
+ * @exiting is only set when the return value is -EBUSY. If so, this holds
|
|
+ * a refcount on the exiting task on return and the caller needs to drop it
|
|
+ * after waiting for the exit to complete.
|
|
*/
|
|
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
|
|
union futex_key *key,
|
|
struct futex_pi_state **ps,
|
|
- struct task_struct *task, int set_waiters)
|
|
+ struct task_struct *task,
|
|
+ struct task_struct **exiting,
|
|
+ int set_waiters)
|
|
{
|
|
u32 uval, newval, vpid = task_pid_vnr(task);
|
|
struct futex_q *top_waiter;
|
|
@@ -1429,7 +1492,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
|
|
* attach to the owner. If that fails, no harm done, we only
|
|
* set the FUTEX_WAITERS bit in the user space variable.
|
|
*/
|
|
- return attach_to_pi_owner(uaddr, newval, key, ps);
|
|
+ return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
|
|
}
|
|
|
|
/**
|
|
@@ -1848,6 +1911,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
|
|
* @key1: the from futex key
|
|
* @key2: the to futex key
|
|
* @ps: address to store the pi_state pointer
|
|
+ * @exiting: Pointer to store the task pointer of the owner task
|
|
+ * which is in the middle of exiting
|
|
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
|
|
*
|
|
* Try and get the lock on behalf of the top waiter if we can do it atomically.
|
|
@@ -1855,16 +1920,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
|
|
* then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
|
|
* hb1 and hb2 must be held by the caller.
|
|
*
|
|
+ * @exiting is only set when the return value is -EBUSY. If so, this holds
|
|
+ * a refcount on the exiting task on return and the caller needs to drop it
|
|
+ * after waiting for the exit to complete.
|
|
+ *
|
|
* Return:
|
|
* - 0 - failed to acquire the lock atomically;
|
|
* - >0 - acquired the lock, return value is vpid of the top_waiter
|
|
* - <0 - error
|
|
*/
|
|
-static int futex_proxy_trylock_atomic(u32 __user *pifutex,
|
|
- struct futex_hash_bucket *hb1,
|
|
- struct futex_hash_bucket *hb2,
|
|
- union futex_key *key1, union futex_key *key2,
|
|
- struct futex_pi_state **ps, int set_waiters)
|
|
+static int
|
|
+futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
|
|
+ struct futex_hash_bucket *hb2, union futex_key *key1,
|
|
+ union futex_key *key2, struct futex_pi_state **ps,
|
|
+ struct task_struct **exiting, int set_waiters)
|
|
{
|
|
struct futex_q *top_waiter = NULL;
|
|
u32 curval;
|
|
@@ -1901,7 +1970,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
|
|
*/
|
|
vpid = task_pid_vnr(top_waiter->task);
|
|
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
|
|
- set_waiters);
|
|
+ exiting, set_waiters);
|
|
if (ret == 1) {
|
|
requeue_pi_wake_futex(top_waiter, key2, hb2);
|
|
return vpid;
|
|
@@ -2030,6 +2099,8 @@ retry_private:
|
|
}
|
|
|
|
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
|
|
+ struct task_struct *exiting = NULL;
|
|
+
|
|
/*
|
|
* Attempt to acquire uaddr2 and wake the top waiter. If we
|
|
* intend to requeue waiters, force setting the FUTEX_WAITERS
|
|
@@ -2037,7 +2108,8 @@ retry_private:
|
|
* faults rather in the requeue loop below.
|
|
*/
|
|
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
|
|
- &key2, &pi_state, nr_requeue);
|
|
+ &key2, &pi_state,
|
|
+ &exiting, nr_requeue);
|
|
|
|
/*
|
|
* At this point the top_waiter has either taken uaddr2 or is
|
|
@@ -2064,7 +2136,8 @@ retry_private:
|
|
* If that call succeeds then we have pi_state and an
|
|
* initial refcount on it.
|
|
*/
|
|
- ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
|
|
+ ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
|
|
+ &pi_state, &exiting);
|
|
}
|
|
|
|
switch (ret) {
|
|
@@ -2082,17 +2155,24 @@ retry_private:
|
|
if (!ret)
|
|
goto retry;
|
|
goto out;
|
|
+ case -EBUSY:
|
|
case -EAGAIN:
|
|
/*
|
|
* Two reasons for this:
|
|
- * - Owner is exiting and we just wait for the
|
|
+ * - EBUSY: Owner is exiting and we just wait for the
|
|
* exit to complete.
|
|
- * - The user space value changed.
|
|
+ * - EAGAIN: The user space value changed.
|
|
*/
|
|
double_unlock_hb(hb1, hb2);
|
|
hb_waiters_dec(hb2);
|
|
put_futex_key(&key2);
|
|
put_futex_key(&key1);
|
|
+ /*
|
|
+ * Handle the case where the owner is in the middle of
|
|
+ * exiting. Wait for the exit to complete otherwise
|
|
+ * this task might loop forever, aka. live lock.
|
|
+ */
|
|
+ wait_for_owner_exiting(ret, exiting);
|
|
cond_resched();
|
|
goto retry;
|
|
default:
|
|
@@ -2808,6 +2888,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
|
|
{
|
|
struct hrtimer_sleeper timeout, *to = NULL;
|
|
struct futex_pi_state *pi_state = NULL;
|
|
+ struct task_struct *exiting = NULL;
|
|
struct rt_mutex_waiter rt_waiter;
|
|
struct futex_hash_bucket *hb;
|
|
struct futex_q q = futex_q_init;
|
|
@@ -2835,7 +2916,8 @@ retry:
|
|
retry_private:
|
|
hb = queue_lock(&q);
|
|
|
|
- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
|
|
+ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
|
|
+ &exiting, 0);
|
|
if (unlikely(ret)) {
|
|
/*
|
|
* Atomic work succeeded and we got the lock,
|
|
@@ -2848,15 +2930,22 @@ retry_private:
|
|
goto out_unlock_put_key;
|
|
case -EFAULT:
|
|
goto uaddr_faulted;
|
|
+ case -EBUSY:
|
|
case -EAGAIN:
|
|
/*
|
|
* Two reasons for this:
|
|
- * - Task is exiting and we just wait for the
|
|
+ * - EBUSY: Task is exiting and we just wait for the
|
|
* exit to complete.
|
|
- * - The user space value changed.
|
|
+ * - EAGAIN: The user space value changed.
|
|
*/
|
|
queue_unlock(hb);
|
|
put_futex_key(&q.key);
|
|
+ /*
|
|
+ * Handle the case where the owner is in the middle of
|
|
+ * exiting. Wait for the exit to complete otherwise
|
|
+ * this task might loop forever, aka. live lock.
|
|
+ */
|
|
+ wait_for_owner_exiting(ret, exiting);
|
|
cond_resched();
|
|
goto retry;
|
|
default:
|
|
@@ -3472,11 +3561,16 @@ err_unlock:
|
|
return ret;
|
|
}
|
|
|
|
+/* Constants for the pending_op argument of handle_futex_death */
|
|
+#define HANDLE_DEATH_PENDING true
|
|
+#define HANDLE_DEATH_LIST false
|
|
+
|
|
/*
|
|
* Process a futex-list entry, check whether it's owned by the
|
|
* dying task, and do notification if so:
|
|
*/
|
|
-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
|
|
+static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
|
|
+ bool pi, bool pending_op)
|
|
{
|
|
u32 uval, uninitialized_var(nval), mval;
|
|
int err;
|
|
@@ -3489,6 +3583,42 @@ retry:
|
|
if (get_user(uval, uaddr))
|
|
return -1;
|
|
|
|
+ /*
|
|
+ * Special case for regular (non PI) futexes. The unlock path in
|
|
+ * user space has two race scenarios:
|
|
+ *
|
|
+ * 1. The unlock path releases the user space futex value and
|
|
+ * before it can execute the futex() syscall to wake up
|
|
+ * waiters it is killed.
|
|
+ *
|
|
+ * 2. A woken up waiter is killed before it can acquire the
|
|
+ * futex in user space.
|
|
+ *
|
|
+ * In both cases the TID validation below prevents a wakeup of
|
|
+ * potential waiters which can cause these waiters to block
|
|
+ * forever.
|
|
+ *
|
|
+ * In both cases the following conditions are met:
|
|
+ *
|
|
+ * 1) task->robust_list->list_op_pending != NULL
|
|
+ * @pending_op == true
|
|
+ * 2) User space futex value == 0
|
|
+ * 3) Regular futex: @pi == false
|
|
+ *
|
|
+ * If these conditions are met, it is safe to attempt waking up a
|
|
+ * potential waiter without touching the user space futex value and
|
|
+ * trying to set the OWNER_DIED bit. The user space futex value is
|
|
+ * uncontended and the rest of the user space mutex state is
|
|
+ * consistent, so a woken waiter will just take over the
|
|
+ * uncontended futex. Setting the OWNER_DIED bit would create
|
|
+ * inconsistent state and malfunction of the user space owner died
|
|
+ * handling.
|
|
+ */
|
|
+ if (pending_op && !pi && !uval) {
|
|
+ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
|
|
return 0;
|
|
|
|
@@ -3567,7 +3697,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
|
|
*
|
|
* We silently return on any sign of list-walking problem.
|
|
*/
|
|
-void exit_robust_list(struct task_struct *curr)
|
|
+static void exit_robust_list(struct task_struct *curr)
|
|
{
|
|
struct robust_list_head __user *head = curr->robust_list;
|
|
struct robust_list __user *entry, *next_entry, *pending;
|
|
@@ -3608,10 +3738,11 @@ void exit_robust_list(struct task_struct *curr)
|
|
* A pending lock might already be on the list, so
|
|
* don't process it twice:
|
|
*/
|
|
- if (entry != pending)
|
|
+ if (entry != pending) {
|
|
if (handle_futex_death((void __user *)entry + futex_offset,
|
|
- curr, pi))
|
|
+ curr, pi, HANDLE_DEATH_LIST))
|
|
return;
|
|
+ }
|
|
if (rc)
|
|
return;
|
|
entry = next_entry;
|
|
@@ -3625,9 +3756,118 @@ void exit_robust_list(struct task_struct *curr)
|
|
cond_resched();
|
|
}
|
|
|
|
- if (pending)
|
|
+ if (pending) {
|
|
handle_futex_death((void __user *)pending + futex_offset,
|
|
- curr, pip);
|
|
+ curr, pip, HANDLE_DEATH_PENDING);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void futex_cleanup(struct task_struct *tsk)
|
|
+{
|
|
+ if (unlikely(tsk->robust_list)) {
|
|
+ exit_robust_list(tsk);
|
|
+ tsk->robust_list = NULL;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_COMPAT
|
|
+ if (unlikely(tsk->compat_robust_list)) {
|
|
+ compat_exit_robust_list(tsk);
|
|
+ tsk->compat_robust_list = NULL;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ if (unlikely(!list_empty(&tsk->pi_state_list)))
|
|
+ exit_pi_state_list(tsk);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
|
|
+ * @tsk: task to set the state on
|
|
+ *
|
|
+ * Set the futex exit state of the task lockless. The futex waiter code
|
|
+ * observes that state when a task is exiting and loops until the task has
|
|
+ * actually finished the futex cleanup. The worst case for this is that the
|
|
+ * waiter runs through the wait loop until the state becomes visible.
|
|
+ *
|
|
+ * This is called from the recursive fault handling path in do_exit().
|
|
+ *
|
|
+ * This is best effort. Either the futex exit code has run already or
|
|
+ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
|
|
+ * take it over. If not, the problem is pushed back to user space. If the
|
|
+ * futex exit code did not run yet, then an already queued waiter might
|
|
+ * block forever, but there is nothing which can be done about that.
|
|
+ */
|
|
+void futex_exit_recursive(struct task_struct *tsk)
|
|
+{
|
|
+ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
|
|
+ if (tsk->futex_state == FUTEX_STATE_EXITING)
|
|
+ mutex_unlock(&tsk->futex_exit_mutex);
|
|
+ tsk->futex_state = FUTEX_STATE_DEAD;
|
|
+}
|
|
+
|
|
+static void futex_cleanup_begin(struct task_struct *tsk)
|
|
+{
|
|
+ /*
|
|
+ * Prevent various race issues against a concurrent incoming waiter
|
|
+ * including live locks by forcing the waiter to block on
|
|
+ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
|
|
+ * attach_to_pi_owner().
|
|
+ */
|
|
+ mutex_lock(&tsk->futex_exit_mutex);
|
|
+
|
|
+ /*
|
|
+ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
|
|
+ *
|
|
+ * This ensures that all subsequent checks of tsk->futex_state in
|
|
+ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
|
|
+ * tsk->pi_lock held.
|
|
+ *
|
|
+ * It guarantees also that a pi_state which was queued right before
|
|
+ * the state change under tsk->pi_lock by a concurrent waiter must
|
|
+ * be observed in exit_pi_state_list().
|
|
+ */
|
|
+ raw_spin_lock_irq(&tsk->pi_lock);
|
|
+ tsk->futex_state = FUTEX_STATE_EXITING;
|
|
+ raw_spin_unlock_irq(&tsk->pi_lock);
|
|
+}
|
|
+
|
|
+static void futex_cleanup_end(struct task_struct *tsk, int state)
|
|
+{
|
|
+ /*
|
|
+ * Lockless store. The only side effect is that an observer might
|
|
+ * take another loop until it becomes visible.
|
|
+ */
|
|
+ tsk->futex_state = state;
|
|
+ /*
|
|
+ * Drop the exit protection. This unblocks waiters which observed
|
|
+ * FUTEX_STATE_EXITING to reevaluate the state.
|
|
+ */
|
|
+ mutex_unlock(&tsk->futex_exit_mutex);
|
|
+}
|
|
+
|
|
+void futex_exec_release(struct task_struct *tsk)
|
|
+{
|
|
+ /*
|
|
+ * The state handling is done for consistency, but in the case of
|
|
+ * exec() there is no way to prevent further damage as the PID stays
|
|
+ * the same. But for the unlikely and arguably buggy case that a
|
|
+ * futex is held on exec(), this provides at least as much state
|
|
+ * consistency protection which is possible.
|
|
+ */
|
|
+ futex_cleanup_begin(tsk);
|
|
+ futex_cleanup(tsk);
|
|
+ /*
|
|
+ * Reset the state to FUTEX_STATE_OK. The task is alive and about
|
|
+ * exec a new binary.
|
|
+ */
|
|
+ futex_cleanup_end(tsk, FUTEX_STATE_OK);
|
|
+}
|
|
+
|
|
+void futex_exit_release(struct task_struct *tsk)
|
|
+{
|
|
+ futex_cleanup_begin(tsk);
|
|
+ futex_cleanup(tsk);
|
|
+ futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
|
|
}
|
|
|
|
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
|
|
@@ -3723,6 +3963,193 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
|
|
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
|
|
}
|
|
|
|
+#ifdef CONFIG_COMPAT
|
|
+/*
|
|
+ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
|
|
+ */
|
|
+static inline int
|
|
+compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
|
|
+ compat_uptr_t __user *head, unsigned int *pi)
|
|
+{
|
|
+ if (get_user(*uentry, head))
|
|
+ return -EFAULT;
|
|
+
|
|
+ *entry = compat_ptr((*uentry) & ~1);
|
|
+ *pi = (unsigned int)(*uentry) & 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void __user *futex_uaddr(struct robust_list __user *entry,
|
|
+ compat_long_t futex_offset)
|
|
+{
|
|
+ compat_uptr_t base = ptr_to_compat(entry);
|
|
+ void __user *uaddr = compat_ptr(base + futex_offset);
|
|
+
|
|
+ return uaddr;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Walk curr->robust_list (very carefully, it's a userspace list!)
|
|
+ * and mark any locks found there dead, and notify any waiters.
|
|
+ *
|
|
+ * We silently return on any sign of list-walking problem.
|
|
+ */
|
|
+static void compat_exit_robust_list(struct task_struct *curr)
|
|
+{
|
|
+ struct compat_robust_list_head __user *head = curr->compat_robust_list;
|
|
+ struct robust_list __user *entry, *next_entry, *pending;
|
|
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
|
|
+ unsigned int uninitialized_var(next_pi);
|
|
+ compat_uptr_t uentry, next_uentry, upending;
|
|
+ compat_long_t futex_offset;
|
|
+ int rc;
|
|
+
|
|
+ if (!futex_cmpxchg_enabled)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Fetch the list head (which was registered earlier, via
|
|
+ * sys_set_robust_list()):
|
|
+ */
|
|
+ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
|
|
+ return;
|
|
+ /*
|
|
+ * Fetch the relative futex offset:
|
|
+ */
|
|
+ if (get_user(futex_offset, &head->futex_offset))
|
|
+ return;
|
|
+ /*
|
|
+ * Fetch any possibly pending lock-add first, and handle it
|
|
+ * if it exists:
|
|
+ */
|
|
+ if (compat_fetch_robust_entry(&upending, &pending,
|
|
+ &head->list_op_pending, &pip))
|
|
+ return;
|
|
+
|
|
+ next_entry = NULL; /* avoid warning with gcc */
|
|
+ while (entry != (struct robust_list __user *) &head->list) {
|
|
+ /*
|
|
+ * Fetch the next entry in the list before calling
|
|
+ * handle_futex_death:
|
|
+ */
|
|
+ rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
|
|
+ (compat_uptr_t __user *)&entry->next, &next_pi);
|
|
+ /*
|
|
+ * A pending lock might already be on the list, so
|
|
+ * dont process it twice:
|
|
+ */
|
|
+ if (entry != pending) {
|
|
+ void __user *uaddr = futex_uaddr(entry, futex_offset);
|
|
+
|
|
+ if (handle_futex_death(uaddr, curr, pi,
|
|
+ HANDLE_DEATH_LIST))
|
|
+ return;
|
|
+ }
|
|
+ if (rc)
|
|
+ return;
|
|
+ uentry = next_uentry;
|
|
+ entry = next_entry;
|
|
+ pi = next_pi;
|
|
+ /*
|
|
+ * Avoid excessively long or circular lists:
|
|
+ */
|
|
+ if (!--limit)
|
|
+ break;
|
|
+
|
|
+ cond_resched();
|
|
+ }
|
|
+ if (pending) {
|
|
+ void __user *uaddr = futex_uaddr(pending, futex_offset);
|
|
+
|
|
+ handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
|
|
+ }
|
|
+}
|
|
+
|
|
+COMPAT_SYSCALL_DEFINE2(set_robust_list,
|
|
+ struct compat_robust_list_head __user *, head,
|
|
+ compat_size_t, len)
|
|
+{
|
|
+ if (!futex_cmpxchg_enabled)
|
|
+ return -ENOSYS;
|
|
+
|
|
+ if (unlikely(len != sizeof(*head)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ current->compat_robust_list = head;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
|
|
+ compat_uptr_t __user *, head_ptr,
|
|
+ compat_size_t __user *, len_ptr)
|
|
+{
|
|
+ struct compat_robust_list_head __user *head;
|
|
+ unsigned long ret;
|
|
+ struct task_struct *p;
|
|
+
|
|
+ if (!futex_cmpxchg_enabled)
|
|
+ return -ENOSYS;
|
|
+
|
|
+ rcu_read_lock();
|
|
+
|
|
+ ret = -ESRCH;
|
|
+ if (!pid)
|
|
+ p = current;
|
|
+ else {
|
|
+ p = find_task_by_vpid(pid);
|
|
+ if (!p)
|
|
+ goto err_unlock;
|
|
+ }
|
|
+
|
|
+ ret = -EPERM;
|
|
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
|
|
+ goto err_unlock;
|
|
+
|
|
+ head = p->compat_robust_list;
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ if (put_user(sizeof(*head), len_ptr))
|
|
+ return -EFAULT;
|
|
+ return put_user(ptr_to_compat(head), head_ptr);
|
|
+
|
|
+err_unlock:
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
|
|
+ struct compat_timespec __user *, utime, u32 __user *, uaddr2,
|
|
+ u32, val3)
|
|
+{
|
|
+ struct timespec ts;
|
|
+ ktime_t t, *tp = NULL;
|
|
+ int val2 = 0;
|
|
+ int cmd = op & FUTEX_CMD_MASK;
|
|
+
|
|
+ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
|
|
+ cmd == FUTEX_WAIT_BITSET ||
|
|
+ cmd == FUTEX_WAIT_REQUEUE_PI)) {
|
|
+ if (compat_get_timespec(&ts, utime))
|
|
+ return -EFAULT;
|
|
+ if (!timespec_valid(&ts))
|
|
+ return -EINVAL;
|
|
+
|
|
+ t = timespec_to_ktime(ts);
|
|
+ if (cmd == FUTEX_WAIT)
|
|
+ t = ktime_add_safe(ktime_get(), t);
|
|
+ tp = &t;
|
|
+ }
|
|
+ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
|
|
+ cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
|
|
+ val2 = (int) (unsigned long) utime;
|
|
+
|
|
+ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
|
|
+}
|
|
+#endif /* CONFIG_COMPAT */
|
|
+
|
|
static void __init futex_detect_cmpxchg(void)
|
|
{
|
|
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
|
|
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
|
|
deleted file mode 100644
|
|
index 83f830acbb5f..000000000000
|
|
--- a/kernel/futex_compat.c
|
|
+++ /dev/null
|
|
@@ -1,202 +0,0 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
-/*
|
|
- * linux/kernel/futex_compat.c
|
|
- *
|
|
- * Futex compatibililty routines.
|
|
- *
|
|
- * Copyright 2006, Red Hat, Inc., Ingo Molnar
|
|
- */
|
|
-
|
|
-#include <linux/linkage.h>
|
|
-#include <linux/compat.h>
|
|
-#include <linux/nsproxy.h>
|
|
-#include <linux/futex.h>
|
|
-#include <linux/ptrace.h>
|
|
-#include <linux/syscalls.h>
|
|
-
|
|
-#include <linux/uaccess.h>
|
|
-
|
|
-
|
|
-/*
|
|
- * Fetch a robust-list pointer. Bit 0 signals PI futexes:
|
|
- */
|
|
-static inline int
|
|
-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
|
|
- compat_uptr_t __user *head, unsigned int *pi)
|
|
-{
|
|
- if (get_user(*uentry, head))
|
|
- return -EFAULT;
|
|
-
|
|
- *entry = compat_ptr((*uentry) & ~1);
|
|
- *pi = (unsigned int)(*uentry) & 1;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void __user *futex_uaddr(struct robust_list __user *entry,
|
|
- compat_long_t futex_offset)
|
|
-{
|
|
- compat_uptr_t base = ptr_to_compat(entry);
|
|
- void __user *uaddr = compat_ptr(base + futex_offset);
|
|
-
|
|
- return uaddr;
|
|
-}
|
|
-
|
|
-/*
|
|
- * Walk curr->robust_list (very carefully, it's a userspace list!)
|
|
- * and mark any locks found there dead, and notify any waiters.
|
|
- *
|
|
- * We silently return on any sign of list-walking problem.
|
|
- */
|
|
-void compat_exit_robust_list(struct task_struct *curr)
|
|
-{
|
|
- struct compat_robust_list_head __user *head = curr->compat_robust_list;
|
|
- struct robust_list __user *entry, *next_entry, *pending;
|
|
- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
|
|
- unsigned int uninitialized_var(next_pi);
|
|
- compat_uptr_t uentry, next_uentry, upending;
|
|
- compat_long_t futex_offset;
|
|
- int rc;
|
|
-
|
|
- if (!futex_cmpxchg_enabled)
|
|
- return;
|
|
-
|
|
- /*
|
|
- * Fetch the list head (which was registered earlier, via
|
|
- * sys_set_robust_list()):
|
|
- */
|
|
- if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
|
|
- return;
|
|
- /*
|
|
- * Fetch the relative futex offset:
|
|
- */
|
|
- if (get_user(futex_offset, &head->futex_offset))
|
|
- return;
|
|
- /*
|
|
- * Fetch any possibly pending lock-add first, and handle it
|
|
- * if it exists:
|
|
- */
|
|
- if (fetch_robust_entry(&upending, &pending,
|
|
- &head->list_op_pending, &pip))
|
|
- return;
|
|
-
|
|
- next_entry = NULL; /* avoid warning with gcc */
|
|
- while (entry != (struct robust_list __user *) &head->list) {
|
|
- /*
|
|
- * Fetch the next entry in the list before calling
|
|
- * handle_futex_death:
|
|
- */
|
|
- rc = fetch_robust_entry(&next_uentry, &next_entry,
|
|
- (compat_uptr_t __user *)&entry->next, &next_pi);
|
|
- /*
|
|
- * A pending lock might already be on the list, so
|
|
- * dont process it twice:
|
|
- */
|
|
- if (entry != pending) {
|
|
- void __user *uaddr = futex_uaddr(entry, futex_offset);
|
|
-
|
|
- if (handle_futex_death(uaddr, curr, pi))
|
|
- return;
|
|
- }
|
|
- if (rc)
|
|
- return;
|
|
- uentry = next_uentry;
|
|
- entry = next_entry;
|
|
- pi = next_pi;
|
|
- /*
|
|
- * Avoid excessively long or circular lists:
|
|
- */
|
|
- if (!--limit)
|
|
- break;
|
|
-
|
|
- cond_resched();
|
|
- }
|
|
- if (pending) {
|
|
- void __user *uaddr = futex_uaddr(pending, futex_offset);
|
|
-
|
|
- handle_futex_death(uaddr, curr, pip);
|
|
- }
|
|
-}
|
|
-
|
|
-COMPAT_SYSCALL_DEFINE2(set_robust_list,
|
|
- struct compat_robust_list_head __user *, head,
|
|
- compat_size_t, len)
|
|
-{
|
|
- if (!futex_cmpxchg_enabled)
|
|
- return -ENOSYS;
|
|
-
|
|
- if (unlikely(len != sizeof(*head)))
|
|
- return -EINVAL;
|
|
-
|
|
- current->compat_robust_list = head;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
|
|
- compat_uptr_t __user *, head_ptr,
|
|
- compat_size_t __user *, len_ptr)
|
|
-{
|
|
- struct compat_robust_list_head __user *head;
|
|
- unsigned long ret;
|
|
- struct task_struct *p;
|
|
-
|
|
- if (!futex_cmpxchg_enabled)
|
|
- return -ENOSYS;
|
|
-
|
|
- rcu_read_lock();
|
|
-
|
|
- ret = -ESRCH;
|
|
- if (!pid)
|
|
- p = current;
|
|
- else {
|
|
- p = find_task_by_vpid(pid);
|
|
- if (!p)
|
|
- goto err_unlock;
|
|
- }
|
|
-
|
|
- ret = -EPERM;
|
|
- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
|
|
- goto err_unlock;
|
|
-
|
|
- head = p->compat_robust_list;
|
|
- rcu_read_unlock();
|
|
-
|
|
- if (put_user(sizeof(*head), len_ptr))
|
|
- return -EFAULT;
|
|
- return put_user(ptr_to_compat(head), head_ptr);
|
|
-
|
|
-err_unlock:
|
|
- rcu_read_unlock();
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
|
|
- struct compat_timespec __user *, utime, u32 __user *, uaddr2,
|
|
- u32, val3)
|
|
-{
|
|
- struct timespec ts;
|
|
- ktime_t t, *tp = NULL;
|
|
- int val2 = 0;
|
|
- int cmd = op & FUTEX_CMD_MASK;
|
|
-
|
|
- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
|
|
- cmd == FUTEX_WAIT_BITSET ||
|
|
- cmd == FUTEX_WAIT_REQUEUE_PI)) {
|
|
- if (compat_get_timespec(&ts, utime))
|
|
- return -EFAULT;
|
|
- if (!timespec_valid(&ts))
|
|
- return -EINVAL;
|
|
-
|
|
- t = timespec_to_ktime(ts);
|
|
- if (cmd == FUTEX_WAIT)
|
|
- t = ktime_add_safe(ktime_get(), t);
|
|
- tp = &t;
|
|
- }
|
|
- if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
|
|
- cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
|
|
- val2 = (int) (unsigned long) utime;
|
|
-
|
|
- return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
|
|
-}
|
|
diff --git a/lib/genalloc.c b/lib/genalloc.c
|
|
index ca06adc4f445..7e85d1e37a6e 100644
|
|
--- a/lib/genalloc.c
|
|
+++ b/lib/genalloc.c
|
|
@@ -35,6 +35,7 @@
|
|
#include <linux/interrupt.h>
|
|
#include <linux/genalloc.h>
|
|
#include <linux/of_device.h>
|
|
+#include <linux/vmalloc.h>
|
|
|
|
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
|
|
{
|
|
@@ -187,7 +188,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
|
|
int nbytes = sizeof(struct gen_pool_chunk) +
|
|
BITS_TO_LONGS(nbits) * sizeof(long);
|
|
|
|
- chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
|
|
+ chunk = vzalloc_node(nbytes, nid);
|
|
if (unlikely(chunk == NULL))
|
|
return -ENOMEM;
|
|
|
|
@@ -251,7 +252,7 @@ void gen_pool_destroy(struct gen_pool *pool)
|
|
bit = find_next_bit(chunk->bits, end_bit, 0);
|
|
BUG_ON(bit < end_bit);
|
|
|
|
- kfree(chunk);
|
|
+ vfree(chunk);
|
|
}
|
|
kfree_const(pool->name);
|
|
kfree(pool);
|
|
@@ -311,7 +312,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
|
|
end_bit = chunk_size(chunk) >> order;
|
|
retry:
|
|
start_bit = algo(chunk->bits, end_bit, start_bit,
|
|
- nbits, data, pool);
|
|
+ nbits, data, pool, chunk->start_addr);
|
|
if (start_bit >= end_bit)
|
|
continue;
|
|
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
|
|
@@ -525,7 +526,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
|
|
*/
|
|
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
|
|
unsigned long start, unsigned int nr, void *data,
|
|
- struct gen_pool *pool)
|
|
+ struct gen_pool *pool, unsigned long start_addr)
|
|
{
|
|
return bitmap_find_next_zero_area(map, size, start, nr, 0);
|
|
}
|
|
@@ -543,16 +544,19 @@ EXPORT_SYMBOL(gen_pool_first_fit);
|
|
*/
|
|
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
|
|
unsigned long start, unsigned int nr, void *data,
|
|
- struct gen_pool *pool)
|
|
+ struct gen_pool *pool, unsigned long start_addr)
|
|
{
|
|
struct genpool_data_align *alignment;
|
|
- unsigned long align_mask;
|
|
+ unsigned long align_mask, align_off;
|
|
int order;
|
|
|
|
alignment = data;
|
|
order = pool->min_alloc_order;
|
|
align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
|
|
- return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
|
|
+ align_off = (start_addr & (alignment->align - 1)) >> order;
|
|
+
|
|
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
|
|
+ align_mask, align_off);
|
|
}
|
|
EXPORT_SYMBOL(gen_pool_first_fit_align);
|
|
|
|
@@ -567,7 +571,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
|
|
*/
|
|
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
|
|
unsigned long start, unsigned int nr, void *data,
|
|
- struct gen_pool *pool)
|
|
+ struct gen_pool *pool, unsigned long start_addr)
|
|
{
|
|
struct genpool_data_fixed *fixed_data;
|
|
int order;
|
|
@@ -601,7 +605,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
|
|
*/
|
|
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
|
|
unsigned long size, unsigned long start,
|
|
- unsigned int nr, void *data, struct gen_pool *pool)
|
|
+ unsigned int nr, void *data, struct gen_pool *pool,
|
|
+ unsigned long start_addr)
|
|
{
|
|
unsigned long align_mask = roundup_pow_of_two(nr) - 1;
|
|
|
|
@@ -624,7 +629,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
|
|
*/
|
|
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
|
|
unsigned long start, unsigned int nr, void *data,
|
|
- struct gen_pool *pool)
|
|
+ struct gen_pool *pool, unsigned long start_addr)
|
|
{
|
|
unsigned long start_bit = size;
|
|
unsigned long len = size + 1;
|
|
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
|
|
index d172f0341b80..ff00c816266b 100644
|
|
--- a/lib/radix-tree.c
|
|
+++ b/lib/radix-tree.c
|
|
@@ -2184,7 +2184,7 @@ void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
|
|
offset = radix_tree_find_next_bit(node, IDR_FREE,
|
|
offset + 1);
|
|
start = next_index(start, node, offset);
|
|
- if (start > max)
|
|
+ if (start > max || start == 0)
|
|
return ERR_PTR(-ENOSPC);
|
|
while (offset == RADIX_TREE_MAP_SIZE) {
|
|
offset = node->offset + 1;
|
|
diff --git a/mm/internal.h b/mm/internal.h
|
|
index 1df011f62480..a182506242c4 100644
|
|
--- a/mm/internal.h
|
|
+++ b/mm/internal.h
|
|
@@ -455,6 +455,16 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
|
|
#define NODE_RECLAIM_SOME 0
|
|
#define NODE_RECLAIM_SUCCESS 1
|
|
|
|
+#ifdef CONFIG_NUMA
|
|
+extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
|
|
+#else
|
|
+static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
|
|
+ unsigned int order)
|
|
+{
|
|
+ return NODE_RECLAIM_NOSCAN;
|
|
+}
|
|
+#endif
|
|
+
|
|
extern int hwpoison_filter(struct page *p);
|
|
|
|
extern u32 hwpoison_filter_dev_major;
|
|
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
|
|
index dfc86a0199da..1d8c834d9018 100644
|
|
--- a/net/bridge/netfilter/ebt_dnat.c
|
|
+++ b/net/bridge/netfilter/ebt_dnat.c
|
|
@@ -19,7 +19,6 @@ static unsigned int
|
|
ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
|
{
|
|
const struct ebt_nat_info *info = par->targinfo;
|
|
- struct net_device *dev;
|
|
|
|
if (!skb_make_writable(skb, 0))
|
|
return EBT_DROP;
|
|
@@ -32,10 +31,22 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
|
else
|
|
skb->pkt_type = PACKET_MULTICAST;
|
|
} else {
|
|
- if (xt_hooknum(par) != NF_BR_BROUTING)
|
|
- dev = br_port_get_rcu(xt_in(par))->br->dev;
|
|
- else
|
|
+ const struct net_device *dev;
|
|
+
|
|
+ switch (xt_hooknum(par)) {
|
|
+ case NF_BR_BROUTING:
|
|
dev = xt_in(par);
|
|
+ break;
|
|
+ case NF_BR_PRE_ROUTING:
|
|
+ dev = br_port_get_rcu(xt_in(par))->br->dev;
|
|
+ break;
|
|
+ default:
|
|
+ dev = NULL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (!dev) /* NF_BR_LOCAL_OUT */
|
|
+ return info->target;
|
|
|
|
if (ether_addr_equal(info->mac, dev->dev_addr))
|
|
skb->pkt_type = PACKET_HOST;
|
|
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
|
|
index eb3efeabac91..2664ad58e5c0 100644
|
|
--- a/net/core/neighbour.c
|
|
+++ b/net/core/neighbour.c
|
|
@@ -18,6 +18,7 @@
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
#include <linux/slab.h>
|
|
+#include <linux/kmemleak.h>
|
|
#include <linux/types.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
@@ -361,12 +362,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
|
|
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
|
|
if (!ret)
|
|
return NULL;
|
|
- if (size <= PAGE_SIZE)
|
|
+ if (size <= PAGE_SIZE) {
|
|
buckets = kzalloc(size, GFP_ATOMIC);
|
|
- else
|
|
+ } else {
|
|
buckets = (struct neighbour __rcu **)
|
|
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
|
|
get_order(size));
|
|
+ kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
|
|
+ }
|
|
if (!buckets) {
|
|
kfree(ret);
|
|
return NULL;
|
|
@@ -386,10 +389,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
|
|
size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
|
|
struct neighbour __rcu **buckets = nht->hash_buckets;
|
|
|
|
- if (size <= PAGE_SIZE)
|
|
+ if (size <= PAGE_SIZE) {
|
|
kfree(buckets);
|
|
- else
|
|
+ } else {
|
|
+ kmemleak_free(buckets);
|
|
free_pages((unsigned long)buckets, get_order(size));
|
|
+ }
|
|
kfree(nht);
|
|
}
|
|
|
|
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
|
|
index 60b88718b1d4..1af25d53f63c 100644
|
|
--- a/net/core/net_namespace.c
|
|
+++ b/net/core/net_namespace.c
|
|
@@ -854,7 +854,8 @@ static int __init net_ns_init(void)
|
|
|
|
mutex_unlock(&net_mutex);
|
|
|
|
- register_pernet_subsys(&net_ns_ops);
|
|
+ if (register_pernet_subsys(&net_ns_ops))
|
|
+ panic("Could not register network namespace subsystems");
|
|
|
|
rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
|
|
RTNL_FLAG_DOIT_UNLOCKED);
|
|
diff --git a/net/core/sock.c b/net/core/sock.c
|
|
index 7ccbcd853cbc..90ccbbf9e6b0 100644
|
|
--- a/net/core/sock.c
|
|
+++ b/net/core/sock.c
|
|
@@ -2357,7 +2357,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
|
|
}
|
|
|
|
if (sk_has_memory_pressure(sk)) {
|
|
- int alloc;
|
|
+ u64 alloc;
|
|
|
|
if (!sk_under_memory_pressure(sk))
|
|
return 1;
|
|
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
|
|
index df042b6d80b8..22876a197ebe 100644
|
|
--- a/net/decnet/dn_dev.c
|
|
+++ b/net/decnet/dn_dev.c
|
|
@@ -56,7 +56,7 @@
|
|
#include <net/dn_neigh.h>
|
|
#include <net/dn_fib.h>
|
|
|
|
-#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))
|
|
+#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn))
|
|
|
|
static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
|
|
static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
|
|
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
|
|
index fabc299cb875..7a31287ff123 100644
|
|
--- a/net/ipv4/ip_tunnel.c
|
|
+++ b/net/ipv4/ip_tunnel.c
|
|
@@ -661,13 +661,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
dst = tnl_params->daddr;
|
|
if (dst == 0) {
|
|
/* NBMA tunnel */
|
|
+ struct ip_tunnel_info *tun_info;
|
|
|
|
if (!skb_dst(skb)) {
|
|
dev->stats.tx_fifo_errors++;
|
|
goto tx_error;
|
|
}
|
|
|
|
- if (skb->protocol == htons(ETH_P_IP)) {
|
|
+ tun_info = skb_tunnel_info(skb);
|
|
+ if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
|
|
+ ip_tunnel_info_af(tun_info) == AF_INET &&
|
|
+ tun_info->key.u.ipv4.dst)
|
|
+ dst = tun_info->key.u.ipv4.dst;
|
|
+ else if (skb->protocol == htons(ETH_P_IP)) {
|
|
rt = skb_rtable(skb);
|
|
dst = rt_nexthop(rt, inner_iph->daddr);
|
|
}
|
|
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
|
|
index f1b496222bda..1a86974b02e3 100644
|
|
--- a/net/mac80211/sta_info.c
|
|
+++ b/net/mac80211/sta_info.c
|
|
@@ -2313,7 +2313,8 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
|
|
{
|
|
struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
|
|
|
|
- if (time_after(stats->last_rx, sta->status_stats.last_ack))
|
|
+ if (!sta->status_stats.last_ack ||
|
|
+ time_after(stats->last_rx, sta->status_stats.last_ack))
|
|
return stats->last_rx;
|
|
return sta->status_stats.last_ack;
|
|
}
|
|
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
|
|
index 2de2a923ff2b..3248cf04d0b3 100644
|
|
--- a/net/openvswitch/datapath.c
|
|
+++ b/net/openvswitch/datapath.c
|
|
@@ -724,9 +724,13 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
|
|
{
|
|
size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
|
|
|
|
- /* OVS_FLOW_ATTR_UFID */
|
|
+ /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
|
|
+ * see ovs_nla_put_identifier()
|
|
+ */
|
|
if (sfid && ovs_identifier_is_ufid(sfid))
|
|
len += nla_total_size(sfid->ufid_len);
|
|
+ else
|
|
+ len += nla_total_size(ovs_key_attr_size());
|
|
|
|
/* OVS_FLOW_ATTR_KEY */
|
|
if (!sfid || should_fill_key(sfid, ufid_flags))
|
|
@@ -902,7 +906,10 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
|
|
retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
|
|
info->snd_portid, info->snd_seq, 0,
|
|
cmd, ufid_flags);
|
|
- BUG_ON(retval < 0);
|
|
+ if (WARN_ON_ONCE(retval < 0)) {
|
|
+ kfree_skb(skb);
|
|
+ skb = ERR_PTR(retval);
|
|
+ }
|
|
return skb;
|
|
}
|
|
|
|
@@ -1365,7 +1372,10 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
|
|
OVS_FLOW_CMD_DEL,
|
|
ufid_flags);
|
|
rcu_read_unlock();
|
|
- BUG_ON(err < 0);
|
|
+ if (WARN_ON_ONCE(err < 0)) {
|
|
+ kfree_skb(reply);
|
|
+ goto out_free;
|
|
+ }
|
|
|
|
ovs_notify(&dp_flow_genl_family, reply, info);
|
|
} else {
|
|
@@ -1373,6 +1383,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
|
|
}
|
|
}
|
|
|
|
+out_free:
|
|
ovs_flow_free(flow, true);
|
|
return 0;
|
|
unlock:
|
|
diff --git a/net/psample/psample.c b/net/psample/psample.c
|
|
index 4cea353221da..30e8239bd774 100644
|
|
--- a/net/psample/psample.c
|
|
+++ b/net/psample/psample.c
|
|
@@ -223,7 +223,7 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
|
|
data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
|
|
- NLA_ALIGNTO;
|
|
|
|
- nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
|
|
+ nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
|
|
if (unlikely(!nl_skb))
|
|
return;
|
|
|
|
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
|
|
index f3a3e507422b..442ac9c3f16f 100644
|
|
--- a/net/sched/sch_mq.c
|
|
+++ b/net/sched/sch_mq.c
|
|
@@ -191,7 +191,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
|
|
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
|
|
|
|
sch = dev_queue->qdisc_sleeping;
|
|
- if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
|
|
+ if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
|
|
+ &sch->bstats) < 0 ||
|
|
gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
|
|
return -1;
|
|
return 0;
|
|
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
|
|
index 6bcdfe6e7b63..bb8d3fbc13bb 100644
|
|
--- a/net/sched/sch_mqprio.c
|
|
+++ b/net/sched/sch_mqprio.c
|
|
@@ -366,8 +366,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
|
|
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
|
|
|
|
sch = dev_queue->qdisc_sleeping;
|
|
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
|
|
- d, NULL, &sch->bstats) < 0 ||
|
|
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
|
|
+ sch->cpu_bstats, &sch->bstats) < 0 ||
|
|
gnet_stats_copy_queue(d, NULL,
|
|
&sch->qstats, sch->q.qlen) < 0)
|
|
return -1;
|
|
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
|
|
index ff4fc3e0facd..65aa03d46857 100644
|
|
--- a/net/sched/sch_multiq.c
|
|
+++ b/net/sched/sch_multiq.c
|
|
@@ -340,7 +340,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
|
|
|
|
cl_q = q->queues[cl - 1];
|
|
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
|
|
- d, NULL, &cl_q->bstats) < 0 ||
|
|
+ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
|
|
gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
|
|
return -1;
|
|
|
|
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
|
|
index 2dd6c68ae91e..c60777351de1 100644
|
|
--- a/net/sched/sch_prio.c
|
|
+++ b/net/sched/sch_prio.c
|
|
@@ -298,7 +298,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
|
|
|
|
cl_q = q->queues[cl - 1];
|
|
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
|
|
- d, NULL, &cl_q->bstats) < 0 ||
|
|
+ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
|
|
gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
|
|
return -1;
|
|
|
|
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
|
|
index 23fec3817e0c..dd1a3bd80be5 100644
|
|
--- a/net/sctp/associola.c
|
|
+++ b/net/sctp/associola.c
|
|
@@ -80,6 +80,7 @@ static struct sctp_association *sctp_association_init(
|
|
/* Discarding const is appropriate here. */
|
|
asoc->ep = (struct sctp_endpoint *)ep;
|
|
asoc->base.sk = (struct sock *)sk;
|
|
+ asoc->base.net = sock_net(sk);
|
|
|
|
sctp_endpoint_hold(asoc->ep);
|
|
sock_hold(asoc->base.sk);
|
|
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
|
|
index 5d4079ef3de6..c71b4191df1e 100644
|
|
--- a/net/sctp/endpointola.c
|
|
+++ b/net/sctp/endpointola.c
|
|
@@ -165,6 +165,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
|
|
|
|
/* Remember who we are attached to. */
|
|
ep->base.sk = sk;
|
|
+ ep->base.net = sock_net(sk);
|
|
sock_hold(ep->base.sk);
|
|
|
|
return ep;
|
|
diff --git a/net/sctp/input.c b/net/sctp/input.c
|
|
index 0247cc432e02..3c0affecf272 100644
|
|
--- a/net/sctp/input.c
|
|
+++ b/net/sctp/input.c
|
|
@@ -813,7 +813,7 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
|
|
if (!sctp_transport_hold(t))
|
|
return err;
|
|
|
|
- if (!net_eq(sock_net(t->asoc->base.sk), x->net))
|
|
+ if (!net_eq(t->asoc->base.net, x->net))
|
|
goto out;
|
|
if (x->lport != htons(t->asoc->base.bind_addr.port))
|
|
goto out;
|
|
@@ -828,7 +828,7 @@ static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
|
|
{
|
|
const struct sctp_transport *t = data;
|
|
const union sctp_addr *paddr = &t->ipaddr;
|
|
- const struct net *net = sock_net(t->asoc->base.sk);
|
|
+ const struct net *net = t->asoc->base.net;
|
|
__be16 lport = htons(t->asoc->base.bind_addr.port);
|
|
__u32 addr;
|
|
|
|
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
|
|
index 43105cf04bc4..274df899e7bf 100644
|
|
--- a/net/sctp/transport.c
|
|
+++ b/net/sctp/transport.c
|
|
@@ -210,7 +210,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
|
|
|
|
/* When a data chunk is sent, reset the heartbeat interval. */
|
|
expires = jiffies + sctp_transport_timeout(transport);
|
|
- if (time_before(transport->hb_timer.expires, expires) &&
|
|
+ if ((time_before(transport->hb_timer.expires, expires) ||
|
|
+ !timer_pending(&transport->hb_timer)) &&
|
|
!mod_timer(&transport->hb_timer,
|
|
expires + prandom_u32_max(transport->rto)))
|
|
sctp_transport_hold(transport);
|
|
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
|
|
index f04a037dc967..0de788fa43e9 100644
|
|
--- a/net/smc/smc_core.c
|
|
+++ b/net/smc/smc_core.c
|
|
@@ -103,6 +103,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
|
|
struct smc_link_group *lgr = conn->lgr;
|
|
int reduced = 0;
|
|
|
|
+ if (!lgr)
|
|
+ return;
|
|
write_lock_bh(&lgr->conns_lock);
|
|
if (conn->alert_token_local) {
|
|
reduced = 1;
|
|
@@ -431,6 +433,8 @@ int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
|
|
local_contact = SMC_REUSE_CONTACT;
|
|
conn->lgr = lgr;
|
|
smc_lgr_register_conn(conn); /* add smc conn to lgr */
|
|
+ if (delayed_work_pending(&lgr->free_work))
|
|
+ cancel_delayed_work(&lgr->free_work);
|
|
write_unlock_bh(&lgr->conns_lock);
|
|
break;
|
|
}
|
|
diff --git a/net/tipc/link.c b/net/tipc/link.c
|
|
index 631bfc7e9127..da749916faac 100644
|
|
--- a/net/tipc/link.c
|
|
+++ b/net/tipc/link.c
|
|
@@ -1073,7 +1073,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
|
|
default:
|
|
pr_warn("Dropping received illegal msg type\n");
|
|
kfree_skb(skb);
|
|
- return false;
|
|
+ return true;
|
|
};
|
|
}
|
|
|
|
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
|
|
index ad4dcc663c6d..fa0522cd683e 100644
|
|
--- a/net/tipc/netlink_compat.c
|
|
+++ b/net/tipc/netlink_compat.c
|
|
@@ -539,7 +539,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
|
|
if (len <= 0)
|
|
return -EINVAL;
|
|
|
|
- len = min_t(int, len, TIPC_MAX_BEARER_NAME);
|
|
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
|
|
if (!string_is_valid(name, len))
|
|
return -EINVAL;
|
|
|
|
@@ -821,7 +821,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
|
|
if (len <= 0)
|
|
return -EINVAL;
|
|
|
|
- len = min_t(int, len, TIPC_MAX_BEARER_NAME);
|
|
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
|
|
if (!string_is_valid(name, len))
|
|
return -EINVAL;
|
|
|
|
@@ -974,6 +974,10 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
|
|
|
|
hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
|
|
TIPC_NL_PUBL_GET);
|
|
+ if (!hdr) {
|
|
+ kfree_skb(args);
|
|
+ return -EMSGSIZE;
|
|
+ }
|
|
|
|
nest = nla_nest_start(args, TIPC_NLA_SOCK);
|
|
if (!nest) {
|
|
@@ -1021,8 +1025,11 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
|
|
u32 node;
|
|
struct nlattr *con[TIPC_NLA_CON_MAX + 1];
|
|
|
|
- nla_parse_nested(con, TIPC_NLA_CON_MAX,
|
|
- sock[TIPC_NLA_SOCK_CON], NULL, NULL);
|
|
+ err = nla_parse_nested(con, TIPC_NLA_CON_MAX,
|
|
+ sock[TIPC_NLA_SOCK_CON], NULL, NULL);
|
|
+
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
|
|
tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
|
|
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
|
|
index 1939b77e98b7..73eac97e19fb 100644
|
|
--- a/net/vmw_vsock/af_vsock.c
|
|
+++ b/net/vmw_vsock/af_vsock.c
|
|
@@ -107,6 +107,7 @@
|
|
#include <linux/mutex.h>
|
|
#include <linux/net.h>
|
|
#include <linux/poll.h>
|
|
+#include <linux/random.h>
|
|
#include <linux/skbuff.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/socket.h>
|
|
@@ -487,9 +488,13 @@ out:
|
|
static int __vsock_bind_stream(struct vsock_sock *vsk,
|
|
struct sockaddr_vm *addr)
|
|
{
|
|
- static u32 port = LAST_RESERVED_PORT + 1;
|
|
+ static u32 port = 0;
|
|
struct sockaddr_vm new_addr;
|
|
|
|
+ if (!port)
|
|
+ port = LAST_RESERVED_PORT + 1 +
|
|
+ prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
|
|
+
|
|
vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
|
|
|
|
if (addr->svm_port == VMADDR_PORT_ANY) {
|
|
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
|
|
index bd16e6882017..190ca59d5ba3 100644
|
|
--- a/net/xfrm/xfrm_state.c
|
|
+++ b/net/xfrm/xfrm_state.c
|
|
@@ -449,6 +449,8 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
|
|
x->type->destructor(x);
|
|
xfrm_put_type(x->type);
|
|
}
|
|
+ if (x->xfrag.page)
|
|
+ put_page(x->xfrag.page);
|
|
xfrm_dev_state_free(x);
|
|
security_xfrm_state_free(x);
|
|
kfree(x);
|
|
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
|
|
index ca495686b9c3..f8c7249fa705 100644
|
|
--- a/samples/vfio-mdev/mtty.c
|
|
+++ b/samples/vfio-mdev/mtty.c
|
|
@@ -171,7 +171,7 @@ static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
|
|
return NULL;
|
|
}
|
|
|
|
-void dump_buffer(char *buf, uint32_t count)
|
|
+void dump_buffer(u8 *buf, uint32_t count)
|
|
{
|
|
#if defined(DEBUG)
|
|
int i;
|
|
@@ -250,7 +250,7 @@ static void mtty_create_config_space(struct mdev_state *mdev_state)
|
|
}
|
|
|
|
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
|
|
- char *buf, u32 count)
|
|
+ u8 *buf, u32 count)
|
|
{
|
|
u32 cfg_addr, bar_mask, bar_index = 0;
|
|
|
|
@@ -304,7 +304,7 @@ static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
|
|
}
|
|
|
|
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
|
|
- u16 offset, char *buf, u32 count)
|
|
+ u16 offset, u8 *buf, u32 count)
|
|
{
|
|
u8 data = *buf;
|
|
|
|
@@ -475,7 +475,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
|
|
}
|
|
|
|
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
|
|
- u16 offset, char *buf, u32 count)
|
|
+ u16 offset, u8 *buf, u32 count)
|
|
{
|
|
/* Handle read requests by guest */
|
|
switch (offset) {
|
|
@@ -650,7 +650,7 @@ static void mdev_read_base(struct mdev_state *mdev_state)
|
|
}
|
|
}
|
|
|
|
-static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
|
|
+static ssize_t mdev_access(struct mdev_device *mdev, u8 *buf, size_t count,
|
|
loff_t pos, bool is_write)
|
|
{
|
|
struct mdev_state *mdev_state;
|
|
@@ -698,7 +698,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
|
|
#if defined(DEBUG_REGS)
|
|
pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
|
|
__func__, index, offset, wr_reg[offset],
|
|
- (u8)*buf, mdev_state->s[index].dlab);
|
|
+ *buf, mdev_state->s[index].dlab);
|
|
#endif
|
|
handle_bar_write(index, mdev_state, offset, buf, count);
|
|
} else {
|
|
@@ -708,7 +708,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
|
|
#if defined(DEBUG_REGS)
|
|
pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
|
|
__func__, index, offset, rd_reg[offset],
|
|
- (u8)*buf, mdev_state->s[index].dlab);
|
|
+ *buf, mdev_state->s[index].dlab);
|
|
#endif
|
|
}
|
|
break;
|
|
@@ -827,7 +827,7 @@ ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
|
|
if (count >= 4 && !(*ppos % 4)) {
|
|
u32 val;
|
|
|
|
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
|
|
+ ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
|
|
*ppos, false);
|
|
if (ret <= 0)
|
|
goto read_err;
|
|
@@ -839,7 +839,7 @@ ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
|
|
} else if (count >= 2 && !(*ppos % 2)) {
|
|
u16 val;
|
|
|
|
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
|
|
+ ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
|
|
*ppos, false);
|
|
if (ret <= 0)
|
|
goto read_err;
|
|
@@ -851,7 +851,7 @@ ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
|
|
} else {
|
|
u8 val;
|
|
|
|
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
|
|
+ ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
|
|
*ppos, false);
|
|
if (ret <= 0)
|
|
goto read_err;
|
|
@@ -889,7 +889,7 @@ ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
|
|
if (copy_from_user(&val, buf, sizeof(val)))
|
|
goto write_err;
|
|
|
|
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
|
|
+ ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
|
|
*ppos, true);
|
|
if (ret <= 0)
|
|
goto write_err;
|
|
@@ -901,7 +901,7 @@ ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
|
|
if (copy_from_user(&val, buf, sizeof(val)))
|
|
goto write_err;
|
|
|
|
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
|
|
+ ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
|
|
*ppos, true);
|
|
if (ret <= 0)
|
|
goto write_err;
|
|
@@ -913,7 +913,7 @@ ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
|
|
if (copy_from_user(&val, buf, sizeof(val)))
|
|
goto write_err;
|
|
|
|
- ret = mdev_access(mdev, (char *)&val, sizeof(val),
|
|
+ ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
|
|
*ppos, true);
|
|
if (ret <= 0)
|
|
goto write_err;
|
|
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
|
|
index 004b0ac7fa72..4644f1a83b57 100644
|
|
--- a/scripts/gdb/linux/symbols.py
|
|
+++ b/scripts/gdb/linux/symbols.py
|
|
@@ -99,7 +99,8 @@ lx-symbols command."""
|
|
attrs[n]['name'].string(): attrs[n]['address']
|
|
for n in range(int(sect_attrs['nsections']))}
|
|
args = []
|
|
- for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
|
|
+ for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
|
|
+ ".text", ".text.hot", ".text.unlikely"]:
|
|
address = section_name_to_address.get(section_name)
|
|
if address:
|
|
args.append(" -s {name} {addr}".format(
|
|
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
|
|
index dd746bd69a9b..c106988c1b25 100644
|
|
--- a/security/apparmor/apparmorfs.c
|
|
+++ b/security/apparmor/apparmorfs.c
|
|
@@ -363,6 +363,7 @@ static void aafs_remove(struct dentry *dentry)
|
|
simple_rmdir(dir, dentry);
|
|
else
|
|
simple_unlink(dir, dentry);
|
|
+ d_delete(dentry);
|
|
dput(dentry);
|
|
}
|
|
inode_unlock(dir);
|
|
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
|
|
index 2e2d18468491..7ae8e24dc1e6 100644
|
|
--- a/sound/core/compress_offload.c
|
|
+++ b/sound/core/compress_offload.c
|
|
@@ -529,7 +529,7 @@ static int snd_compress_check_input(struct snd_compr_params *params)
|
|
{
|
|
/* first let's check the buffer parameter's */
|
|
if (params->buffer.fragment_size == 0 ||
|
|
- params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
|
|
+ params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
|
|
params->buffer.fragments == 0)
|
|
return -EINVAL;
|
|
|
|
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
|
|
index 0b9b014b4bb6..969283737787 100644
|
|
--- a/sound/soc/codecs/msm8916-wcd-analog.c
|
|
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
|
|
@@ -303,7 +303,7 @@ struct pm8916_wcd_analog_priv {
|
|
};
|
|
|
|
static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
|
|
-static const char *const rdac2_mux_text[] = { "ZERO", "RX2", "RX1" };
|
|
+static const char *const rdac2_mux_text[] = { "RX1", "RX2" };
|
|
static const char *const hph_text[] = { "ZERO", "Switch", };
|
|
|
|
static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
|
|
@@ -318,7 +318,7 @@ static const struct soc_enum adc2_enum = SOC_ENUM_SINGLE_VIRT(
|
|
|
|
/* RDAC2 MUX */
|
|
static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE(
|
|
- CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 3, rdac2_mux_text);
|
|
+ CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 2, rdac2_mux_text);
|
|
|
|
static const struct snd_kcontrol_new spkr_switch[] = {
|
|
SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0)
|
|
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
|
|
index 105a73cc5158..149b7cba10fb 100644
|
|
--- a/sound/soc/kirkwood/kirkwood-i2s.c
|
|
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
|
|
@@ -569,10 +569,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
|
|
return PTR_ERR(priv->clk);
|
|
}
|
|
|
|
- err = clk_prepare_enable(priv->clk);
|
|
- if (err < 0)
|
|
- return err;
|
|
-
|
|
priv->extclk = devm_clk_get(&pdev->dev, "extclk");
|
|
if (IS_ERR(priv->extclk)) {
|
|
if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
|
|
@@ -588,6 +584,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
|
|
}
|
|
}
|
|
|
|
+ err = clk_prepare_enable(priv->clk);
|
|
+ if (err < 0)
|
|
+ return err;
|
|
+
|
|
/* Some sensible defaults - this reflects the powerup values */
|
|
priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24;
|
|
priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;
|
|
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
|
|
index 6d0bf78d114d..aa2b1196171a 100644
|
|
--- a/sound/soc/stm/stm32_i2s.c
|
|
+++ b/sound/soc/stm/stm32_i2s.c
|
|
@@ -246,8 +246,8 @@ static irqreturn_t stm32_i2s_isr(int irq, void *devid)
|
|
return IRQ_NONE;
|
|
}
|
|
|
|
- regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
|
|
- I2S_IFCR_MASK, flags);
|
|
+ regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG,
|
|
+ I2S_IFCR_MASK, flags);
|
|
|
|
if (flags & I2S_SR_OVR) {
|
|
dev_dbg(&pdev->dev, "Overrun\n");
|
|
@@ -276,7 +276,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
|
|
case STM32_I2S_CFG2_REG:
|
|
case STM32_I2S_IER_REG:
|
|
case STM32_I2S_SR_REG:
|
|
- case STM32_I2S_IFCR_REG:
|
|
case STM32_I2S_TXDR_REG:
|
|
case STM32_I2S_RXDR_REG:
|
|
case STM32_I2S_CGFR_REG:
|
|
@@ -488,7 +487,7 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai,
|
|
{
|
|
struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
|
|
int format = params_width(params);
|
|
- u32 cfgr, cfgr_mask, cfg1, cfg1_mask;
|
|
+ u32 cfgr, cfgr_mask, cfg1;
|
|
unsigned int fthlv;
|
|
int ret;
|
|
|
|
@@ -501,7 +500,7 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai,
|
|
switch (format) {
|
|
case 16:
|
|
cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_16);
|
|
- cfgr_mask = I2S_CGFR_DATLEN_MASK;
|
|
+ cfgr_mask = I2S_CGFR_DATLEN_MASK | I2S_CGFR_CHLEN;
|
|
break;
|
|
case 32:
|
|
cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_32) |
|
|
@@ -529,15 +528,11 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai,
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
- cfg1 = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN;
|
|
- cfg1_mask = cfg1;
|
|
-
|
|
fthlv = STM32_I2S_FIFO_SIZE * I2S_FIFO_TH_ONE_QUARTER / 4;
|
|
- cfg1 |= I2S_CFG1_FTHVL_SET(fthlv - 1);
|
|
- cfg1_mask |= I2S_CFG1_FTHVL_MASK;
|
|
+ cfg1 = I2S_CFG1_FTHVL_SET(fthlv - 1);
|
|
|
|
return regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG,
|
|
- cfg1_mask, cfg1);
|
|
+ I2S_CFG1_FTHVL_MASK, cfg1);
|
|
}
|
|
|
|
static int stm32_i2s_startup(struct snd_pcm_substream *substream,
|
|
@@ -551,8 +546,8 @@ static int stm32_i2s_startup(struct snd_pcm_substream *substream,
|
|
i2s->refcount++;
|
|
spin_unlock(&i2s->lock_fd);
|
|
|
|
- return regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
|
|
- I2S_IFCR_MASK, I2S_IFCR_MASK);
|
|
+ return regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG,
|
|
+ I2S_IFCR_MASK, I2S_IFCR_MASK);
|
|
}
|
|
|
|
static int stm32_i2s_hw_params(struct snd_pcm_substream *substream,
|
|
@@ -589,6 +584,10 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
|
|
/* Enable i2s */
|
|
dev_dbg(cpu_dai->dev, "start I2S\n");
|
|
|
|
+ cfg1_mask = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN;
|
|
+ regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG,
|
|
+ cfg1_mask, cfg1_mask);
|
|
+
|
|
ret = regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG,
|
|
I2S_CR1_SPE, I2S_CR1_SPE);
|
|
if (ret < 0) {
|
|
@@ -603,8 +602,8 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
|
|
return ret;
|
|
}
|
|
|
|
- regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
|
|
- I2S_IFCR_MASK, I2S_IFCR_MASK);
|
|
+ regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG,
|
|
+ I2S_IFCR_MASK, I2S_IFCR_MASK);
|
|
|
|
if (playback_flg) {
|
|
ier = I2S_IER_UDRIE;
|
|
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
|
|
index cdaacdf7bc87..deff4b3eb972 100644
|
|
--- a/virt/kvm/kvm_main.c
|
|
+++ b/virt/kvm/kvm_main.c
|
|
@@ -3989,7 +3989,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
|
|
}
|
|
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
|
|
|
|
- if (kvm->debugfs_dentry) {
|
|
+ if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
|
|
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
|
|
|
|
if (p) {
|