Mirror of https://github.com/armbian/build.git (synced 2025-08-15 23:56:57 +02:00)
diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
index 76ff9d7398fda7..f88a24d621dd43 100644
--- a/Documentation/arch/arm64/elf_hwcaps.rst
+++ b/Documentation/arch/arm64/elf_hwcaps.rst
@@ -174,22 +174,28 @@ HWCAP2_DCPODP
Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.

HWCAP2_SVE2
- Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.SVEver == 0b0001.

HWCAP2_SVEAES
- Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.AES == 0b0001.

HWCAP2_SVEPMULL
- Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.AES == 0b0010.

HWCAP2_SVEBITPERM
- Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.BitPerm == 0b0001.

HWCAP2_SVESHA3
- Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.SHA3 == 0b0001.

HWCAP2_SVESM4
- Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.SM4 == 0b0001.

HWCAP2_FLAGM2
Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0010.
@@ -198,16 +204,20 @@ HWCAP2_FRINT
Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.

HWCAP2_SVEI8MM
- Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.I8MM == 0b0001.

HWCAP2_SVEF32MM
- Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.F32MM == 0b0001.

HWCAP2_SVEF64MM
- Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.F64MM == 0b0001.

HWCAP2_SVEBF16
- Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.BF16 == 0b0001.

HWCAP2_I8MM
Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
@@ -273,7 +283,8 @@ HWCAP2_EBF16
Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.

HWCAP2_SVE_EBF16
- Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0010.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.BF16 == 0b0010.

HWCAP2_CSSC
Functionality implied by ID_AA64ISAR2_EL1.CSSC == 0b0001.
@@ -282,7 +293,8 @@ HWCAP2_RPRFM
Functionality implied by ID_AA64ISAR2_EL1.RPRFM == 0b0001.

HWCAP2_SVE2P1
- Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0010.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
+ ID_AA64ZFR0_EL1.SVEver == 0b0010.

HWCAP2_SME2
Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0001.
diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
index 9ea8ac0786acce..a72175a0910ba5 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
@@ -22,7 +22,7 @@ description:
Each sub-node is identified using the node's name, with valid values listed
for each of the pmics below.

- For mp5496, s1, s2
+ For mp5496, s1, s2, l2, l5

For pm2250, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22
diff --git a/Makefile b/Makefile
index 1d777c3eb7fb97..de16ab06861410 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 78
+SUBLEVEL = 79
EXTRAVERSION =
NAME = Pinguïn Aangedreven

@@ -1054,8 +1054,8 @@ LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
endif

# Align the bit size of userspace programs with the kernel
-KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
-KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))

# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
@@ -1348,18 +1348,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
endif

-# Clear a bunch of variables before executing the submake
-ifeq ($(quiet),silent_)
-tools_silent=s
-endif
-
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
+ $(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/

tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
+ $(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*

# ---------------------------------------------------------------------------
# Kernel selftest
diff --git a/arch/alpha/include/uapi/asm/ptrace.h b/arch/alpha/include/uapi/asm/ptrace.h
index 5ca45934fcbb82..72ed913a910f25 100644
--- a/arch/alpha/include/uapi/asm/ptrace.h
+++ b/arch/alpha/include/uapi/asm/ptrace.h
@@ -42,6 +42,8 @@ struct pt_regs {
unsigned long trap_a0;
unsigned long trap_a1;
unsigned long trap_a2;
+/* This makes the stack 16-byte aligned as GCC expects */
+ unsigned long __pad0;
/* These are saved by PAL-code: */
unsigned long ps;
unsigned long pc;
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
index b121294bee2663..11c35cf45b4610 100644
--- a/arch/alpha/kernel/asm-offsets.c
+++ b/arch/alpha/kernel/asm-offsets.c
@@ -34,7 +34,9 @@ void foo(void)
DEFINE(CRED_EGID, offsetof(struct cred, egid));
BLANK();

+ DEFINE(SP_OFF, offsetof(struct pt_regs, ps));
DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
+ DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
DEFINE(PT_PTRACED, PT_PTRACED);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
|
|
index eb51f93a70c8f1..602f701a1c3963 100644
|
|
--- a/arch/alpha/kernel/entry.S
|
|
+++ b/arch/alpha/kernel/entry.S
|
|
@@ -15,10 +15,6 @@
|
|
.set noat
|
|
.cfi_sections .debug_frame
|
|
|
|
-/* Stack offsets. */
|
|
-#define SP_OFF 184
|
|
-#define SWITCH_STACK_SIZE 64
|
|
-
|
|
.macro CFI_START_OSF_FRAME func
|
|
.align 4
|
|
.globl \func
|
|
@@ -198,8 +194,8 @@ CFI_END_OSF_FRAME entArith
|
|
CFI_START_OSF_FRAME entMM
|
|
SAVE_ALL
|
|
/* save $9 - $15 so the inline exception code can manipulate them. */
|
|
- subq $sp, 56, $sp
|
|
- .cfi_adjust_cfa_offset 56
|
|
+ subq $sp, 64, $sp
|
|
+ .cfi_adjust_cfa_offset 64
|
|
stq $9, 0($sp)
|
|
stq $10, 8($sp)
|
|
stq $11, 16($sp)
|
|
@@ -214,7 +210,7 @@ CFI_START_OSF_FRAME entMM
|
|
.cfi_rel_offset $13, 32
|
|
.cfi_rel_offset $14, 40
|
|
.cfi_rel_offset $15, 48
|
|
- addq $sp, 56, $19
|
|
+ addq $sp, 64, $19
|
|
/* handle the fault */
|
|
lda $8, 0x3fff
|
|
bic $sp, $8, $8
|
|
@@ -227,7 +223,7 @@ CFI_START_OSF_FRAME entMM
|
|
ldq $13, 32($sp)
|
|
ldq $14, 40($sp)
|
|
ldq $15, 48($sp)
|
|
- addq $sp, 56, $sp
|
|
+ addq $sp, 64, $sp
|
|
.cfi_restore $9
|
|
.cfi_restore $10
|
|
.cfi_restore $11
|
|
@@ -235,7 +231,7 @@ CFI_START_OSF_FRAME entMM
|
|
.cfi_restore $13
|
|
.cfi_restore $14
|
|
.cfi_restore $15
|
|
- .cfi_adjust_cfa_offset -56
|
|
+ .cfi_adjust_cfa_offset -64
|
|
/* finish up the syscall as normal. */
|
|
br ret_from_sys_call
|
|
CFI_END_OSF_FRAME entMM
|
|
@@ -382,8 +378,8 @@ entUnaUser:
|
|
.cfi_restore $0
|
|
.cfi_adjust_cfa_offset -256
|
|
SAVE_ALL /* setup normal kernel stack */
|
|
- lda $sp, -56($sp)
|
|
- .cfi_adjust_cfa_offset 56
|
|
+ lda $sp, -64($sp)
|
|
+ .cfi_adjust_cfa_offset 64
|
|
stq $9, 0($sp)
|
|
stq $10, 8($sp)
|
|
stq $11, 16($sp)
|
|
@@ -399,7 +395,7 @@ entUnaUser:
|
|
.cfi_rel_offset $14, 40
|
|
.cfi_rel_offset $15, 48
|
|
lda $8, 0x3fff
|
|
- addq $sp, 56, $19
|
|
+ addq $sp, 64, $19
|
|
bic $sp, $8, $8
|
|
jsr $26, do_entUnaUser
|
|
ldq $9, 0($sp)
|
|
@@ -409,7 +405,7 @@ entUnaUser:
|
|
ldq $13, 32($sp)
|
|
ldq $14, 40($sp)
|
|
ldq $15, 48($sp)
|
|
- lda $sp, 56($sp)
|
|
+ lda $sp, 64($sp)
|
|
.cfi_restore $9
|
|
.cfi_restore $10
|
|
.cfi_restore $11
|
|
@@ -417,7 +413,7 @@ entUnaUser:
|
|
.cfi_restore $13
|
|
.cfi_restore $14
|
|
.cfi_restore $15
|
|
- .cfi_adjust_cfa_offset -56
|
|
+ .cfi_adjust_cfa_offset -64
|
|
br ret_from_sys_call
|
|
CFI_END_OSF_FRAME entUna
|
|
|
|
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
|
|
index d9a67b370e0476..de72bd837c5af7 100644
|
|
--- a/arch/alpha/kernel/traps.c
|
|
+++ b/arch/alpha/kernel/traps.c
|
|
@@ -707,7 +707,7 @@ s_reg_to_mem (unsigned long s_reg)
|
|
static int unauser_reg_offsets[32] = {
|
|
R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
|
|
/* r9 ... r15 are stored in front of regs. */
|
|
- -56, -48, -40, -32, -24, -16, -8,
|
|
+ -64, -56, -48, -40, -32, -24, -16, /* padding at -8 */
|
|
R(r16), R(r17), R(r18),
|
|
R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
|
|
R(r27), R(r28), R(gp),
|
|
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
|
|
index 8c9850437e6744..a9816bbc9f34d3 100644
|
|
--- a/arch/alpha/mm/fault.c
|
|
+++ b/arch/alpha/mm/fault.c
|
|
@@ -78,8 +78,8 @@ __load_new_mm_context(struct mm_struct *next_mm)
|
|
|
|
/* Macro for exception fixup code to access integer registers. */
|
|
#define dpf_reg(r) \
|
|
- (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
|
|
- (r) <= 18 ? (r)+10 : (r)-10])
|
|
+ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-17 : \
|
|
+ (r) <= 18 ? (r)+11 : (r)-10])
|
|
|
|
asmlinkage void
|
|
do_page_fault(unsigned long address, unsigned long mmcsr,
|
|
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
|
|
index d9c9218fa1fddc..309942b06c5bc2 100644
|
|
--- a/arch/arm64/kernel/cacheinfo.c
|
|
+++ b/arch/arm64/kernel/cacheinfo.c
|
|
@@ -101,16 +101,18 @@ int populate_cache_leaves(unsigned int cpu)
|
|
unsigned int level, idx;
|
|
enum cache_type type;
|
|
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
|
- struct cacheinfo *this_leaf = this_cpu_ci->info_list;
|
|
+ struct cacheinfo *infos = this_cpu_ci->info_list;
|
|
|
|
for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
|
|
- idx < this_cpu_ci->num_leaves; idx++, level++) {
|
|
+ idx < this_cpu_ci->num_leaves; level++) {
|
|
type = get_cache_type(level);
|
|
if (type == CACHE_TYPE_SEPARATE) {
|
|
- ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
|
|
- ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
|
|
+ if (idx + 1 >= this_cpu_ci->num_leaves)
|
|
+ break;
|
|
+ ci_leaf_init(&infos[idx++], CACHE_TYPE_DATA, level);
|
|
+ ci_leaf_init(&infos[idx++], CACHE_TYPE_INST, level);
|
|
} else {
|
|
- ci_leaf_init(this_leaf++, type, level);
|
|
+ ci_leaf_init(&infos[idx++], type, level);
|
|
}
|
|
}
|
|
return 0;
|
|
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
|
|
index 7e96604559004b..82778258855d1a 100644
|
|
--- a/arch/arm64/kernel/cpufeature.c
|
|
+++ b/arch/arm64/kernel/cpufeature.c
|
|
@@ -2762,6 +2762,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
|
.matches = match, \
|
|
}
|
|
|
|
+#define HWCAP_CAP_MATCH_ID(match, reg, field, min_value, cap_type, cap) \
|
|
+ { \
|
|
+ __HWCAP_CAP(#cap, cap_type, cap) \
|
|
+ HWCAP_CPUID_MATCH(reg, field, min_value) \
|
|
+ .matches = match, \
|
|
+ }
|
|
+
|
|
#ifdef CONFIG_ARM64_PTR_AUTH
|
|
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
|
|
{
|
|
@@ -2790,6 +2797,13 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
|
|
};
|
|
#endif
|
|
|
|
+#ifdef CONFIG_ARM64_SVE
|
|
+static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
|
|
+{
|
|
+ return system_supports_sve() && has_user_cpuid_feature(cap, scope);
|
|
+}
|
|
+#endif
|
|
+
|
|
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
|
|
HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
|
|
HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
|
|
@@ -2827,18 +2841,18 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
|
|
HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
|
|
#ifdef CONFIG_ARM64_SVE
|
|
HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
|
|
- HWCAP_CAP(ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
|
|
+ HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
|
|
#endif
|
|
HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
|
|
#ifdef CONFIG_ARM64_BTI
|
|
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
|
|
index 45354f2ddf7069..2e126ab79ecd86 100644
|
|
--- a/arch/arm64/kernel/vdso/vdso.lds.S
|
|
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
|
|
@@ -38,6 +38,7 @@ SECTIONS
|
|
*/
|
|
/DISCARD/ : {
|
|
*(.note.GNU-stack .note.gnu.property)
|
|
+ *(.ARM.attributes)
|
|
}
|
|
.note : { *(.note.*) } :text :note
|
|
|
|
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
|
|
index a553dae9a0d482..d4353741f331e3 100644
|
|
--- a/arch/arm64/kernel/vmlinux.lds.S
|
|
+++ b/arch/arm64/kernel/vmlinux.lds.S
|
|
@@ -162,6 +162,7 @@ SECTIONS
|
|
/DISCARD/ : {
|
|
*(.interp .dynamic)
|
|
*(.dynsym .dynstr .hash .gnu.hash)
|
|
+ *(.ARM.attributes)
|
|
}
|
|
|
|
. = KIMAGE_VADDR;
|
|
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
|
|
index 2bb3aa2dcfcb2e..e75c2dbd5f2c52 100644
|
|
--- a/arch/loongarch/kernel/genex.S
|
|
+++ b/arch/loongarch/kernel/genex.S
|
|
@@ -18,27 +18,29 @@
|
|
|
|
.align 5
|
|
SYM_FUNC_START(__arch_cpu_idle)
|
|
- /* start of rollback region */
|
|
- LONG_L t0, tp, TI_FLAGS
|
|
- nop
|
|
- andi t0, t0, _TIF_NEED_RESCHED
|
|
- bnez t0, 1f
|
|
- nop
|
|
- nop
|
|
- nop
|
|
+ /* start of idle interrupt region */
|
|
+ ori t0, zero, CSR_CRMD_IE
|
|
+ /* idle instruction needs irq enabled */
|
|
+ csrxchg t0, t0, LOONGARCH_CSR_CRMD
|
|
+ /*
|
|
+ * If an interrupt lands here; between enabling interrupts above and
|
|
+ * going idle on the next instruction, we must *NOT* go idle since the
|
|
+ * interrupt could have set TIF_NEED_RESCHED or caused an timer to need
|
|
+ * reprogramming. Fall through -- see handle_vint() below -- and have
|
|
+ * the idle loop take care of things.
|
|
+ */
|
|
idle 0
|
|
- /* end of rollback region */
|
|
+ /* end of idle interrupt region */
|
|
1: jr ra
|
|
SYM_FUNC_END(__arch_cpu_idle)
|
|
|
|
SYM_CODE_START(handle_vint)
|
|
BACKUP_T0T1
|
|
SAVE_ALL
|
|
- la_abs t1, __arch_cpu_idle
|
|
+ la_abs t1, 1b
|
|
LONG_L t0, sp, PT_ERA
|
|
- /* 32 byte rollback region */
|
|
- ori t0, t0, 0x1f
|
|
- xori t0, t0, 0x1f
|
|
+ /* 3 instructions idle interrupt region */
|
|
+ ori t0, t0, 0b1100
|
|
bne t0, t1, 1f
|
|
LONG_S t0, sp, PT_ERA
|
|
1: move a0, sp
|
|
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
|
|
index 0b5dd2faeb90b8..54b247d8cdb695 100644
|
|
--- a/arch/loongarch/kernel/idle.c
|
|
+++ b/arch/loongarch/kernel/idle.c
|
|
@@ -11,7 +11,6 @@
|
|
|
|
void __cpuidle arch_cpu_idle(void)
|
|
{
|
|
- raw_local_irq_enable();
|
|
- __arch_cpu_idle(); /* idle instruction needs irq enabled */
|
|
+ __arch_cpu_idle();
|
|
raw_local_irq_disable();
|
|
}
|
|
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
|
|
index 1ef8c63835351b..de8fa5a8a825cd 100644
|
|
--- a/arch/loongarch/kernel/reset.c
|
|
+++ b/arch/loongarch/kernel/reset.c
|
|
@@ -33,7 +33,7 @@ void machine_halt(void)
|
|
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
|
|
|
|
while (true) {
|
|
- __arch_cpu_idle();
|
|
+ __asm__ __volatile__("idle 0" : : : "memory");
|
|
}
|
|
}
|
|
|
|
@@ -53,7 +53,7 @@ void machine_power_off(void)
|
|
#endif
|
|
|
|
while (true) {
|
|
- __arch_cpu_idle();
|
|
+ __asm__ __volatile__("idle 0" : : : "memory");
|
|
}
|
|
}
|
|
|
|
@@ -74,6 +74,6 @@ void machine_restart(char *command)
|
|
acpi_reboot();
|
|
|
|
while (true) {
|
|
- __arch_cpu_idle();
|
|
+ __asm__ __volatile__("idle 0" : : : "memory");
|
|
}
|
|
}
|
|
diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c
|
|
index a5e84b403c3b34..df309ae4045dee 100644
|
|
--- a/arch/loongarch/lib/csum.c
|
|
+++ b/arch/loongarch/lib/csum.c
|
|
@@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
|
|
const u64 *ptr;
|
|
u64 data, sum64 = 0;
|
|
|
|
- if (unlikely(len == 0))
|
|
+ if (unlikely(len <= 0))
|
|
return 0;
|
|
|
|
offset = (unsigned long)buff & 7;
|
|
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
|
|
index 37c8badd270155..df286789c94f18 100644
|
|
--- a/arch/x86/events/intel/core.c
|
|
+++ b/arch/x86/events/intel/core.c
|
|
@@ -4715,8 +4715,11 @@ static void intel_pmu_cpu_starting(int cpu)
|
|
|
|
init_debug_store_on_cpu(cpu);
|
|
/*
|
|
- * Deal with CPUs that don't clear their LBRs on power-up.
|
|
+ * Deal with CPUs that don't clear their LBRs on power-up, and that may
|
|
+ * even boot with LBRs enabled.
|
|
*/
|
|
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
|
|
+ msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
|
|
intel_pmu_lbr_reset();
|
|
|
|
cpuc->lbr_sel = NULL;
|
|
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
|
|
index 0da5c227f490c0..53763cf1927775 100644
|
|
--- a/arch/x86/include/asm/mmu.h
|
|
+++ b/arch/x86/include/asm/mmu.h
|
|
@@ -37,6 +37,8 @@ typedef struct {
|
|
*/
|
|
atomic64_t tlb_gen;
|
|
|
|
+ unsigned long next_trim_cpumask;
|
|
+
|
|
#ifdef CONFIG_MODIFY_LDT_SYSCALL
|
|
struct rw_semaphore ldt_usr_sem;
|
|
struct ldt_struct *ldt;
|
|
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
|
|
index 8dac45a2c7fcf2..f5afd956d5e50a 100644
|
|
--- a/arch/x86/include/asm/mmu_context.h
|
|
+++ b/arch/x86/include/asm/mmu_context.h
|
|
@@ -145,6 +145,7 @@ static inline int init_new_context(struct task_struct *tsk,
|
|
|
|
mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
|
|
atomic64_set(&mm->context.tlb_gen, 0);
|
|
+ mm->context.next_trim_cpumask = jiffies + HZ;
|
|
|
|
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
|
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
|
|
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
|
|
index 24b7bd255e9830..623bb48774d44c 100644
|
|
--- a/arch/x86/include/asm/msr-index.h
|
|
+++ b/arch/x86/include/asm/msr-index.h
|
|
@@ -358,7 +358,8 @@
|
|
#define MSR_IA32_PASID_VALID BIT_ULL(31)
|
|
|
|
/* DEBUGCTLMSR bits (others vary by model): */
|
|
-#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
|
|
+#define DEBUGCTLMSR_LBR_BIT 0 /* last branch recording */
|
|
+#define DEBUGCTLMSR_LBR (1UL << DEBUGCTLMSR_LBR_BIT)
|
|
#define DEBUGCTLMSR_BTF_SHIFT 1
|
|
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
|
|
#define DEBUGCTLMSR_BUS_LOCK_DETECT (1UL << 2)
|
|
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
|
|
index 25726893c6f4dd..5d61adc6e892ea 100644
|
|
--- a/arch/x86/include/asm/tlbflush.h
|
|
+++ b/arch/x86/include/asm/tlbflush.h
|
|
@@ -222,6 +222,7 @@ struct flush_tlb_info {
|
|
unsigned int initiating_cpu;
|
|
u8 stride_shift;
|
|
u8 freed_tables;
|
|
+ u8 trim_cpumask;
|
|
};
|
|
|
|
void flush_tlb_local(void);
|
|
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
|
|
index 2b7999a1a50a83..80e262bb627fe1 100644
|
|
--- a/arch/x86/kernel/i8253.c
|
|
+++ b/arch/x86/kernel/i8253.c
|
|
@@ -8,6 +8,7 @@
|
|
#include <linux/timex.h>
|
|
#include <linux/i8253.h>
|
|
|
|
+#include <asm/hypervisor.h>
|
|
#include <asm/apic.h>
|
|
#include <asm/hpet.h>
|
|
#include <asm/time.h>
|
|
@@ -39,9 +40,15 @@ static bool __init use_pit(void)
|
|
|
|
bool __init pit_timer_init(void)
|
|
{
|
|
- if (!use_pit())
|
|
+ if (!use_pit()) {
|
|
+ /*
|
|
+ * Don't just ignore the PIT. Ensure it's stopped, because
|
|
+ * VMMs otherwise steal CPU time just to pointlessly waggle
|
|
+ * the (masked) IRQ.
|
|
+ */
|
|
+ clockevent_i8253_disable();
|
|
return false;
|
|
-
|
|
+ }
|
|
clockevent_i8253_init(true);
|
|
global_clock_event = &i8253_clockevent;
|
|
return true;
|
|
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
|
|
index e332d835d6583f..07961b362e2a0c 100644
|
|
--- a/arch/x86/kernel/static_call.c
|
|
+++ b/arch/x86/kernel/static_call.c
|
|
@@ -175,7 +175,6 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
|
|
noinstr void __static_call_update_early(void *tramp, void *func)
|
|
{
|
|
BUG_ON(system_state != SYSTEM_BOOTING);
|
|
- BUG_ON(!early_boot_irqs_disabled);
|
|
BUG_ON(static_call_initialized);
|
|
__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
|
|
sync_core();
|
|
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
|
|
index 238afd7335e46d..bd3fbd5be5da6e 100644
|
|
--- a/arch/x86/kvm/hyperv.c
|
|
+++ b/arch/x86/kvm/hyperv.c
|
|
@@ -2175,6 +2175,9 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
|
|
u32 vector;
|
|
bool all_cpus;
|
|
|
|
+ if (!lapic_in_kernel(vcpu))
|
|
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
|
|
+
|
|
if (hc->code == HVCALL_SEND_IPI) {
|
|
if (!hc->fast) {
|
|
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
|
|
@@ -2801,7 +2804,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
|
|
ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
|
|
ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
|
|
ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
|
|
- ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
|
|
+ if (!vcpu || lapic_in_kernel(vcpu))
|
|
+ ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
|
|
ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
|
|
if (evmcs_ver)
|
|
ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
|
|
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
|
|
index ff85526a9d4819..04b9b919235ecf 100644
|
|
--- a/arch/x86/kvm/mmu/mmu.c
|
|
+++ b/arch/x86/kvm/mmu/mmu.c
|
|
@@ -5289,7 +5289,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
|
|
union kvm_mmu_page_role root_role;
|
|
|
|
/* NPT requires CR0.PG=1. */
|
|
- WARN_ON_ONCE(cpu_role.base.direct);
|
|
+ WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
|
|
|
|
root_role = cpu_role.base;
|
|
root_role.level = kvm_mmu_get_tdp_level(vcpu);
|
|
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
|
|
index acf22bd99efcd8..e3f3e30fc89cac 100644
|
|
--- a/arch/x86/kvm/svm/nested.c
|
|
+++ b/arch/x86/kvm/svm/nested.c
|
|
@@ -644,6 +644,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
|
|
u32 pause_count12;
|
|
u32 pause_thresh12;
|
|
|
|
+ nested_svm_transition_tlb_flush(vcpu);
|
|
+
|
|
+ /* Enter Guest-Mode */
|
|
+ enter_guest_mode(vcpu);
|
|
+
|
|
/*
|
|
* Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
|
|
* exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
|
|
@@ -760,11 +765,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
|
|
}
|
|
}
|
|
|
|
- nested_svm_transition_tlb_flush(vcpu);
|
|
-
|
|
- /* Enter Guest-Mode */
|
|
- enter_guest_mode(vcpu);
|
|
-
|
|
/*
|
|
* Merge guest and host intercepts - must be called with vcpu in
|
|
* guest-mode to take effect.
|
|
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
|
|
index 64f594826a2822..df1794a5e38a57 100644
|
|
--- a/arch/x86/mm/tlb.c
|
|
+++ b/arch/x86/mm/tlb.c
|
|
@@ -898,9 +898,36 @@ static void flush_tlb_func(void *info)
|
|
nr_invalidate);
|
|
}
|
|
|
|
-static bool tlb_is_not_lazy(int cpu, void *data)
|
|
+static bool should_flush_tlb(int cpu, void *data)
|
|
{
|
|
- return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
|
|
+ struct flush_tlb_info *info = data;
|
|
+
|
|
+ /* Lazy TLB will get flushed at the next context switch. */
|
|
+ if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
|
|
+ return false;
|
|
+
|
|
+ /* No mm means kernel memory flush. */
|
|
+ if (!info->mm)
|
|
+ return true;
|
|
+
|
|
+ /* The target mm is loaded, and the CPU is not lazy. */
|
|
+ if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
|
|
+ return true;
|
|
+
|
|
+ /* In cpumask, but not the loaded mm? Periodically remove by flushing. */
|
|
+ if (info->trim_cpumask)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool should_trim_cpumask(struct mm_struct *mm)
|
|
+{
|
|
+ if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
|
|
+ WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
}
|
|
|
|
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
|
|
@@ -934,7 +961,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
|
|
if (info->freed_tables)
|
|
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
|
|
else
|
|
- on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
|
|
+ on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
|
|
(void *)info, 1, cpumask);
|
|
}
|
|
|
|
@@ -985,6 +1012,7 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
|
|
info->freed_tables = freed_tables;
|
|
info->new_tlb_gen = new_tlb_gen;
|
|
info->initiating_cpu = smp_processor_id();
|
|
+ info->trim_cpumask = 0;
|
|
|
|
return info;
|
|
}
|
|
@@ -1027,6 +1055,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
|
|
* flush_tlb_func_local() directly in this case.
|
|
*/
|
|
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
|
|
+ info->trim_cpumask = should_trim_cpumask(mm);
|
|
flush_tlb_multi(mm_cpumask(mm), info);
|
|
} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
|
|
lockdep_assert_irqs_enabled();
|
|
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
|
|
index 6b201e64d8abc8..88a722954f3f78 100644
|
|
--- a/arch/x86/xen/mmu_pv.c
|
|
+++ b/arch/x86/xen/mmu_pv.c
|
|
@@ -113,6 +113,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
|
|
*/
|
|
static DEFINE_SPINLOCK(xen_reservation_lock);
|
|
|
|
+/* Protected by xen_reservation_lock. */
|
|
+#define MIN_CONTIG_ORDER 9 /* 2MB */
|
|
+static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
|
|
+static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
|
|
+static unsigned long *discontig_frames __refdata = discontig_frames_early;
|
|
+static bool discontig_frames_dyn;
|
|
+
|
|
+static int alloc_discontig_frames(unsigned int order)
|
|
+{
|
|
+ unsigned long *new_array, *old_array;
|
|
+ unsigned int old_order;
|
|
+ unsigned long flags;
|
|
+
|
|
+ BUG_ON(order < MIN_CONTIG_ORDER);
|
|
+ BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
|
|
+
|
|
+ new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
|
|
+ order - MIN_CONTIG_ORDER);
|
|
+ if (!new_array)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ spin_lock_irqsave(&xen_reservation_lock, flags);
|
|
+
|
|
+ old_order = discontig_frames_order;
|
|
+
|
|
+ if (order > discontig_frames_order || !discontig_frames_dyn) {
|
|
+ if (!discontig_frames_dyn)
|
|
+ old_array = NULL;
|
|
+ else
|
|
+ old_array = discontig_frames;
|
|
+
|
|
+ discontig_frames = new_array;
|
|
+ discontig_frames_order = order;
|
|
+ discontig_frames_dyn = true;
|
|
+ } else {
|
|
+ old_array = new_array;
|
|
+ }
|
|
+
|
|
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
|
|
+
|
|
+ free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
|
|
* Note about cr3 (pagetable base) values:
|
|
*
|
|
@@ -782,6 +827,7 @@ void xen_mm_pin_all(void)
|
|
{
|
|
struct page *page;
|
|
|
|
+ spin_lock(&init_mm.page_table_lock);
|
|
spin_lock(&pgd_lock);
|
|
|
|
list_for_each_entry(page, &pgd_list, lru) {
|
|
@@ -792,6 +838,7 @@ void xen_mm_pin_all(void)
|
|
}
|
|
|
|
spin_unlock(&pgd_lock);
|
|
+ spin_unlock(&init_mm.page_table_lock);
|
|
}
|
|
|
|
static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
|
|
@@ -813,6 +860,9 @@ static void __init xen_after_bootmem(void)
|
|
SetPagePinned(virt_to_page(level3_user_vsyscall));
|
|
#endif
|
|
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
|
|
+
|
|
+ if (alloc_discontig_frames(MIN_CONTIG_ORDER))
|
|
+ BUG();
|
|
}
|
|
|
|
static void xen_unpin_page(struct mm_struct *mm, struct page *page,
|
|
@@ -888,6 +938,7 @@ void xen_mm_unpin_all(void)
|
|
{
|
|
struct page *page;
|
|
|
|
+ spin_lock(&init_mm.page_table_lock);
|
|
spin_lock(&pgd_lock);
|
|
|
|
list_for_each_entry(page, &pgd_list, lru) {
|
|
@@ -899,6 +950,7 @@ void xen_mm_unpin_all(void)
|
|
}
|
|
|
|
spin_unlock(&pgd_lock);
|
|
+ spin_unlock(&init_mm.page_table_lock);
|
|
}
|
|
|
|
static void xen_enter_mmap(struct mm_struct *mm)
|
|
@@ -2199,10 +2251,6 @@ void __init xen_init_mmu_ops(void)
|
|
memset(dummy_mapping, 0xff, PAGE_SIZE);
|
|
}
|
|
|
|
-/* Protected by xen_reservation_lock. */
|
|
-#define MAX_CONTIG_ORDER 9 /* 2MB */
|
|
-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
|
|
-
|
|
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
|
|
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
|
|
unsigned long *in_frames,
|
|
@@ -2319,18 +2367,25 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
|
|
unsigned int address_bits,
|
|
dma_addr_t *dma_handle)
|
|
{
|
|
- unsigned long *in_frames = discontig_frames, out_frame;
|
|
+ unsigned long *in_frames, out_frame;
|
|
unsigned long flags;
|
|
int success;
|
|
unsigned long vstart = (unsigned long)phys_to_virt(pstart);
|
|
|
|
- if (unlikely(order > MAX_CONTIG_ORDER))
|
|
- return -ENOMEM;
|
|
+ if (unlikely(order > discontig_frames_order)) {
|
|
+ if (!discontig_frames_dyn)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ if (alloc_discontig_frames(order))
|
|
+ return -ENOMEM;
|
|
+ }
|
|
|
|
memset((void *) vstart, 0, PAGE_SIZE << order);
|
|
|
|
spin_lock_irqsave(&xen_reservation_lock, flags);
|
|
|
|
+ in_frames = discontig_frames;
|
|
+
|
|
/* 1. Zap current PTEs, remembering MFNs. */
|
|
xen_zap_pfn_range(vstart, order, in_frames, NULL);
|
|
|
|
@@ -2354,12 +2409,12 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
|
|
|
|
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
|
|
{
|
|
- unsigned long *out_frames = discontig_frames, in_frame;
|
|
+ unsigned long *out_frames, in_frame;
|
|
unsigned long flags;
|
|
int success;
|
|
unsigned long vstart;
|
|
|
|
- if (unlikely(order > MAX_CONTIG_ORDER))
|
|
+ if (unlikely(order > discontig_frames_order))
|
|
return;
|
|
|
|
vstart = (unsigned long)phys_to_virt(pstart);
|
|
@@ -2367,6 +2422,8 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
|
|
|
|
spin_lock_irqsave(&xen_reservation_lock, flags);
|
|
|
|
+ out_frames = discontig_frames;
|
|
+
|
|
/* 1. Find start MFN of contiguous extent. */
|
|
in_frame = virt_to_mfn((void *)vstart);
|
|
|
|
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
|
|
index 7b521df00a39f4..6415213cd184e7 100644
|
|
--- a/block/partitions/mac.c
|
|
+++ b/block/partitions/mac.c
|
|
@@ -51,13 +51,25 @@ int mac_partition(struct parsed_partitions *state)
|
|
}
|
|
secsize = be16_to_cpu(md->block_size);
|
|
put_dev_sector(sect);
|
|
+
|
|
+ /*
|
|
+ * If the "block size" is not a power of 2, things get weird - we might
|
|
+ * end up with a partition straddling a sector boundary, so we wouldn't
|
|
+ * be able to read a partition entry with read_part_sector().
|
|
+ * Real block sizes are probably (?) powers of two, so just require
|
|
+ * that.
|
|
+ */
|
|
+ if (!is_power_of_2(secsize))
|
|
+ return -1;
|
|
datasize = round_down(secsize, 512);
|
|
data = read_part_sector(state, datasize / 512, &sect);
|
|
if (!data)
|
|
return -1;
|
|
partoffset = secsize % 512;
|
|
- if (partoffset + sizeof(*part) > datasize)
|
|
+ if (partoffset + sizeof(*part) > datasize) {
|
|
+ put_dev_sector(sect);
|
|
return -1;
|
|
+ }
|
|
part = (struct mac_partition *) (data + partoffset);
|
|
if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
|
|
put_dev_sector(sect);
|
|
@@ -110,8 +122,8 @@ int mac_partition(struct parsed_partitions *state)
|
|
int i, l;
|
|
|
|
goodness++;
|
|
- l = strlen(part->name);
|
|
- if (strcmp(part->name, "/") == 0)
|
|
+ l = strnlen(part->name, sizeof(part->name));
|
|
+ if (strncmp(part->name, "/", sizeof(part->name)) == 0)
|
|
goodness++;
|
|
for (i = 0; i <= l - 4; ++i) {
|
|
if (strncasecmp(part->name + i, "root",
|
|
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
|
|
index fdfc88e09986ec..e894fdf6d5531d 100644
|
|
--- a/drivers/acpi/x86/utils.c
|
|
+++ b/drivers/acpi/x86/utils.c
|
|
@@ -400,6 +400,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
|
|
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
|
|
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
|
|
},
|
|
+ {
|
|
+ /* Vexia Edu Atla 10 tablet 5V version */
|
|
+ .matches = {
|
|
+ /* Having all 3 of these not set is somewhat unique */
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
|
|
+ /* Above strings are too generic, also match on BIOS date */
|
|
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
|
|
+ },
|
|
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
|
|
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
|
|
+ },
|
|
{
|
|
/* Vexia Edu Atla 10 tablet 9V version */
|
|
.matches = {
|
|
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
|
|
index dceab5d013dec9..d99c9fb666c2d3 100644
|
|
--- a/drivers/base/regmap/regmap-irq.c
|
|
+++ b/drivers/base/regmap/regmap-irq.c
|
|
@@ -894,6 +894,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
|
|
kfree(d->wake_buf);
|
|
kfree(d->mask_buf_def);
|
|
kfree(d->mask_buf);
|
|
+ kfree(d->main_status_buf);
|
|
kfree(d->status_buf);
|
|
kfree(d->status_reg_buf);
|
|
if (d->config_buf) {
|
|
@@ -969,6 +970,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
|
|
kfree(d->wake_buf);
|
|
kfree(d->mask_buf_def);
|
|
kfree(d->mask_buf);
|
|
+ kfree(d->main_status_buf);
|
|
kfree(d->status_reg_buf);
|
|
kfree(d->status_buf);
|
|
if (d->config_buf) {
|
|
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
|
|
index d4350bb10b83a2..cb215e6f2e8344 100644
|
|
--- a/drivers/clocksource/i8253.c
|
|
+++ b/drivers/clocksource/i8253.c
|
|
@@ -108,11 +108,8 @@ int __init clocksource_i8253_init(void)
|
|
#endif
|
|
|
|
#ifdef CONFIG_CLKEVT_I8253
|
|
-static int pit_shutdown(struct clock_event_device *evt)
|
|
+void clockevent_i8253_disable(void)
|
|
{
|
|
- if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
|
|
- return 0;
|
|
-
|
|
raw_spin_lock(&i8253_lock);
|
|
|
|
outb_p(0x30, PIT_MODE);
|
|
@@ -123,6 +120,14 @@ static int pit_shutdown(struct clock_event_device *evt)
|
|
}
|
|
|
|
raw_spin_unlock(&i8253_lock);
|
|
+}
|
|
+
|
|
+static int pit_shutdown(struct clock_event_device *evt)
|
|
+{
|
|
+ if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
|
|
+ return 0;
|
|
+
|
|
+ clockevent_i8253_disable();
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
|
|
index 2c1095dcc2f2f8..1ab161e00c8679 100644
|
|
--- a/drivers/firmware/efi/efi.c
|
|
+++ b/drivers/firmware/efi/efi.c
|
|
@@ -908,13 +908,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
|
|
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
|
|
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
|
|
EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
|
|
- EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
|
|
+ EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
|
|
+ EFI_MEMORY_RUNTIME))
|
|
snprintf(pos, size, "|attr=0x%016llx]",
|
|
(unsigned long long)attr);
|
|
else
|
|
snprintf(pos, size,
|
|
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
|
|
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
|
|
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
|
|
+ attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
|
|
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
|
|
attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
|
|
attr & EFI_MEMORY_SP ? "SP" : "",
|
|
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
|
|
index c41e7b2091cdd1..8ad3efb9b1ff16 100644
|
|
--- a/drivers/firmware/efi/libstub/randomalloc.c
|
|
+++ b/drivers/firmware/efi/libstub/randomalloc.c
|
|
@@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
|
|
if (md->type != EFI_CONVENTIONAL_MEMORY)
|
|
return 0;
|
|
|
|
+ if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
|
|
+ return 0;
|
|
+
|
|
if (efi_soft_reserve_enabled() &&
|
|
(md->attribute & EFI_MEMORY_SP))
|
|
return 0;
|
|
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
|
|
index bf6fbd5d22a1a5..713ee2de02cf3f 100644
|
|
--- a/drivers/firmware/efi/libstub/relocate.c
|
|
+++ b/drivers/firmware/efi/libstub/relocate.c
|
|
@@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
|
|
if (desc->type != EFI_CONVENTIONAL_MEMORY)
|
|
continue;
|
|
|
|
+ if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
|
|
+ continue;
|
|
+
|
|
if (efi_soft_reserve_enabled() &&
|
|
(desc->attribute & EFI_MEMORY_SP))
|
|
continue;
|
|
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
|
|
index 5321ef98f4427d..64908f1a5e7f9b 100644
|
|
--- a/drivers/gpio/gpio-bcm-kona.c
|
|
+++ b/drivers/gpio/gpio-bcm-kona.c
|
|
@@ -69,6 +69,22 @@ struct bcm_kona_gpio {
|
|
struct bcm_kona_gpio_bank {
|
|
int id;
|
|
int irq;
|
|
+ /*
|
|
+ * Used to keep track of lock/unlock operations for each GPIO in the
|
|
+ * bank.
|
|
+ *
|
|
+ * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
|
|
+ * unlock count for all GPIOs is 0 by default. Each unlock increments
|
|
+ * the counter, and each lock decrements the counter.
|
|
+ *
|
|
+ * The lock function only locks the GPIO once its unlock counter is
|
|
+ * down to 0. This is necessary because the GPIO is unlocked in two
|
|
+ * places in this driver: once for requested GPIOs, and once for
|
|
+ * requested IRQs. Since it is possible for a GPIO to be requested
|
|
+ * as both a GPIO and an IRQ, we need to ensure that we don't lock it
|
|
+ * too early.
|
|
+ */
|
|
+ u8 gpio_unlock_count[GPIO_PER_BANK];
|
|
/* Used in the interrupt handler */
|
|
struct bcm_kona_gpio *kona_gpio;
|
|
};
|
|
@@ -86,14 +102,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
|
|
u32 val;
|
|
unsigned long flags;
|
|
int bank_id = GPIO_BANK(gpio);
|
|
+ int bit = GPIO_BIT(gpio);
|
|
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
|
|
|
|
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
|
|
+ if (bank->gpio_unlock_count[bit] == 0) {
|
|
+ dev_err(kona_gpio->gpio_chip.parent,
|
|
+ "Unbalanced locks for GPIO %u\n", gpio);
|
|
+ return;
|
|
+ }
|
|
|
|
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
|
|
- val |= BIT(gpio);
|
|
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
|
|
+ if (--bank->gpio_unlock_count[bit] == 0) {
|
|
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
|
|
|
|
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
|
|
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
|
|
+ val |= BIT(bit);
|
|
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
|
|
+
|
|
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
|
|
+ }
|
|
}
|
|
|
|
static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
|
|
@@ -102,14 +128,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
|
|
u32 val;
|
|
unsigned long flags;
|
|
int bank_id = GPIO_BANK(gpio);
|
|
+ int bit = GPIO_BIT(gpio);
|
|
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
|
|
|
|
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
|
|
+ if (bank->gpio_unlock_count[bit] == 0) {
|
|
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
|
|
|
|
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
|
|
- val &= ~BIT(gpio);
|
|
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
|
|
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
|
|
+ val &= ~BIT(bit);
|
|
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
|
|
|
|
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
|
|
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
|
|
+ }
|
|
+
|
|
+ ++bank->gpio_unlock_count[bit];
|
|
}
|
|
|
|
static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
|
|
@@ -360,6 +392,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
|
|
|
|
kona_gpio = irq_data_get_irq_chip_data(d);
|
|
reg_base = kona_gpio->reg_base;
|
|
+
|
|
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
|
|
|
|
val = readl(reg_base + GPIO_INT_MASK(bank_id));
|
|
@@ -382,6 +415,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
|
|
|
|
kona_gpio = irq_data_get_irq_chip_data(d);
|
|
reg_base = kona_gpio->reg_base;
|
|
+
|
|
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
|
|
|
|
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
|
|
@@ -477,15 +511,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
|
|
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
|
|
{
|
|
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
|
|
+ unsigned int gpio = d->hwirq;
|
|
|
|
- return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
|
|
+ /*
|
|
+ * We need to unlock the GPIO before any other operations are performed
|
|
+ * on the relevant GPIO configuration registers
|
|
+ */
|
|
+ bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
|
|
+
|
|
+ return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
|
|
}
|
|
|
|
static void bcm_kona_gpio_irq_relres(struct irq_data *d)
|
|
{
|
|
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
|
|
+ unsigned int gpio = d->hwirq;
|
|
+
|
|
+ /* Once we no longer use it, lock the GPIO again */
|
|
+ bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
|
|
|
|
- gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
|
|
+ gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
|
|
}
|
|
|
|
static struct irq_chip bcm_gpio_irq_chip = {
|
|
@@ -614,7 +659,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
|
|
bank->irq = platform_get_irq(pdev, i);
|
|
bank->kona_gpio = kona_gpio;
|
|
if (bank->irq < 0) {
|
|
- dev_err(dev, "Couldn't get IRQ for bank %d", i);
|
|
+ dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
|
|
ret = -ENOENT;
|
|
goto err_irq_domain;
|
|
}
|
|
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
|
|
index 27cc4da5356540..2731e97f2812ce 100644
|
|
--- a/drivers/gpio/gpio-stmpe.c
|
|
+++ b/drivers/gpio/gpio-stmpe.c
|
|
@@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
|
|
[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
|
|
[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
|
|
};
|
|
- int i, j;
|
|
+ int ret, i, j;
|
|
|
|
/*
|
|
* STMPE1600: to be able to get IRQ from pins,
|
|
@@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
|
|
* GPSR or GPCR registers
|
|
*/
|
|
if (stmpe->partnum == STMPE1600) {
|
|
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
|
|
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
|
|
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
|
|
+ if (ret < 0) {
|
|
+ dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
|
|
+ goto err;
|
|
+ }
|
|
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
|
|
+ if (ret < 0) {
|
|
+ dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
|
|
+ goto err;
|
|
+ }
|
|
}
|
|
|
|
for (i = 0; i < CACHE_NR_REGS; i++) {
|
|
@@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
|
|
}
|
|
}
|
|
|
|
+err:
|
|
mutex_unlock(&stmpe_gpio->irq_lock);
|
|
}
|
|
|
|
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
|
|
index b366b4ca4c40e9..8ddd5e8341a05b 100644
|
|
--- a/drivers/gpio/gpiolib-acpi.c
|
|
+++ b/drivers/gpio/gpiolib-acpi.c
|
|
@@ -1706,6 +1706,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
|
|
.ignore_wake = "PNP0C50:00@8",
|
|
},
|
|
},
|
|
+ {
|
|
+ /*
|
|
+ * Spurious wakeups from GPIO 11
|
|
+ * Found in BIOS 1.04
|
|
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
|
|
+ */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
|
|
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
|
|
+ },
|
|
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
|
|
+ .ignore_interrupt = "AMDI0030:00@11",
|
|
+ },
|
|
+ },
|
|
{} /* Terminating entry */
|
|
};
|
|
|
|
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
|
|
index 5c0016c77d2abe..efb592b6f6aa7a 100644
|
|
--- a/drivers/gpio/gpiolib.c
|
|
+++ b/drivers/gpio/gpiolib.c
|
|
@@ -723,13 +723,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
|
|
}
|
|
|
|
if (gc->ngpio == 0) {
|
|
- chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
|
|
+ dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
if (gc->ngpio > FASTPATH_NGPIO)
|
|
- chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
|
|
- gc->ngpio, FASTPATH_NGPIO);
|
|
+ dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
|
|
+ gc->ngpio, FASTPATH_NGPIO);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
|
|
index a4f9015345ccb5..6a24e8ceb94493 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
|
|
@@ -3450,9 +3450,10 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
|
|
if (err == -ENODEV) {
|
|
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
|
|
err = 0;
|
|
- goto out;
|
|
+ } else {
|
|
+ dev_err(adev->dev, "fail to initialize cap microcode\n");
|
|
}
|
|
- dev_err(adev->dev, "fail to initialize cap microcode\n");
|
|
+ goto out;
|
|
}
|
|
|
|
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
index d587f807dfd7c4..294609557b73ab 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
@@ -2026,6 +2026,7 @@ bool dcn20_fast_validate_bw(
|
|
{
|
|
bool out = false;
|
|
int split[MAX_PIPES] = { 0 };
|
|
+ bool merge[MAX_PIPES] = { false };
|
|
int pipe_cnt, i, pipe_idx, vlevel;
|
|
|
|
ASSERT(pipes);
|
|
@@ -2050,7 +2051,7 @@ bool dcn20_fast_validate_bw(
|
|
if (vlevel > context->bw_ctx.dml.soc.num_states)
|
|
goto validate_fail;
|
|
|
|
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
|
|
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
|
|
|
|
/*initialize pipe_just_split_from to invalid idx*/
|
|
for (i = 0; i < MAX_PIPES; i++)
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
|
|
index 2dc4d2c1410b81..8efe3f32a0e79b 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
|
|
@@ -1002,8 +1002,10 @@ static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer(
|
|
struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
|
|
struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
|
|
|
|
- if (!head_pipe)
|
|
+ if (!head_pipe) {
|
|
ASSERT(0);
|
|
+ return NULL;
|
|
+ }
|
|
|
|
if (!idle_pipe)
|
|
return NULL;
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
index 8dffa5b6426e1c..24105a5b9f2a59 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
@@ -800,6 +800,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
|
|
{
|
|
bool out = false;
|
|
int split[MAX_PIPES] = { 0 };
|
|
+ bool merge[MAX_PIPES] = { false };
|
|
int pipe_cnt, i, pipe_idx, vlevel;
|
|
|
|
ASSERT(pipes);
|
|
@@ -842,7 +843,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
|
|
goto validate_fail;
|
|
}
|
|
|
|
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
|
|
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
|
|
|
|
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
|
|
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
|
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
|
|
index 4d17b6958397ed..2997aeed634084 100644
|
|
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
|
|
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
|
|
@@ -517,7 +517,8 @@ static int smu_sys_set_pp_table(void *handle,
|
|
return -EIO;
|
|
}
|
|
|
|
- if (!smu_table->hardcode_pptable) {
|
|
+ if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
|
|
+ kfree(smu_table->hardcode_pptable);
|
|
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
|
|
if (!smu_table->hardcode_pptable)
|
|
return -ENOMEM;
|
|
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
|
|
index 5c397a2df70e28..5d27e1c733c527 100644
|
|
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
|
|
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
|
|
@@ -168,7 +168,7 @@ static int igt_ppgtt_alloc(void *arg)
|
|
return PTR_ERR(ppgtt);
|
|
|
|
if (!ppgtt->vm.allocate_va_range)
|
|
- goto err_ppgtt_cleanup;
|
|
+ goto ppgtt_vm_put;
|
|
|
|
/*
|
|
* While we only allocate the page tables here and so we could
|
|
@@ -236,7 +236,7 @@ static int igt_ppgtt_alloc(void *arg)
|
|
goto retry;
|
|
}
|
|
i915_gem_ww_ctx_fini(&ww);
|
|
-
|
|
+ppgtt_vm_put:
|
|
i915_vm_put(&ppgtt->vm);
|
|
return err;
|
|
}
|
|
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
|
|
index 2dba7c5ffd2c62..92f4261305bd9d 100644
|
|
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
|
|
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
|
|
@@ -587,7 +587,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
|
|
for (timeout = 10; timeout > 0; --timeout) {
|
|
if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
|
|
(rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
|
|
- (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
|
|
+ (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
|
|
break;
|
|
|
|
usleep_range(1000, 2000);
|
|
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
|
|
index f8114d11f2d158..a6b276f1d6ee15 100644
|
|
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
|
|
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
|
|
@@ -142,7 +142,6 @@
|
|
|
|
#define CLOCKSET1 0x101c
|
|
#define CLOCKSET1_LOCK_PHY (1 << 17)
|
|
-#define CLOCKSET1_LOCK (1 << 16)
|
|
#define CLOCKSET1_CLKSEL (1 << 8)
|
|
#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
|
|
#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
|
|
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 98efbaf3b0c23f..ee3531bbccd7df 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -646,7 +646,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
 {
 	dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
 
-	/* clear the irqstatus for newly enabled irqs */
+	/* clear the irqstatus for irqs that will be enabled */
 	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
 
 	dispc_k2g_vp_set_irqenable(dispc, 0, mask);
@@ -654,6 +654,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
 
 	dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
 
+	/* clear the irqstatus for irqs that were disabled */
+	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask);
+
 	/* flush posted write */
 	dispc_k2g_read_irqenable(dispc);
 }
@@ -726,24 +729,20 @@ static
 void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
 {
 	unsigned int i;
-	u32 top_clear = 0;
 
 	for (i = 0; i < dispc->feat->num_vps; ++i) {
-		if (clearmask & DSS_IRQ_VP_MASK(i)) {
+		if (clearmask & DSS_IRQ_VP_MASK(i))
 			dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
-			top_clear |= BIT(i);
-		}
 	}
 	for (i = 0; i < dispc->feat->num_planes; ++i) {
-		if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+		if (clearmask & DSS_IRQ_PLANE_MASK(i))
 			dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
-			top_clear |= BIT(4 + i);
-		}
 	}
 	if (dispc->feat->subrev == DISPC_K2G)
 		return;
 
-	dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+	/* always clear the top level irqstatus */
+	dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS));
 
 	/* Flush posted writes */
 	dispc_read(dispc, DISPC_IRQSTATUS);
@@ -789,7 +788,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
 
 	old_mask = dispc_k3_read_irqenable(dispc);
 
-	/* clear the irqstatus for newly enabled irqs */
+	/* clear the irqstatus for irqs that will be enabled */
 	dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
 
 	for (i = 0; i < dispc->feat->num_vps; ++i) {
@@ -814,6 +813,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
 	if (main_disable)
 		dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
 
+	/* clear the irqstatus for irqs that were disabled */
+	dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask);
+
 	/* Flush posted writes */
 	dispc_read(dispc, DISPC_IRQENABLE_SET);
 }
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 141b8abf08629f..e56b5935f2f1c1 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -179,6 +179,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
 {
 	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
 	struct drm_v3d_perfmon_destroy *req = data;
+	struct v3d_dev *v3d = v3d_priv->v3d;
 	struct v3d_perfmon *perfmon;
 
 	mutex_lock(&v3d_priv->perfmon.lock);
@@ -188,6 +189,10 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
 	if (!perfmon)
 		return -EINVAL;
 
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == v3d->active_perfmon)
+		v3d_perfmon_stop(v3d, perfmon, false);
+
 	v3d_perfmon_put(perfmon);
 
 	return 0;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5ad871a7d1a44c..6386043aab0bbf 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1668,9 +1668,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
 		break;
 	}
 
-	if (suffix)
+	if (suffix) {
 		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
 						 "%s %s", hdev->name, suffix);
+		if (!hi->input->name)
+			return -ENOMEM;
+	}
 
 	return 0;
 }
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
|
|
index b110818fc94586..49c067133975f7 100644
|
|
--- a/drivers/hid/hid-steam.c
|
|
+++ b/drivers/hid/hid-steam.c
|
|
@@ -66,55 +66,225 @@ static LIST_HEAD(steam_devices);
|
|
#define STEAM_DECK_TRIGGER_RESOLUTION 5461
|
|
/* Joystick runs are about 5 mm and 32768 units */
|
|
#define STEAM_DECK_JOYSTICK_RESOLUTION 6553
|
|
+/* Accelerometer has 16 bit resolution and a range of +/- 2g */
|
|
+#define STEAM_DECK_ACCEL_RES_PER_G 16384
|
|
+#define STEAM_DECK_ACCEL_RANGE 32768
|
|
+#define STEAM_DECK_ACCEL_FUZZ 32
|
|
+/* Gyroscope has 16 bit resolution and a range of +/- 2000 dps */
|
|
+#define STEAM_DECK_GYRO_RES_PER_DPS 16
|
|
+#define STEAM_DECK_GYRO_RANGE 32768
|
|
+#define STEAM_DECK_GYRO_FUZZ 1
|
|
|
|
#define STEAM_PAD_FUZZ 256
|
|
|
|
/*
|
|
* Commands that can be sent in a feature report.
|
|
- * Thanks to Valve for some valuable hints.
|
|
+ * Thanks to Valve and SDL for the names.
|
|
*/
|
|
-#define STEAM_CMD_SET_MAPPINGS 0x80
|
|
-#define STEAM_CMD_CLEAR_MAPPINGS 0x81
|
|
-#define STEAM_CMD_GET_MAPPINGS 0x82
|
|
-#define STEAM_CMD_GET_ATTRIB 0x83
|
|
-#define STEAM_CMD_GET_ATTRIB_LABEL 0x84
|
|
-#define STEAM_CMD_DEFAULT_MAPPINGS 0x85
|
|
-#define STEAM_CMD_FACTORY_RESET 0x86
|
|
-#define STEAM_CMD_WRITE_REGISTER 0x87
|
|
-#define STEAM_CMD_CLEAR_REGISTER 0x88
|
|
-#define STEAM_CMD_READ_REGISTER 0x89
|
|
-#define STEAM_CMD_GET_REGISTER_LABEL 0x8a
|
|
-#define STEAM_CMD_GET_REGISTER_MAX 0x8b
|
|
-#define STEAM_CMD_GET_REGISTER_DEFAULT 0x8c
|
|
-#define STEAM_CMD_SET_MODE 0x8d
|
|
-#define STEAM_CMD_DEFAULT_MOUSE 0x8e
|
|
-#define STEAM_CMD_FORCEFEEDBAK 0x8f
|
|
-#define STEAM_CMD_REQUEST_COMM_STATUS 0xb4
|
|
-#define STEAM_CMD_GET_SERIAL 0xae
|
|
-#define STEAM_CMD_HAPTIC_RUMBLE 0xeb
|
|
-
|
|
-/* Some useful register ids */
|
|
-#define STEAM_REG_LPAD_MODE 0x07
|
|
-#define STEAM_REG_RPAD_MODE 0x08
|
|
-#define STEAM_REG_RPAD_MARGIN 0x18
|
|
-#define STEAM_REG_LED 0x2d
|
|
-#define STEAM_REG_GYRO_MODE 0x30
|
|
-#define STEAM_REG_LPAD_CLICK_PRESSURE 0x34
|
|
-#define STEAM_REG_RPAD_CLICK_PRESSURE 0x35
|
|
-
|
|
-/* Raw event identifiers */
|
|
-#define STEAM_EV_INPUT_DATA 0x01
|
|
-#define STEAM_EV_CONNECT 0x03
|
|
-#define STEAM_EV_BATTERY 0x04
|
|
-#define STEAM_EV_DECK_INPUT_DATA 0x09
|
|
+enum {
|
|
+ ID_SET_DIGITAL_MAPPINGS = 0x80,
|
|
+ ID_CLEAR_DIGITAL_MAPPINGS = 0x81,
|
|
+ ID_GET_DIGITAL_MAPPINGS = 0x82,
|
|
+ ID_GET_ATTRIBUTES_VALUES = 0x83,
|
|
+ ID_GET_ATTRIBUTE_LABEL = 0x84,
|
|
+ ID_SET_DEFAULT_DIGITAL_MAPPINGS = 0x85,
|
|
+ ID_FACTORY_RESET = 0x86,
|
|
+ ID_SET_SETTINGS_VALUES = 0x87,
|
|
+ ID_CLEAR_SETTINGS_VALUES = 0x88,
|
|
+ ID_GET_SETTINGS_VALUES = 0x89,
|
|
+ ID_GET_SETTING_LABEL = 0x8A,
|
|
+ ID_GET_SETTINGS_MAXS = 0x8B,
|
|
+ ID_GET_SETTINGS_DEFAULTS = 0x8C,
|
|
+ ID_SET_CONTROLLER_MODE = 0x8D,
|
|
+ ID_LOAD_DEFAULT_SETTINGS = 0x8E,
|
|
+ ID_TRIGGER_HAPTIC_PULSE = 0x8F,
|
|
+ ID_TURN_OFF_CONTROLLER = 0x9F,
|
|
+
|
|
+ ID_GET_DEVICE_INFO = 0xA1,
|
|
+
|
|
+ ID_CALIBRATE_TRACKPADS = 0xA7,
|
|
+ ID_RESERVED_0 = 0xA8,
|
|
+ ID_SET_SERIAL_NUMBER = 0xA9,
|
|
+ ID_GET_TRACKPAD_CALIBRATION = 0xAA,
|
|
+ ID_GET_TRACKPAD_FACTORY_CALIBRATION = 0xAB,
|
|
+ ID_GET_TRACKPAD_RAW_DATA = 0xAC,
|
|
+ ID_ENABLE_PAIRING = 0xAD,
|
|
+ ID_GET_STRING_ATTRIBUTE = 0xAE,
|
|
+ ID_RADIO_ERASE_RECORDS = 0xAF,
|
|
+ ID_RADIO_WRITE_RECORD = 0xB0,
|
|
+ ID_SET_DONGLE_SETTING = 0xB1,
|
|
+ ID_DONGLE_DISCONNECT_DEVICE = 0xB2,
|
|
+ ID_DONGLE_COMMIT_DEVICE = 0xB3,
|
|
+ ID_DONGLE_GET_WIRELESS_STATE = 0xB4,
|
|
+ ID_CALIBRATE_GYRO = 0xB5,
|
|
+ ID_PLAY_AUDIO = 0xB6,
|
|
+ ID_AUDIO_UPDATE_START = 0xB7,
|
|
+ ID_AUDIO_UPDATE_DATA = 0xB8,
|
|
+ ID_AUDIO_UPDATE_COMPLETE = 0xB9,
|
|
+ ID_GET_CHIPID = 0xBA,
|
|
+
|
|
+ ID_CALIBRATE_JOYSTICK = 0xBF,
|
|
+ ID_CALIBRATE_ANALOG_TRIGGERS = 0xC0,
|
|
+ ID_SET_AUDIO_MAPPING = 0xC1,
|
|
+ ID_CHECK_GYRO_FW_LOAD = 0xC2,
|
|
+ ID_CALIBRATE_ANALOG = 0xC3,
|
|
+ ID_DONGLE_GET_CONNECTED_SLOTS = 0xC4,
|
|
+
|
|
+ ID_RESET_IMU = 0xCE,
|
|
+
|
|
+ ID_TRIGGER_HAPTIC_CMD = 0xEA,
|
|
+ ID_TRIGGER_RUMBLE_CMD = 0xEB,
|
|
+};
|
|
+
|
|
+/* Settings IDs */
|
|
+enum {
|
|
+ /* 0 */
|
|
+ SETTING_MOUSE_SENSITIVITY,
|
|
+ SETTING_MOUSE_ACCELERATION,
|
|
+ SETTING_TRACKBALL_ROTATION_ANGLE,
|
|
+ SETTING_HAPTIC_INTENSITY_UNUSED,
|
|
+ SETTING_LEFT_GAMEPAD_STICK_ENABLED,
|
|
+ SETTING_RIGHT_GAMEPAD_STICK_ENABLED,
|
|
+ SETTING_USB_DEBUG_MODE,
|
|
+ SETTING_LEFT_TRACKPAD_MODE,
|
|
+ SETTING_RIGHT_TRACKPAD_MODE,
|
|
+ SETTING_MOUSE_POINTER_ENABLED,
|
|
+
|
|
+ /* 10 */
|
|
+ SETTING_DPAD_DEADZONE,
|
|
+ SETTING_MINIMUM_MOMENTUM_VEL,
|
|
+ SETTING_MOMENTUM_DECAY_AMMOUNT,
|
|
+ SETTING_TRACKPAD_RELATIVE_MODE_TICKS_PER_PIXEL,
|
|
+ SETTING_HAPTIC_INCREMENT,
|
|
+ SETTING_DPAD_ANGLE_SIN,
|
|
+ SETTING_DPAD_ANGLE_COS,
|
|
+ SETTING_MOMENTUM_VERTICAL_DIVISOR,
|
|
+ SETTING_MOMENTUM_MAXIMUM_VELOCITY,
|
|
+ SETTING_TRACKPAD_Z_ON,
|
|
+
|
|
+ /* 20 */
|
|
+ SETTING_TRACKPAD_Z_OFF,
|
|
+ SETTING_SENSITIVY_SCALE_AMMOUNT,
|
|
+ SETTING_LEFT_TRACKPAD_SECONDARY_MODE,
|
|
+ SETTING_RIGHT_TRACKPAD_SECONDARY_MODE,
|
|
+ SETTING_SMOOTH_ABSOLUTE_MOUSE,
|
|
+ SETTING_STEAMBUTTON_POWEROFF_TIME,
|
|
+ SETTING_UNUSED_1,
|
|
+ SETTING_TRACKPAD_OUTER_RADIUS,
|
|
+ SETTING_TRACKPAD_Z_ON_LEFT,
|
|
+ SETTING_TRACKPAD_Z_OFF_LEFT,
|
|
+
|
|
+ /* 30 */
|
|
+ SETTING_TRACKPAD_OUTER_SPIN_VEL,
|
|
+ SETTING_TRACKPAD_OUTER_SPIN_RADIUS,
|
|
+ SETTING_TRACKPAD_OUTER_SPIN_HORIZONTAL_ONLY,
|
|
+ SETTING_TRACKPAD_RELATIVE_MODE_DEADZONE,
|
|
+ SETTING_TRACKPAD_RELATIVE_MODE_MAX_VEL,
|
|
+ SETTING_TRACKPAD_RELATIVE_MODE_INVERT_Y,
|
|
+ SETTING_TRACKPAD_DOUBLE_TAP_BEEP_ENABLED,
|
|
+ SETTING_TRACKPAD_DOUBLE_TAP_BEEP_PERIOD,
|
|
+ SETTING_TRACKPAD_DOUBLE_TAP_BEEP_COUNT,
|
|
+ SETTING_TRACKPAD_OUTER_RADIUS_RELEASE_ON_TRANSITION,
|
|
+
|
|
+ /* 40 */
|
|
+ SETTING_RADIAL_MODE_ANGLE,
|
|
+ SETTING_HAPTIC_INTENSITY_MOUSE_MODE,
|
|
+ SETTING_LEFT_DPAD_REQUIRES_CLICK,
|
|
+ SETTING_RIGHT_DPAD_REQUIRES_CLICK,
|
|
+ SETTING_LED_BASELINE_BRIGHTNESS,
|
|
+ SETTING_LED_USER_BRIGHTNESS,
|
|
+ SETTING_ENABLE_RAW_JOYSTICK,
|
|
+ SETTING_ENABLE_FAST_SCAN,
|
|
+ SETTING_IMU_MODE,
|
|
+ SETTING_WIRELESS_PACKET_VERSION,
|
|
+
|
|
+ /* 50 */
|
|
+ SETTING_SLEEP_INACTIVITY_TIMEOUT,
|
|
+ SETTING_TRACKPAD_NOISE_THRESHOLD,
|
|
+ SETTING_LEFT_TRACKPAD_CLICK_PRESSURE,
|
|
+ SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE,
|
|
+ SETTING_LEFT_BUMPER_CLICK_PRESSURE,
|
|
+ SETTING_RIGHT_BUMPER_CLICK_PRESSURE,
|
|
+ SETTING_LEFT_GRIP_CLICK_PRESSURE,
|
|
+ SETTING_RIGHT_GRIP_CLICK_PRESSURE,
|
|
+ SETTING_LEFT_GRIP2_CLICK_PRESSURE,
|
|
+ SETTING_RIGHT_GRIP2_CLICK_PRESSURE,
|
|
+
|
|
+ /* 60 */
|
|
+ SETTING_PRESSURE_MODE,
|
|
+ SETTING_CONTROLLER_TEST_MODE,
|
|
+ SETTING_TRIGGER_MODE,
|
|
+ SETTING_TRACKPAD_Z_THRESHOLD,
|
|
+ SETTING_FRAME_RATE,
|
|
+ SETTING_TRACKPAD_FILT_CTRL,
|
|
+ SETTING_TRACKPAD_CLIP,
|
|
+ SETTING_DEBUG_OUTPUT_SELECT,
|
|
+ SETTING_TRIGGER_THRESHOLD_PERCENT,
|
|
+ SETTING_TRACKPAD_FREQUENCY_HOPPING,
|
|
+
|
|
+ /* 70 */
|
|
+ SETTING_HAPTICS_ENABLED,
|
|
+ SETTING_STEAM_WATCHDOG_ENABLE,
|
|
+ SETTING_TIMP_TOUCH_THRESHOLD_ON,
|
|
+ SETTING_TIMP_TOUCH_THRESHOLD_OFF,
|
|
+ SETTING_FREQ_HOPPING,
|
|
+ SETTING_TEST_CONTROL,
|
|
+ SETTING_HAPTIC_MASTER_GAIN_DB,
|
|
+ SETTING_THUMB_TOUCH_THRESH,
|
|
+ SETTING_DEVICE_POWER_STATUS,
|
|
+ SETTING_HAPTIC_INTENSITY,
|
|
+
|
|
+ /* 80 */
|
|
+ SETTING_STABILIZER_ENABLED,
|
|
+ SETTING_TIMP_MODE_MTE,
|
|
+};
|
|
+
|
|
+/* Input report identifiers */
|
|
+enum
|
|
+{
|
|
+ ID_CONTROLLER_STATE = 1,
|
|
+ ID_CONTROLLER_DEBUG = 2,
|
|
+ ID_CONTROLLER_WIRELESS = 3,
|
|
+ ID_CONTROLLER_STATUS = 4,
|
|
+ ID_CONTROLLER_DEBUG2 = 5,
|
|
+ ID_CONTROLLER_SECONDARY_STATE = 6,
|
|
+ ID_CONTROLLER_BLE_STATE = 7,
|
|
+ ID_CONTROLLER_DECK_STATE = 9
|
|
+};
|
|
+
|
|
+/* String attribute idenitifiers */
|
|
+enum {
|
|
+ ATTRIB_STR_BOARD_SERIAL,
|
|
+ ATTRIB_STR_UNIT_SERIAL,
|
|
+};
|
|
|
|
/* Values for GYRO_MODE (bitmask) */
|
|
-#define STEAM_GYRO_MODE_OFF 0x0000
|
|
-#define STEAM_GYRO_MODE_STEERING 0x0001
|
|
-#define STEAM_GYRO_MODE_TILT 0x0002
|
|
-#define STEAM_GYRO_MODE_SEND_ORIENTATION 0x0004
|
|
-#define STEAM_GYRO_MODE_SEND_RAW_ACCEL 0x0008
|
|
-#define STEAM_GYRO_MODE_SEND_RAW_GYRO 0x0010
|
|
+enum {
|
|
+ SETTING_GYRO_MODE_OFF = 0,
|
|
+ SETTING_GYRO_MODE_STEERING = BIT(0),
|
|
+ SETTING_GYRO_MODE_TILT = BIT(1),
|
|
+ SETTING_GYRO_MODE_SEND_ORIENTATION = BIT(2),
|
|
+ SETTING_GYRO_MODE_SEND_RAW_ACCEL = BIT(3),
|
|
+ SETTING_GYRO_MODE_SEND_RAW_GYRO = BIT(4),
|
|
+};
|
|
+
|
|
+/* Trackpad modes */
|
|
+enum {
|
|
+ TRACKPAD_ABSOLUTE_MOUSE,
|
|
+ TRACKPAD_RELATIVE_MOUSE,
|
|
+ TRACKPAD_DPAD_FOUR_WAY_DISCRETE,
|
|
+ TRACKPAD_DPAD_FOUR_WAY_OVERLAP,
|
|
+ TRACKPAD_DPAD_EIGHT_WAY,
|
|
+ TRACKPAD_RADIAL_MODE,
|
|
+ TRACKPAD_ABSOLUTE_DPAD,
|
|
+ TRACKPAD_NONE,
|
|
+ TRACKPAD_GESTURE_KEYBOARD,
|
|
+};
|
|
+
|
|
+/* Pad identifiers for the deck */
|
|
+#define STEAM_PAD_LEFT 0
|
|
+#define STEAM_PAD_RIGHT 1
|
|
+#define STEAM_PAD_BOTH 2
|
|
|
|
/* Other random constants */
|
|
#define STEAM_SERIAL_LEN 10
|
|
@@ -123,9 +293,10 @@ struct steam_device {
|
|
struct list_head list;
|
|
spinlock_t lock;
|
|
struct hid_device *hdev, *client_hdev;
|
|
- struct mutex mutex;
|
|
+ struct mutex report_mutex;
|
|
bool client_opened;
|
|
struct input_dev __rcu *input;
|
|
+ struct input_dev __rcu *sensors;
|
|
unsigned long quirks;
|
|
struct work_struct work_connect;
|
|
bool connected;
|
|
@@ -134,10 +305,14 @@ struct steam_device {
|
|
struct power_supply __rcu *battery;
|
|
u8 battery_charge;
|
|
u16 voltage;
|
|
- struct delayed_work heartbeat;
|
|
+ struct delayed_work mode_switch;
|
|
+ bool did_mode_switch;
|
|
+ bool gamepad_mode;
|
|
struct work_struct rumble_work;
|
|
u16 rumble_left;
|
|
u16 rumble_right;
|
|
+ unsigned int sensor_timestamp_us;
|
|
+ struct work_struct unregister_work;
|
|
};
|
|
|
|
static int steam_recv_report(struct steam_device *steam,
|
|
@@ -226,13 +401,13 @@ static inline int steam_send_report_byte(struct steam_device *steam, u8 cmd)
|
|
return steam_send_report(steam, &cmd, 1);
|
|
}
|
|
|
|
-static int steam_write_registers(struct steam_device *steam,
|
|
+static int steam_write_settings(struct steam_device *steam,
|
|
/* u8 reg, u16 val */...)
|
|
{
|
|
/* Send: 0x87 len (reg valLo valHi)* */
|
|
u8 reg;
|
|
u16 val;
|
|
- u8 cmd[64] = {STEAM_CMD_WRITE_REGISTER, 0x00};
|
|
+ u8 cmd[64] = {ID_SET_SETTINGS_VALUES, 0x00};
|
|
int ret;
|
|
va_list args;
|
|
|
|
@@ -267,21 +442,26 @@ static int steam_get_serial(struct steam_device *steam)
|
|
* Send: 0xae 0x15 0x01
|
|
* Recv: 0xae 0x15 0x01 serialnumber (10 chars)
|
|
*/
|
|
- int ret;
|
|
- u8 cmd[] = {STEAM_CMD_GET_SERIAL, 0x15, 0x01};
|
|
+ int ret = 0;
|
|
+ u8 cmd[] = {ID_GET_STRING_ATTRIBUTE, 0x15, ATTRIB_STR_UNIT_SERIAL};
|
|
u8 reply[3 + STEAM_SERIAL_LEN + 1];
|
|
|
|
+ mutex_lock(&steam->report_mutex);
|
|
ret = steam_send_report(steam, cmd, sizeof(cmd));
|
|
if (ret < 0)
|
|
- return ret;
|
|
+ goto out;
|
|
ret = steam_recv_report(steam, reply, sizeof(reply));
|
|
if (ret < 0)
|
|
- return ret;
|
|
- if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != 0x01)
|
|
- return -EIO;
|
|
+ goto out;
|
|
+ if (reply[0] != ID_GET_STRING_ATTRIBUTE || reply[1] != 0x15 || reply[2] != ATTRIB_STR_UNIT_SERIAL) {
|
|
+ ret = -EIO;
|
|
+ goto out;
|
|
+ }
|
|
reply[3 + STEAM_SERIAL_LEN] = 0;
|
|
strscpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
|
|
- return 0;
|
|
+out:
|
|
+ mutex_unlock(&steam->report_mutex);
|
|
+ return ret;
|
|
}
|
|
|
|
/*
|
|
@@ -291,14 +471,50 @@ static int steam_get_serial(struct steam_device *steam)
|
|
*/
|
|
static inline int steam_request_conn_status(struct steam_device *steam)
|
|
{
|
|
- return steam_send_report_byte(steam, STEAM_CMD_REQUEST_COMM_STATUS);
|
|
+ int ret;
|
|
+ mutex_lock(&steam->report_mutex);
|
|
+ ret = steam_send_report_byte(steam, ID_DONGLE_GET_WIRELESS_STATE);
|
|
+ mutex_unlock(&steam->report_mutex);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Send a haptic pulse to the trackpads
|
|
+ * Duration and interval are measured in microseconds, count is the number
|
|
+ * of pulses to send for duration time with interval microseconds between them
|
|
+ * and gain is measured in decibels, ranging from -24 to +6
|
|
+ */
|
|
+static inline int steam_haptic_pulse(struct steam_device *steam, u8 pad,
|
|
+ u16 duration, u16 interval, u16 count, u8 gain)
|
|
+{
|
|
+ int ret;
|
|
+ u8 report[10] = {ID_TRIGGER_HAPTIC_PULSE, 8};
|
|
+
|
|
+ /* Left and right are swapped on this report for legacy reasons */
|
|
+ if (pad < STEAM_PAD_BOTH)
|
|
+ pad ^= 1;
|
|
+
|
|
+ report[2] = pad;
|
|
+ report[3] = duration & 0xFF;
|
|
+ report[4] = duration >> 8;
|
|
+ report[5] = interval & 0xFF;
|
|
+ report[6] = interval >> 8;
|
|
+ report[7] = count & 0xFF;
|
|
+ report[8] = count >> 8;
|
|
+ report[9] = gain;
|
|
+
|
|
+ mutex_lock(&steam->report_mutex);
|
|
+ ret = steam_send_report(steam, report, sizeof(report));
|
|
+ mutex_unlock(&steam->report_mutex);
|
|
+ return ret;
|
|
}
|
|
|
|
static inline int steam_haptic_rumble(struct steam_device *steam,
|
|
u16 intensity, u16 left_speed, u16 right_speed,
|
|
u8 left_gain, u8 right_gain)
|
|
{
|
|
- u8 report[11] = {STEAM_CMD_HAPTIC_RUMBLE, 9};
|
|
+ int ret;
|
|
+ u8 report[11] = {ID_TRIGGER_RUMBLE_CMD, 9};
|
|
|
|
report[3] = intensity & 0xFF;
|
|
report[4] = intensity >> 8;
|
|
@@ -309,7 +525,10 @@ static inline int steam_haptic_rumble(struct steam_device *steam,
|
|
report[9] = left_gain;
|
|
report[10] = right_gain;
|
|
|
|
- return steam_send_report(steam, report, sizeof(report));
|
|
+ mutex_lock(&steam->report_mutex);
|
|
+ ret = steam_send_report(steam, report, sizeof(report));
|
|
+ mutex_unlock(&steam->report_mutex);
|
|
+ return ret;
|
|
}
|
|
|
|
static void steam_haptic_rumble_cb(struct work_struct *work)
|
|
@@ -335,40 +554,36 @@ static int steam_play_effect(struct input_dev *dev, void *data,
|
|
|
|
static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
|
|
{
|
|
+ if (steam->gamepad_mode)
|
|
+ enable = false;
|
|
+
|
|
if (enable) {
|
|
+ mutex_lock(&steam->report_mutex);
|
|
/* enable esc, enter, cursors */
|
|
- steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MAPPINGS);
|
|
- /* enable mouse */
|
|
- steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MOUSE);
|
|
- steam_write_registers(steam,
|
|
- STEAM_REG_RPAD_MARGIN, 0x01, /* enable margin */
|
|
- 0);
|
|
-
|
|
- cancel_delayed_work_sync(&steam->heartbeat);
|
|
+ steam_send_report_byte(steam, ID_SET_DEFAULT_DIGITAL_MAPPINGS);
|
|
+ /* reset settings */
|
|
+ steam_send_report_byte(steam, ID_LOAD_DEFAULT_SETTINGS);
|
|
+ mutex_unlock(&steam->report_mutex);
|
|
} else {
|
|
+ mutex_lock(&steam->report_mutex);
|
|
/* disable esc, enter, cursor */
|
|
- steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
|
|
+ steam_send_report_byte(steam, ID_CLEAR_DIGITAL_MAPPINGS);
|
|
|
|
if (steam->quirks & STEAM_QUIRK_DECK) {
|
|
- steam_write_registers(steam,
|
|
- STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
|
|
- STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
|
|
- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
|
|
- STEAM_REG_LPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
|
|
- STEAM_REG_RPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
|
|
+ steam_write_settings(steam,
|
|
+ SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
|
|
+ SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
|
|
+ SETTING_LEFT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */
|
|
+ SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */
|
|
+ SETTING_STEAM_WATCHDOG_ENABLE, 0, /* disable watchdog that tests if Steam is active */
|
|
0);
|
|
- /*
|
|
- * The Steam Deck has a watchdog that automatically enables
|
|
- * lizard mode if it doesn't see any traffic for too long
|
|
- */
|
|
- if (!work_busy(&steam->heartbeat.work))
|
|
- schedule_delayed_work(&steam->heartbeat, 5 * HZ);
|
|
+ mutex_unlock(&steam->report_mutex);
|
|
} else {
|
|
- steam_write_registers(steam,
|
|
- STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
|
|
- STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
|
|
- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
|
|
+ steam_write_settings(steam,
|
|
+ SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
|
|
+ SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
|
|
0);
|
|
+ mutex_unlock(&steam->report_mutex);
|
|
}
|
|
}
|
|
}
|
|
@@ -376,22 +591,38 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
|
|
static int steam_input_open(struct input_dev *dev)
|
|
{
|
|
struct steam_device *steam = input_get_drvdata(dev);
|
|
+ unsigned long flags;
|
|
+ bool set_lizard_mode;
|
|
+
|
|
+ /*
|
|
+ * Disabling lizard mode automatically is only done on the Steam
|
|
+ * Controller. On the Steam Deck, this is toggled manually by holding
|
|
+ * the options button instead, handled by steam_mode_switch_cb.
|
|
+ */
|
|
+ if (!(steam->quirks & STEAM_QUIRK_DECK)) {
|
|
+ spin_lock_irqsave(&steam->lock, flags);
|
|
+ set_lizard_mode = !steam->client_opened && lizard_mode;
|
|
+ spin_unlock_irqrestore(&steam->lock, flags);
|
|
+ if (set_lizard_mode)
|
|
+ steam_set_lizard_mode(steam, false);
|
|
+ }
|
|
|
|
- mutex_lock(&steam->mutex);
|
|
- if (!steam->client_opened && lizard_mode)
|
|
- steam_set_lizard_mode(steam, false);
|
|
- mutex_unlock(&steam->mutex);
|
|
return 0;
|
|
}
|
|
|
|
static void steam_input_close(struct input_dev *dev)
|
|
{
|
|
struct steam_device *steam = input_get_drvdata(dev);
|
|
+ unsigned long flags;
|
|
+ bool set_lizard_mode;
|
|
|
|
- mutex_lock(&steam->mutex);
|
|
- if (!steam->client_opened && lizard_mode)
|
|
- steam_set_lizard_mode(steam, true);
|
|
- mutex_unlock(&steam->mutex);
|
|
+ if (!(steam->quirks & STEAM_QUIRK_DECK)) {
|
|
+ spin_lock_irqsave(&steam->lock, flags);
|
|
+ set_lizard_mode = !steam->client_opened && lizard_mode;
|
|
+ spin_unlock_irqrestore(&steam->lock, flags);
|
|
+ if (set_lizard_mode)
|
|
+ steam_set_lizard_mode(steam, true);
|
|
+ }
|
|
}
|
|
|
|
static enum power_supply_property steam_battery_props[] = {
|
|
@@ -604,6 +835,74 @@ static int steam_input_register(struct steam_device *steam)
|
|
return ret;
|
|
}
|
|
|
|
+static int steam_sensors_register(struct steam_device *steam)
|
|
+{
|
|
+ struct hid_device *hdev = steam->hdev;
|
|
+ struct input_dev *sensors;
|
|
+ int ret;
|
|
+
|
|
+ if (!(steam->quirks & STEAM_QUIRK_DECK))
|
|
+ return 0;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ sensors = rcu_dereference(steam->sensors);
|
|
+ rcu_read_unlock();
|
|
+ if (sensors) {
|
|
+ dbg_hid("%s: already connected\n", __func__);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ sensors = input_allocate_device();
|
|
+ if (!sensors)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ input_set_drvdata(sensors, steam);
|
|
+ sensors->dev.parent = &hdev->dev;
|
|
+
|
|
+ sensors->name = "Steam Deck Motion Sensors";
|
|
+ sensors->phys = hdev->phys;
|
|
+ sensors->uniq = steam->serial_no;
|
|
+ sensors->id.bustype = hdev->bus;
|
|
+ sensors->id.vendor = hdev->vendor;
|
|
+ sensors->id.product = hdev->product;
|
|
+ sensors->id.version = hdev->version;
|
|
+
|
|
+ __set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit);
|
|
+ __set_bit(EV_MSC, sensors->evbit);
|
|
+ __set_bit(MSC_TIMESTAMP, sensors->mscbit);
|
|
+
|
|
+ input_set_abs_params(sensors, ABS_X, -STEAM_DECK_ACCEL_RANGE,
|
|
+ STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0);
|
|
+ input_set_abs_params(sensors, ABS_Y, -STEAM_DECK_ACCEL_RANGE,
|
|
+ STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0);
|
|
+ input_set_abs_params(sensors, ABS_Z, -STEAM_DECK_ACCEL_RANGE,
|
|
+ STEAM_DECK_ACCEL_RANGE, STEAM_DECK_ACCEL_FUZZ, 0);
|
|
+ input_abs_set_res(sensors, ABS_X, STEAM_DECK_ACCEL_RES_PER_G);
|
|
+ input_abs_set_res(sensors, ABS_Y, STEAM_DECK_ACCEL_RES_PER_G);
|
|
+ input_abs_set_res(sensors, ABS_Z, STEAM_DECK_ACCEL_RES_PER_G);
|
|
+
|
|
+ input_set_abs_params(sensors, ABS_RX, -STEAM_DECK_GYRO_RANGE,
|
|
+ STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0);
|
|
+ input_set_abs_params(sensors, ABS_RY, -STEAM_DECK_GYRO_RANGE,
|
|
+ STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0);
|
|
+ input_set_abs_params(sensors, ABS_RZ, -STEAM_DECK_GYRO_RANGE,
|
|
+ STEAM_DECK_GYRO_RANGE, STEAM_DECK_GYRO_FUZZ, 0);
|
|
+ input_abs_set_res(sensors, ABS_RX, STEAM_DECK_GYRO_RES_PER_DPS);
|
|
+ input_abs_set_res(sensors, ABS_RY, STEAM_DECK_GYRO_RES_PER_DPS);
|
|
+ input_abs_set_res(sensors, ABS_RZ, STEAM_DECK_GYRO_RES_PER_DPS);
|
|
+
|
|
+ ret = input_register_device(sensors);
|
|
+ if (ret)
|
|
+ goto sensors_register_fail;
|
|
+
|
|
+ rcu_assign_pointer(steam->sensors, sensors);
|
|
+ return 0;
|
|
+
|
|
+sensors_register_fail:
|
|
+ input_free_device(sensors);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static void steam_input_unregister(struct steam_device *steam)
|
|
{
|
|
struct input_dev *input;
|
|
@@ -617,6 +916,24 @@ static void steam_input_unregister(struct steam_device *steam)
|
|
input_unregister_device(input);
|
|
}
|
|
|
|
+static void steam_sensors_unregister(struct steam_device *steam)
|
|
+{
|
|
+ struct input_dev *sensors;
|
|
+
|
|
+ if (!(steam->quirks & STEAM_QUIRK_DECK))
|
|
+ return;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ sensors = rcu_dereference(steam->sensors);
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ if (!sensors)
|
|
+ return;
|
|
+ RCU_INIT_POINTER(steam->sensors, NULL);
|
|
+ synchronize_rcu();
|
|
+ input_unregister_device(sensors);
|
|
+}
|
|
+
|
|
static void steam_battery_unregister(struct steam_device *steam)
|
|
{
|
|
struct power_supply *battery;
|
|
@@ -636,6 +953,7 @@ static int steam_register(struct steam_device *steam)
|
|
{
|
|
int ret;
|
|
bool client_opened;
|
|
+ unsigned long flags;
|
|
|
|
/*
|
|
* This function can be called several times in a row with the
|
|
@@ -648,11 +966,9 @@ static int steam_register(struct steam_device *steam)
|
|
* Unlikely, but getting the serial could fail, and it is not so
|
|
* important, so make up a serial number and go on.
|
|
*/
|
|
- mutex_lock(&steam->mutex);
|
|
if (steam_get_serial(steam) < 0)
|
|
strscpy(steam->serial_no, "XXXXXXXXXX",
|
|
sizeof(steam->serial_no));
|
|
- mutex_unlock(&steam->mutex);
|
|
|
|
hid_info(steam->hdev, "Steam Controller '%s' connected",
|
|
steam->serial_no);
|
|
@@ -667,23 +983,31 @@ static int steam_register(struct steam_device *steam)
|
|
mutex_unlock(&steam_devices_lock);
|
|
}
|
|
|
|
- mutex_lock(&steam->mutex);
|
|
+ spin_lock_irqsave(&steam->lock, flags);
|
|
client_opened = steam->client_opened;
|
|
- if (!client_opened)
|
|
- steam_set_lizard_mode(steam, lizard_mode);
|
|
- mutex_unlock(&steam->mutex);
|
|
+ spin_unlock_irqrestore(&steam->lock, flags);
|
|
|
|
- if (!client_opened)
|
|
+ if (!client_opened) {
|
|
+ steam_set_lizard_mode(steam, lizard_mode);
|
|
ret = steam_input_register(steam);
|
|
- else
|
|
- ret = 0;
|
|
+ if (ret != 0)
|
|
+ goto steam_register_input_fail;
|
|
+ ret = steam_sensors_register(steam);
|
|
+ if (ret != 0)
|
|
+ goto steam_register_sensors_fail;
|
|
+ }
|
|
+ return 0;
|
|
|
|
+steam_register_sensors_fail:
|
|
+ steam_input_unregister(steam);
|
|
+steam_register_input_fail:
|
|
return ret;
|
|
}
|
|
|
|
static void steam_unregister(struct steam_device *steam)
|
|
{
|
|
steam_battery_unregister(steam);
|
|
+ steam_sensors_unregister(steam);
|
|
steam_input_unregister(steam);
|
|
if (steam->serial_no[0]) {
|
|
hid_info(steam->hdev, "Steam Controller '%s' disconnected",
|
|
@@ -719,6 +1043,59 @@ static void steam_work_connect_cb(struct work_struct *work)
|
|
}
|
|
}
|
|
|
|
+static void steam_mode_switch_cb(struct work_struct *work)
|
|
+{
|
|
+ struct steam_device *steam = container_of(to_delayed_work(work),
|
|
+ struct steam_device, mode_switch);
|
|
+ unsigned long flags;
|
|
+ bool client_opened;
|
|
+ steam->gamepad_mode = !steam->gamepad_mode;
|
|
+ if (!lizard_mode)
|
|
+ return;
|
|
+
|
|
+ if (steam->gamepad_mode)
|
|
+ steam_set_lizard_mode(steam, false);
|
|
+ else {
|
|
+ spin_lock_irqsave(&steam->lock, flags);
|
|
+ client_opened = steam->client_opened;
|
|
+ spin_unlock_irqrestore(&steam->lock, flags);
|
|
+ if (!client_opened)
|
|
+ steam_set_lizard_mode(steam, lizard_mode);
|
|
+ }
|
|
+
|
|
+ steam_haptic_pulse(steam, STEAM_PAD_RIGHT, 0x190, 0, 1, 0);
|
|
+ if (steam->gamepad_mode) {
|
|
+ steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x14D, 0x14D, 0x2D, 0);
|
|
+ } else {
|
|
+ steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x1F4, 0x1F4, 0x1E, 0);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void steam_work_unregister_cb(struct work_struct *work)
|
|
+{
|
|
+ struct steam_device *steam = container_of(work, struct steam_device,
|
|
+ unregister_work);
|
|
+ unsigned long flags;
|
|
+ bool connected;
|
|
+ bool opened;
|
|
+
|
|
+ spin_lock_irqsave(&steam->lock, flags);
|
|
+ opened = steam->client_opened;
|
|
+ connected = steam->connected;
|
|
+ spin_unlock_irqrestore(&steam->lock, flags);
|
|
+
|
|
+ if (connected) {
|
|
+ if (opened) {
|
|
+ steam_sensors_unregister(steam);
|
|
+ steam_input_unregister(steam);
|
|
+ } else {
|
|
+ steam_set_lizard_mode(steam, lizard_mode);
|
|
+ steam_input_register(steam);
|
|
+ steam_sensors_register(steam);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
static bool steam_is_valve_interface(struct hid_device *hdev)
|
|
{
|
|
struct hid_report_enum *rep_enum;
|
|
@@ -738,22 +1115,6 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
|
|
return !list_empty(&rep_enum->report_list);
|
|
}
|
|
|
|
-static void steam_lizard_mode_heartbeat(struct work_struct *work)
|
|
-{
|
|
- struct steam_device *steam = container_of(work, struct steam_device,
|
|
- heartbeat.work);
|
|
-
|
|
- mutex_lock(&steam->mutex);
|
|
- if (!steam->client_opened && steam->client_hdev) {
|
|
- steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
|
|
- steam_write_registers(steam,
|
|
- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
|
|
- 0);
|
|
- schedule_delayed_work(&steam->heartbeat, 5 * HZ);
|
|
- }
|
|
- mutex_unlock(&steam->mutex);
|
|
-}
|
|
-
|
|
static int steam_client_ll_parse(struct hid_device *hdev)
|
|
{
|
|
struct steam_device *steam = hdev->driver_data;
|
|
@@ -774,12 +1135,13 @@ static void steam_client_ll_stop(struct hid_device *hdev)
|
|
static int steam_client_ll_open(struct hid_device *hdev)
|
|
{
|
|
struct steam_device *steam = hdev->driver_data;
|
|
+ unsigned long flags;
|
|
|
|
- mutex_lock(&steam->mutex);
|
|
+ spin_lock_irqsave(&steam->lock, flags);
|
|
steam->client_opened = true;
|
|
- mutex_unlock(&steam->mutex);
|
|
+ spin_unlock_irqrestore(&steam->lock, flags);
|
|
|
|
- steam_input_unregister(steam);
|
|
+ schedule_work(&steam->unregister_work);
|
|
|
|
return 0;
|
|
}
|
|
@@ -792,17 +1154,11 @@ static void steam_client_ll_close(struct hid_device *hdev)
|
|
bool connected;
|
|
|
|
spin_lock_irqsave(&steam->lock, flags);
|
|
- connected = steam->connected;
|
|
- spin_unlock_irqrestore(&steam->lock, flags);
|
|
-
|
|
- mutex_lock(&steam->mutex);
|
|
steam->client_opened = false;
|
|
- if (connected)
|
|
- steam_set_lizard_mode(steam, lizard_mode);
|
|
- mutex_unlock(&steam->mutex);
|
|
+ connected = steam->connected && !steam->client_opened;
|
|
+ spin_unlock_irqrestore(&steam->lock, flags);
|
|
|
|
- if (connected)
|
|
- steam_input_register(steam);
|
|
+ schedule_work(&steam->unregister_work);
|
|
}
|
|
|
|
static int steam_client_ll_raw_request(struct hid_device *hdev,
|
|
@@ -881,26 +1237,20 @@ static int steam_probe(struct hid_device *hdev,
|
|
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
|
|
|
|
steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL);
|
|
- if (!steam) {
|
|
- ret = -ENOMEM;
|
|
- goto steam_alloc_fail;
|
|
- }
|
|
+ if (!steam)
|
|
+ return -ENOMEM;
|
|
+
|
|
steam->hdev = hdev;
|
|
hid_set_drvdata(hdev, steam);
|
|
spin_lock_init(&steam->lock);
|
|
- mutex_init(&steam->mutex);
|
|
+ mutex_init(&steam->report_mutex);
|
|
steam->quirks = id->driver_data;
|
|
INIT_WORK(&steam->work_connect, steam_work_connect_cb);
|
|
+ INIT_DELAYED_WORK(&steam->mode_switch, steam_mode_switch_cb);
|
|
INIT_LIST_HEAD(&steam->list);
|
|
- INIT_DEFERRABLE_WORK(&steam->heartbeat, steam_lizard_mode_heartbeat);
|
|
INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
|
|
-
|
|
- steam->client_hdev = steam_create_client_hid(hdev);
|
|
- if (IS_ERR(steam->client_hdev)) {
|
|
- ret = PTR_ERR(steam->client_hdev);
|
|
- goto client_hdev_fail;
|
|
- }
|
|
- steam->client_hdev->driver_data = steam;
|
|
+ steam->sensor_timestamp_us = 0;
|
|
+ INIT_WORK(&steam->unregister_work, steam_work_unregister_cb);
|
|
|
|
/*
|
|
* With the real steam controller interface, do not connect hidraw.
|
|
@@ -908,18 +1258,14 @@ static int steam_probe(struct hid_device *hdev,
|
|
*/
|
|
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW);
|
|
if (ret)
|
|
- goto hid_hw_start_fail;
|
|
-
|
|
- ret = hid_add_device(steam->client_hdev);
|
|
- if (ret)
|
|
- goto client_hdev_add_fail;
|
|
+ goto err_cancel_work;
|
|
|
|
ret = hid_hw_open(hdev);
|
|
if (ret) {
|
|
hid_err(hdev,
|
|
"%s:hid_hw_open\n",
|
|
__func__);
|
|
- goto hid_hw_open_fail;
|
|
+ goto err_hw_stop;
|
|
}
|
|
|
|
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
|
|
@@ -935,25 +1281,38 @@ static int steam_probe(struct hid_device *hdev,
|
|
hid_err(hdev,
|
|
"%s:steam_register failed with error %d\n",
|
|
__func__, ret);
|
|
- goto input_register_fail;
|
|
+ goto err_hw_close;
|
|
}
|
|
}
|
|
|
|
+ steam->client_hdev = steam_create_client_hid(hdev);
|
|
+ if (IS_ERR(steam->client_hdev)) {
|
|
+ ret = PTR_ERR(steam->client_hdev);
|
|
+ goto err_stream_unregister;
|
|
+ }
|
|
+ steam->client_hdev->driver_data = steam;
|
|
+
|
|
+ ret = hid_add_device(steam->client_hdev);
|
|
+ if (ret)
|
|
+ goto err_destroy;
|
|
+
|
|
return 0;
|
|
|
|
-input_register_fail:
|
|
-hid_hw_open_fail:
|
|
-client_hdev_add_fail:
|
|
- hid_hw_stop(hdev);
|
|
-hid_hw_start_fail:
|
|
+err_destroy:
|
|
hid_destroy_device(steam->client_hdev);
|
|
-client_hdev_fail:
|
|
+err_stream_unregister:
|
|
+ if (steam->connected)
|
|
+ steam_unregister(steam);
|
|
+err_hw_close:
|
|
+ hid_hw_close(hdev);
|
|
+err_hw_stop:
|
|
+ hid_hw_stop(hdev);
|
|
+err_cancel_work:
|
|
cancel_work_sync(&steam->work_connect);
|
|
- cancel_delayed_work_sync(&steam->heartbeat);
|
|
+ cancel_delayed_work_sync(&steam->mode_switch);
|
|
cancel_work_sync(&steam->rumble_work);
|
|
-steam_alloc_fail:
|
|
- hid_err(hdev, "%s: failed with error %d\n",
|
|
- __func__, ret);
|
|
+ cancel_work_sync(&steam->unregister_work);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -966,13 +1325,13 @@ static void steam_remove(struct hid_device *hdev)
|
|
return;
|
|
}
|
|
|
|
+ cancel_delayed_work_sync(&steam->mode_switch);
|
|
+ cancel_work_sync(&steam->work_connect);
|
|
+ cancel_work_sync(&steam->rumble_work);
|
|
+ cancel_work_sync(&steam->unregister_work);
|
|
hid_destroy_device(steam->client_hdev);
|
|
- mutex_lock(&steam->mutex);
|
|
steam->client_hdev = NULL;
|
|
steam->client_opened = false;
|
|
- cancel_delayed_work_sync(&steam->heartbeat);
|
|
- mutex_unlock(&steam->mutex);
|
|
- cancel_work_sync(&steam->work_connect);
|
|
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
|
|
hid_info(hdev, "Steam wireless receiver disconnected");
|
|
}
|
|
@@ -1154,12 +1513,12 @@ static void steam_do_input_event(struct steam_device *steam,
|
|
* 18-19 | s16 | ABS_HAT0Y | left-pad Y value
|
|
* 20-21 | s16 | ABS_HAT1X | right-pad X value
|
|
* 22-23 | s16 | ABS_HAT1Y | right-pad Y value
|
|
- * 24-25 | s16 | -- | accelerometer X value
|
|
- * 26-27 | s16 | -- | accelerometer Y value
|
|
- * 28-29 | s16 | -- | accelerometer Z value
|
|
- * 30-31 | s16 | -- | gyro X value
|
|
- * 32-33 | s16 | -- | gyro Y value
|
|
- * 34-35 | s16 | -- | gyro Z value
|
|
+ * 24-25 | s16 | IMU ABS_X | accelerometer X value
|
|
+ * 26-27 | s16 | IMU ABS_Z | accelerometer Y value
|
|
+ * 28-29 | s16 | IMU ABS_Y | accelerometer Z value
|
|
+ * 30-31 | s16 | IMU ABS_RX | gyro X value
|
|
+ * 32-33 | s16 | IMU ABS_RZ | gyro Y value
|
|
+ * 34-35 | s16 | IMU ABS_RY | gyro Z value
|
|
* 36-37 | s16 | -- | quaternion W value
|
|
* 38-39 | s16 | -- | quaternion X value
|
|
* 40-41 | s16 | -- | quaternion Y value
|
|
@@ -1254,6 +1613,17 @@ static void steam_do_deck_input_event(struct steam_device *steam,
|
|
b13 = data[13];
|
|
b14 = data[14];
|
|
|
|
+ if (!(b9 & BIT(6)) && steam->did_mode_switch) {
|
|
+ steam->did_mode_switch = false;
|
|
+ cancel_delayed_work(&steam->mode_switch);
|
|
+ } else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
|
|
+ steam->did_mode_switch = true;
|
|
+ schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
|
|
+ }
|
|
+
|
|
+ if (!steam->gamepad_mode)
|
|
+ return;
|
|
+
|
|
lpad_touched = b10 & BIT(3);
|
|
rpad_touched = b10 & BIT(4);
|
|
|
|
@@ -1309,6 +1679,32 @@ static void steam_do_deck_input_event(struct steam_device *steam,
|
|
input_sync(input);
|
|
}
|
|
|
|
+static void steam_do_deck_sensors_event(struct steam_device *steam,
|
|
+ struct input_dev *sensors, u8 *data)
|
|
+{
|
|
+ /*
|
|
+ * The deck input report is received every 4 ms on average,
|
|
+ * with a jitter of +/- 4 ms even though the USB descriptor claims
|
|
+ * that it uses 1 kHz.
|
|
+ * Since the HID report does not include a sensor timestamp,
|
|
+ * use a fixed increment here.
|
|
+ */
|
|
+ steam->sensor_timestamp_us += 4000;
|
|
+
|
|
+ if (!steam->gamepad_mode)
|
|
+ return;
|
|
+
|
|
+ input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us);
|
|
+ input_report_abs(sensors, ABS_X, steam_le16(data + 24));
|
|
+ input_report_abs(sensors, ABS_Z, -steam_le16(data + 26));
|
|
+ input_report_abs(sensors, ABS_Y, steam_le16(data + 28));
|
|
+ input_report_abs(sensors, ABS_RX, steam_le16(data + 30));
|
|
+ input_report_abs(sensors, ABS_RZ, -steam_le16(data + 32));
|
|
+ input_report_abs(sensors, ABS_RY, steam_le16(data + 34));
|
|
+
|
|
+ input_sync(sensors);
|
|
+}
|
|
+
|
|
/*
|
|
* The size for this message payload is 11.
|
|
* The known values are:
|
|
@@ -1346,6 +1742,7 @@ static int steam_raw_event(struct hid_device *hdev,
|
|
{
|
|
struct steam_device *steam = hid_get_drvdata(hdev);
|
|
struct input_dev *input;
|
|
+ struct input_dev *sensors;
|
|
struct power_supply *battery;
|
|
|
|
if (!steam)
|
|
@@ -1375,7 +1772,7 @@ static int steam_raw_event(struct hid_device *hdev,
|
|
return 0;
|
|
|
|
switch (data[2]) {
|
|
- case STEAM_EV_INPUT_DATA:
|
|
+ case ID_CONTROLLER_STATE:
|
|
if (steam->client_opened)
|
|
return 0;
|
|
rcu_read_lock();
|
|
@@ -1384,16 +1781,19 @@ static int steam_raw_event(struct hid_device *hdev,
|
|
steam_do_input_event(steam, input, data);
|
|
rcu_read_unlock();
|
|
break;
|
|
- case STEAM_EV_DECK_INPUT_DATA:
|
|
+ case ID_CONTROLLER_DECK_STATE:
|
|
if (steam->client_opened)
|
|
return 0;
|
|
rcu_read_lock();
|
|
input = rcu_dereference(steam->input);
|
|
if (likely(input))
|
|
steam_do_deck_input_event(steam, input, data);
|
|
+ sensors = rcu_dereference(steam->sensors);
|
|
+ if (likely(sensors))
|
|
+ steam_do_deck_sensors_event(steam, sensors, data);
|
|
rcu_read_unlock();
|
|
break;
|
|
- case STEAM_EV_CONNECT:
|
|
+ case ID_CONTROLLER_WIRELESS:
|
|
/*
|
|
* The payload of this event is a single byte:
|
|
* 0x01: disconnected.
|
|
@@ -1408,7 +1808,7 @@ static int steam_raw_event(struct hid_device *hdev,
|
|
break;
|
|
}
|
|
break;
|
|
- case STEAM_EV_BATTERY:
|
|
+ case ID_CONTROLLER_STATUS:
|
|
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
|
|
rcu_read_lock();
|
|
battery = rcu_dereference(steam->battery);
|
|
@@ -1439,10 +1839,8 @@ static int steam_param_set_lizard_mode(const char *val,
|
|
|
|
mutex_lock(&steam_devices_lock);
|
|
list_for_each_entry(steam, &steam_devices, list) {
|
|
- mutex_lock(&steam->mutex);
|
|
if (!steam->client_opened)
|
|
steam_set_lizard_mode(steam, lizard_mode);
|
|
- mutex_unlock(&steam->mutex);
|
|
}
|
|
mutex_unlock(&steam_devices_lock);
|
|
return 0;
|
|
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 6c3e758bbb09e3..3b81468a1df297 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -171,7 +171,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
 		b_ep = ep->desc.bEndpointAddress;
 
 		/* Are the expected endpoints present? */
-		u8 ep_addr[1] = {b_ep};
+		u8 ep_addr[2] = {b_ep, 0};
 
 		if (!usb_check_int_endpoints(usbif, ep_addr)) {
 			hid_err(hdev, "Unexpected non-int endpoint\n");
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 15ee9208111879..924940ca9de0a0 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -452,7 +452,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
 	ibdev_info(&dev->ibdev, "Unregister ib device\n");
 	ib_unregister_device(&dev->ibdev);
 	efa_destroy_eqs(dev);
-	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
 	efa_release_doorbell_bar(dev);
 }
 
@@ -623,12 +622,14 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
 	return ERR_PTR(err);
 }
 
-static void efa_remove_device(struct pci_dev *pdev)
+static void efa_remove_device(struct pci_dev *pdev,
+			      enum efa_regs_reset_reason_types reset_reason)
 {
 	struct efa_dev *dev = pci_get_drvdata(pdev);
 	struct efa_com_dev *edev;
 
 	edev = &dev->edev;
+	efa_com_dev_reset(edev, reset_reason);
 	efa_com_admin_destroy(edev);
 	efa_free_irq(dev, &dev->admin_irq);
 	efa_disable_msix(dev);
@@ -656,7 +657,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 err_remove_device:
-	efa_remove_device(pdev);
+	efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
 	return err;
 }
 
@@ -665,7 +666,7 @@ static void efa_remove(struct pci_dev *pdev)
 	struct efa_dev *dev = pci_get_drvdata(pdev);
 
 	efa_ib_device_remove(dev);
-	efa_remove_device(pdev);
+	efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
 }
 
 static struct pci_driver efa_pci_driver = {
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index ba63076cd8f2b2..2085b1705f144f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1465,22 +1465,12 @@ __acquires(bitmap->lock)
 		&(bitmap->bp[page].map[pageoff]);
 }
 
-int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
+int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
+			 unsigned long sectors)
 {
 	if (!bitmap)
 		return 0;
 
-	if (behind) {
-		int bw;
-		atomic_inc(&bitmap->behind_writes);
-		bw = atomic_read(&bitmap->behind_writes);
-		if (bw > bitmap->behind_writes_used)
-			bitmap->behind_writes_used = bw;
-
-		pr_debug("inc write-behind count %d/%lu\n",
-			 bw, bitmap->mddev->bitmap_info.max_write_behind);
-	}
-
 	while (sectors) {
 		sector_t blocks;
 		bitmap_counter_t *bmc;
@@ -1527,20 +1517,12 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s
 	}
 	return 0;
 }
-EXPORT_SYMBOL(md_bitmap_startwrite);
 
 void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
-		     unsigned long sectors, int success, int behind)
+		     unsigned long sectors)
 {
 	if (!bitmap)
 		return;
-	if (behind) {
-		if (atomic_dec_and_test(&bitmap->behind_writes))
-			wake_up(&bitmap->behind_wait);
-		pr_debug("dec write-behind count %d/%lu\n",
-			 atomic_read(&bitmap->behind_writes),
-			 bitmap->mddev->bitmap_info.max_write_behind);
-	}
 
 	while (sectors) {
 		sector_t blocks;
@@ -1554,15 +1536,16 @@ void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
 			return;
 		}
 
-		if (success && !bitmap->mddev->degraded &&
-		    bitmap->events_cleared < bitmap->mddev->events) {
-			bitmap->events_cleared = bitmap->mddev->events;
-			bitmap->need_sync = 1;
-			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
-		}
-
-		if (!success && !NEEDED(*bmc))
+		if (!bitmap->mddev->degraded) {
+			if (bitmap->events_cleared < bitmap->mddev->events) {
+				bitmap->events_cleared = bitmap->mddev->events;
+				bitmap->need_sync = 1;
+				sysfs_notify_dirent_safe(
+						bitmap->sysfs_can_clear);
+			}
+		} else if (!NEEDED(*bmc)) {
 			*bmc |= NEEDED_MASK;
+		}
 
 		if (COUNTER(*bmc) == COUNTER_MAX)
 			wake_up(&bitmap->overflow_wait);
@@ -1580,7 +1563,6 @@ void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
 		sectors = 0;
 	}
 }
-EXPORT_SYMBOL(md_bitmap_endwrite);
 
 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
 			       int degraded)
@@ -1842,6 +1824,39 @@ void md_bitmap_free(struct bitmap *bitmap)
 }
 EXPORT_SYMBOL(md_bitmap_free);
 
+void md_bitmap_start_behind_write(struct mddev *mddev)
+{
+	struct bitmap *bitmap = mddev->bitmap;
+	int bw;
+
+	if (!bitmap)
+		return;
+
+	atomic_inc(&bitmap->behind_writes);
+	bw = atomic_read(&bitmap->behind_writes);
+	if (bw > bitmap->behind_writes_used)
+		bitmap->behind_writes_used = bw;
+
+	pr_debug("inc write-behind count %d/%lu\n",
+		 bw, bitmap->mddev->bitmap_info.max_write_behind);
+}
+EXPORT_SYMBOL_GPL(md_bitmap_start_behind_write);
+
+void md_bitmap_end_behind_write(struct mddev *mddev)
+{
+	struct bitmap *bitmap = mddev->bitmap;
+
+	if (!bitmap)
+		return;
+
+	if (atomic_dec_and_test(&bitmap->behind_writes))
+		wake_up(&bitmap->behind_wait);
+	pr_debug("dec write-behind count %d/%lu\n",
+		 atomic_read(&bitmap->behind_writes),
+		 bitmap->mddev->bitmap_info.max_write_behind);
+}
+EXPORT_SYMBOL_GPL(md_bitmap_end_behind_write);
+
 void md_bitmap_wait_behind_writes(struct mddev *mddev)
 {
 	struct bitmap *bitmap = mddev->bitmap;
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index bb9eb418780a62..8b89e260a93b71 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -253,9 +253,11 @@ void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long
 
 /* these are exported */
 int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
-			 unsigned long sectors, int behind);
+			 unsigned long sectors);
 void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
-			unsigned long sectors, int success, int behind);
+			unsigned long sectors);
+void md_bitmap_start_behind_write(struct mddev *mddev);
+void md_bitmap_end_behind_write(struct mddev *mddev);
 int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
 void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
 void md_bitmap_close_sync(struct bitmap *bitmap);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d1f6770c5cc094..9bc19a5a4119bd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8713,12 +8713,32 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 }
 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
 
+static void md_bitmap_start(struct mddev *mddev,
+			    struct md_io_clone *md_io_clone)
+{
+	if (mddev->pers->bitmap_sector)
+		mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
+					   &md_io_clone->sectors);
+
+	md_bitmap_startwrite(mddev->bitmap, md_io_clone->offset,
+			     md_io_clone->sectors);
+}
+
+static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
+{
+	md_bitmap_endwrite(mddev->bitmap, md_io_clone->offset,
+			   md_io_clone->sectors);
+}
+
 static void md_end_clone_io(struct bio *bio)
 {
 	struct md_io_clone *md_io_clone = bio->bi_private;
 	struct bio *orig_bio = md_io_clone->orig_bio;
 	struct mddev *mddev = md_io_clone->mddev;
 
+	if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+		md_bitmap_end(mddev, md_io_clone);
+
 	if (bio->bi_status && !orig_bio->bi_status)
 		orig_bio->bi_status = bio->bi_status;
 
@@ -8743,6 +8763,12 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
 	if (blk_queue_io_stat(bdev->bd_disk->queue))
 		md_io_clone->start_time = bio_start_io_acct(*bio);
 
+	if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
+		md_io_clone->offset = (*bio)->bi_iter.bi_sector;
+		md_io_clone->sectors = bio_sectors(*bio);
+		md_bitmap_start(mddev, md_io_clone);
+	}
+
 	clone->bi_end_io = md_end_clone_io;
 	clone->bi_private = md_io_clone;
 	*bio = clone;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7c9c13abd7cac0..f29fa8650cd0f0 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -661,6 +661,9 @@ struct md_personality
 	void *(*takeover) (struct mddev *mddev);
 	/* Changes the consistency policy of an active array. */
 	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
+	/* convert io ranges from array to bitmap */
+	void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
+			      unsigned long *sectors);
 };
 
 struct md_sysfs_entry {
@@ -743,6 +746,8 @@ struct md_io_clone {
 	struct mddev *mddev;
 	struct bio *orig_bio;
 	unsigned long start_time;
+	sector_t offset;
+	unsigned long sectors;
 	struct bio bio_clone;
 };
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index cc02e7ec72c08c..65309da1dca340 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -419,11 +419,8 @@ static void close_write(struct r1bio *r1_bio)
 		bio_put(r1_bio->behind_master_bio);
 		r1_bio->behind_master_bio = NULL;
 	}
-	/* clear the bitmap if all writes complete successfully */
-	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
-			   r1_bio->sectors,
-			   !test_bit(R1BIO_Degraded, &r1_bio->state),
-			   test_bit(R1BIO_BehindIO, &r1_bio->state));
+	if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+		md_bitmap_end_behind_write(r1_bio->mddev);
 	md_write_end(r1_bio->mddev);
 }
 
@@ -480,8 +477,6 @@ static void raid1_end_write_request(struct bio *bio)
 		if (!test_bit(Faulty, &rdev->flags))
 			set_bit(R1BIO_WriteError, &r1_bio->state);
 		else {
-			/* Fail the request */
-			set_bit(R1BIO_Degraded, &r1_bio->state);
 			/* Finished with this branch */
 			r1_bio->bios[mirror] = NULL;
 			to_put = bio;
@@ -1414,11 +1409,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			break;
 		}
 		r1_bio->bios[i] = NULL;
-		if (!rdev || test_bit(Faulty, &rdev->flags)) {
-			if (i < conf->raid_disks)
-				set_bit(R1BIO_Degraded, &r1_bio->state);
+		if (!rdev || test_bit(Faulty, &rdev->flags))
 			continue;
-		}
 
 		atomic_inc(&rdev->nr_pending);
 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -1444,16 +1436,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 				 */
 				max_sectors = bad_sectors;
 				rdev_dec_pending(rdev, mddev);
-				/* We don't set R1BIO_Degraded as that
-				 * only applies if the disk is
-				 * missing, so it might be re-added,
-				 * and we want to know to recover this
-				 * chunk.
-				 * In this case the device is here,
-				 * and the fact that this chunk is not
-				 * in-sync is recorded in the bad
-				 * block log
-				 */
 				continue;
 			}
 			if (is_bad) {
@@ -1530,8 +1512,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 				alloc_behind_master_bio(r1_bio, bio);
 			}
 
-			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
-					     test_bit(R1BIO_BehindIO, &r1_bio->state));
+			if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+				md_bitmap_start_behind_write(mddev);
 			first_clone = 0;
 		}
 
@@ -2476,12 +2458,9 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 			 * errors.
 			 */
 			fail = true;
-			if (!narrow_write_error(r1_bio, m)) {
+			if (!narrow_write_error(r1_bio, m))
 				md_error(conf->mddev,
 					 conf->mirrors[m].rdev);
-				/* an I/O failed, we can't clear the bitmap */
-				set_bit(R1BIO_Degraded, &r1_bio->state);
-			}
 			rdev_dec_pending(conf->mirrors[m].rdev,
 					 conf->mddev);
 		}
@@ -2573,8 +2552,6 @@ static void raid1d(struct md_thread *thread)
 			list_del(&r1_bio->retry_list);
 			idx = sector_to_idx(r1_bio->sector);
 			atomic_dec(&conf->nr_queued[idx]);
-			if (mddev->degraded)
-				set_bit(R1BIO_Degraded, &r1_bio->state);
 			if (test_bit(R1BIO_WriteError, &r1_bio->state))
 				close_write(r1_bio);
 			raid_end_bio_io(r1_bio);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 14d4211a123a8e..44f2390a886690 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -187,7 +187,6 @@ struct r1bio {
 enum r1bio_state {
 	R1BIO_Uptodate,
 	R1BIO_IsSync,
-	R1BIO_Degraded,
 	R1BIO_BehindIO,
 	/* Set ReadError on bios that experience a readerror so that
 	 * raid1d knows what to do with them.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 02341312085162..c300fd609ef08c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -427,11 +427,6 @@ static void raid10_end_read_request(struct bio *bio)
 
 static void close_write(struct r10bio *r10_bio)
 {
-	/* clear the bitmap if all writes complete successfully */
-	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-			   r10_bio->sectors,
-			   !test_bit(R10BIO_Degraded, &r10_bio->state),
-			   0);
 	md_write_end(r10_bio->mddev);
 }
 
@@ -501,7 +496,6 @@ static void raid10_end_write_request(struct bio *bio)
 			set_bit(R10BIO_WriteError, &r10_bio->state);
 		else {
 			/* Fail the request */
-			set_bit(R10BIO_Degraded, &r10_bio->state);
 			r10_bio->devs[slot].bio = NULL;
 			to_put = bio;
 			dec_rdev = 1;
@@ -1490,10 +1484,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 		r10_bio->devs[i].bio = NULL;
 		r10_bio->devs[i].repl_bio = NULL;
 
-		if (!rdev && !rrdev) {
-			set_bit(R10BIO_Degraded, &r10_bio->state);
+		if (!rdev && !rrdev)
 			continue;
-		}
 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
 			sector_t first_bad;
 			sector_t dev_sector = r10_bio->devs[i].addr;
@@ -1510,14 +1502,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 				 * to other devices yet
 				 */
 				max_sectors = bad_sectors;
-				/* We don't set R10BIO_Degraded as that
-				 * only applies if the disk is missing,
-				 * so it might be re-added, and we want to
-				 * know to recover this chunk.
-				 * In this case the device is here, and the
-				 * fact that this chunk is not in-sync is
-				 * recorded in the bad block log.
-				 */
 				continue;
 			}
 			if (is_bad) {
@@ -1554,7 +1538,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	md_account_bio(mddev, &bio);
 	r10_bio->master_bio = bio;
 	atomic_set(&r10_bio->remaining, 1);
-	md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
 
 	for (i = 0; i < conf->copies; i++) {
 		if (r10_bio->devs[i].bio)
@@ -3063,11 +3046,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 				rdev_dec_pending(rdev, conf->mddev);
 			} else if (bio != NULL && bio->bi_status) {
 				fail = true;
-				if (!narrow_write_error(r10_bio, m)) {
+				if (!narrow_write_error(r10_bio, m))
 					md_error(conf->mddev, rdev);
-					set_bit(R10BIO_Degraded,
-						&r10_bio->state);
-				}
 				rdev_dec_pending(rdev, conf->mddev);
 			}
 			bio = r10_bio->devs[m].repl_bio;
@@ -3126,8 +3106,6 @@ static void raid10d(struct md_thread *thread)
 			r10_bio = list_first_entry(&tmp, struct r10bio,
 						   retry_list);
 			list_del(&r10_bio->retry_list);
-			if (mddev->degraded)
-				set_bit(R10BIO_Degraded, &r10_bio->state);
 
 			if (test_bit(R10BIO_WriteError,
 				     &r10_bio->state))
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 2e75e88d08023f..3f16ad6904a9fb 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -161,7 +161,6 @@ enum r10bio_state {
 	R10BIO_IsSync,
 	R10BIO_IsRecover,
 	R10BIO_IsReshape,
-	R10BIO_Degraded,
 	/* Set ReadError on bios that experience a read error
 	 * so that raid10d knows what to do with them.
 	 */
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 889bba60d6ff71..53f3718c01ebe1 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -313,10 +313,6 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
 		if (sh->dev[i].written) {
 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
 			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
-			md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
-					   RAID5_STRIPE_SECTORS(conf),
-					   !test_bit(STRIPE_DEGRADED, &sh->state),
-					   0);
 		}
 	}
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2c7f11e5766735..f69e4a6a8a5923 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -905,7 +905,6 @@ static bool stripe_can_batch(struct stripe_head *sh)
 	if (raid5_has_log(conf) || raid5_has_ppl(conf))
 		return false;
 	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
-		!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
 		is_full_stripe_write(sh);
 }
 
@@ -1359,8 +1358,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			submit_bio_noacct(rbi);
 		}
 		if (!rdev && !rrdev) {
-			if (op_is_write(op))
-				set_bit(STRIPE_DEGRADED, &sh->state);
 			pr_debug("skip op %d on disc %d for sector %llu\n",
 				 bi->bi_opf, i, (unsigned long long)sh->sector);
 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -2925,7 +2922,6 @@ static void raid5_end_write_request(struct bio *bi)
 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
 	} else {
 		if (bi->bi_status) {
-			set_bit(STRIPE_DEGRADED, &sh->state);
 			set_bit(WriteErrorSeen, &rdev->flags);
 			set_bit(R5_WriteError, &sh->dev[i].flags);
 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
@@ -3590,29 +3586,9 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
 		(*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
 		sh->dev[dd_idx].sector);
 
-	if (conf->mddev->bitmap && firstwrite) {
-		/* Cannot hold spinlock over bitmap_startwrite,
-		 * but must ensure this isn't added to a batch until
-		 * we have added to the bitmap and set bm_seq.
-		 * So set STRIPE_BITMAP_PENDING to prevent
-		 * batching.
-		 * If multiple __add_stripe_bio() calls race here they
-		 * much all set STRIPE_BITMAP_PENDING. So only the first one
-		 * to complete "bitmap_startwrite" gets to set
-		 * STRIPE_BIT_DELAY. This is important as once a stripe
-		 * is added to a batch, STRIPE_BIT_DELAY cannot be changed
-		 * any more.
-		 */
-		set_bit(STRIPE_BITMAP_PENDING, &sh->state);
-		spin_unlock_irq(&sh->stripe_lock);
-		md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
-				     RAID5_STRIPE_SECTORS(conf), 0);
-		spin_lock_irq(&sh->stripe_lock);
-		clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
-		if (!sh->batch_head) {
-			sh->bm_seq = conf->seq_flush+1;
-			set_bit(STRIPE_BIT_DELAY, &sh->state);
-		}
+	if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
+		sh->bm_seq = conf->seq_flush+1;
+		set_bit(STRIPE_BIT_DELAY, &sh->state);
 	}
 }
 
@@ -3663,7 +3639,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 	BUG_ON(sh->batch_head);
 	for (i = disks; i--; ) {
 		struct bio *bi;
-		int bitmap_end = 0;
 
 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
 			struct md_rdev *rdev;
@@ -3690,8 +3665,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		sh->dev[i].towrite = NULL;
 		sh->overwrite_disks = 0;
 		spin_unlock_irq(&sh->stripe_lock);
-		if (bi)
-			bitmap_end = 1;
 
 		log_stripe_write_finished(sh);
 
@@ -3706,10 +3679,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			bio_io_error(bi);
 			bi = nextbi;
 		}
-		if (bitmap_end)
-			md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
-					   RAID5_STRIPE_SECTORS(conf), 0, 0);
-		bitmap_end = 0;
 		/* and fail all 'written' */
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
@@ -3718,7 +3687,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			sh->dev[i].page = sh->dev[i].orig_page;
 		}
 
-		if (bi) bitmap_end = 1;
 		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
 			struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
@@ -3752,9 +3720,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 				bi = nextbi;
 			}
 		}
-		if (bitmap_end)
-			md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
-					   RAID5_STRIPE_SECTORS(conf), 0, 0);
 		/* If we were in the middle of a write the parity block might
 		 * still be locked - so just clear all R5_LOCKED flags
 		 */
@@ -4105,10 +4070,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 					bio_endio(wbi);
 					wbi = wbi2;
 				}
-				md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
-						   RAID5_STRIPE_SECTORS(conf),
-						   !test_bit(STRIPE_DEGRADED, &sh->state),
-						   0);
+
 				if (head_sh->batch_head) {
 					sh = list_first_entry(&sh->batch_list,
 							      struct stripe_head,
@@ -4385,7 +4347,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
 			s->locked++;
 			set_bit(R5_Wantwrite, &dev->flags);
 
-			clear_bit(STRIPE_DEGRADED, &sh->state);
 			set_bit(STRIPE_INSYNC, &sh->state);
 			break;
 		case check_state_run:
@@ -4542,7 +4503,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
 				clear_bit(R5_Wantwrite, &dev->flags);
 				s->locked--;
 			}
-		clear_bit(STRIPE_DEGRADED, &sh->state);
 
 		set_bit(STRIPE_INSYNC, &sh->state);
 		break;
@@ -4942,8 +4902,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 			  (1 << STRIPE_COMPUTE_RUN) |
 			  (1 << STRIPE_DISCARD) |
 			  (1 << STRIPE_BATCH_READY) |
-			  (1 << STRIPE_BATCH_ERR) |
-			  (1 << STRIPE_BITMAP_PENDING)),
+			  (1 << STRIPE_BATCH_ERR)),
 			"stripe state: %lx\n", sh->state);
 	WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
 				    (1 << STRIPE_REPLACED)),
|
|
@@ -4951,7 +4910,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
|
|
|
|
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
|
|
(1 << STRIPE_PREREAD_ACTIVE) |
|
|
- (1 << STRIPE_DEGRADED) |
|
|
(1 << STRIPE_ON_UNPLUG_LIST)),
|
|
head_sh->state & (1 << STRIPE_INSYNC));
|
|
|
|
@@ -5848,13 +5806,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
|
|
}
|
|
spin_unlock_irq(&sh->stripe_lock);
|
|
if (conf->mddev->bitmap) {
|
|
- for (d = 0;
|
|
- d < conf->raid_disks - conf->max_degraded;
|
|
- d++)
|
|
- md_bitmap_startwrite(mddev->bitmap,
|
|
- sh->sector,
|
|
- RAID5_STRIPE_SECTORS(conf),
|
|
- 0);
|
|
sh->bm_seq = conf->seq_flush + 1;
|
|
set_bit(STRIPE_BIT_DELAY, &sh->state);
|
|
}
|
|
@@ -5972,6 +5923,87 @@ static bool reshape_disabled(struct mddev *mddev)
|
|
return is_md_suspended(mddev) || !md_is_rdwr(mddev);
|
|
}
|
|
|
|
+enum reshape_loc {
|
|
+ LOC_NO_RESHAPE,
|
|
+ LOC_AHEAD_OF_RESHAPE,
|
|
+ LOC_INSIDE_RESHAPE,
|
|
+ LOC_BEHIND_RESHAPE,
|
|
+};
|
|
+
|
|
+static enum reshape_loc get_reshape_loc(struct mddev *mddev,
|
|
+ struct r5conf *conf, sector_t logical_sector)
|
|
+{
|
|
+ sector_t reshape_progress, reshape_safe;
|
|
+ /*
|
|
+ * Spinlock is needed as reshape_progress may be
|
|
+ * 64bit on a 32bit platform, and so it might be
|
|
+ * possible to see a half-updated value
|
|
+ * Of course reshape_progress could change after
|
|
+ * the lock is dropped, so once we get a reference
|
|
+ * to the stripe that we think it is, we will have
|
|
+ * to check again.
|
|
+ */
|
|
+ spin_lock_irq(&conf->device_lock);
|
|
+ reshape_progress = conf->reshape_progress;
|
|
+ reshape_safe = conf->reshape_safe;
|
|
+ spin_unlock_irq(&conf->device_lock);
|
|
+ if (reshape_progress == MaxSector)
|
|
+ return LOC_NO_RESHAPE;
|
|
+ if (ahead_of_reshape(mddev, logical_sector, reshape_progress))
|
|
+ return LOC_AHEAD_OF_RESHAPE;
|
|
+ if (ahead_of_reshape(mddev, logical_sector, reshape_safe))
|
|
+ return LOC_INSIDE_RESHAPE;
|
|
+ return LOC_BEHIND_RESHAPE;
|
|
+}
|
|
+
|
|
+static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
|
|
+ unsigned long *sectors)
|
|
+{
|
|
+ struct r5conf *conf = mddev->private;
|
|
+ sector_t start = *offset;
|
|
+ sector_t end = start + *sectors;
|
|
+ sector_t prev_start = start;
|
|
+ sector_t prev_end = end;
|
|
+ int sectors_per_chunk;
|
|
+ enum reshape_loc loc;
|
|
+ int dd_idx;
|
|
+
|
|
+ sectors_per_chunk = conf->chunk_sectors *
|
|
+ (conf->raid_disks - conf->max_degraded);
|
|
+ start = round_down(start, sectors_per_chunk);
|
|
+ end = round_up(end, sectors_per_chunk);
|
|
+
|
|
+ start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
|
|
+ end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
|
|
+
|
|
+ /*
|
|
+ * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
|
|
+ * progress, hence it's the same as LOC_BEHIND_RESHAPE.
|
|
+ */
|
|
+ loc = get_reshape_loc(mddev, conf, prev_start);
|
|
+ if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
|
|
+ *offset = start;
|
|
+ *sectors = end - start;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ sectors_per_chunk = conf->prev_chunk_sectors *
|
|
+ (conf->previous_raid_disks - conf->max_degraded);
|
|
+ prev_start = round_down(prev_start, sectors_per_chunk);
|
|
+ prev_end = round_down(prev_end, sectors_per_chunk);
|
|
+
|
|
+ prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
|
|
+ prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
|
|
+
|
|
+ /*
|
|
+	 * For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
+	 * is handled in make_stripe_request(); we can't know that here, hence
+	 * we set bits for both.
|
|
+ */
|
|
+ *offset = min(start, prev_start);
|
|
+ *sectors = max(end, prev_end) - *offset;
|
|
+}
|
|
+
|
|
static enum stripe_result make_stripe_request(struct mddev *mddev,
|
|
struct r5conf *conf, struct stripe_request_ctx *ctx,
|
|
sector_t logical_sector, struct bio *bi)
|
|
@@ -5986,28 +6018,14 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
|
|
seq = read_seqcount_begin(&conf->gen_lock);
|
|
|
|
if (unlikely(conf->reshape_progress != MaxSector)) {
|
|
- /*
|
|
- * Spinlock is needed as reshape_progress may be
|
|
- * 64bit on a 32bit platform, and so it might be
|
|
- * possible to see a half-updated value
|
|
- * Of course reshape_progress could change after
|
|
- * the lock is dropped, so once we get a reference
|
|
- * to the stripe that we think it is, we will have
|
|
- * to check again.
|
|
- */
|
|
- spin_lock_irq(&conf->device_lock);
|
|
- if (ahead_of_reshape(mddev, logical_sector,
|
|
- conf->reshape_progress)) {
|
|
- previous = 1;
|
|
- } else {
|
|
- if (ahead_of_reshape(mddev, logical_sector,
|
|
- conf->reshape_safe)) {
|
|
- spin_unlock_irq(&conf->device_lock);
|
|
- ret = STRIPE_SCHEDULE_AND_RETRY;
|
|
- goto out;
|
|
- }
|
|
+ enum reshape_loc loc = get_reshape_loc(mddev, conf,
|
|
+ logical_sector);
|
|
+ if (loc == LOC_INSIDE_RESHAPE) {
|
|
+ ret = STRIPE_SCHEDULE_AND_RETRY;
|
|
+ goto out;
|
|
}
|
|
- spin_unlock_irq(&conf->device_lock);
|
|
+ if (loc == LOC_AHEAD_OF_RESHAPE)
|
|
+ previous = 1;
|
|
}
|
|
|
|
new_sector = raid5_compute_sector(conf, logical_sector, previous,
|
|
@@ -6189,8 +6207,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
|
|
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
|
|
if ((bi->bi_opf & REQ_NOWAIT) &&
|
|
(conf->reshape_progress != MaxSector) &&
|
|
- !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) &&
|
|
- ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) {
|
|
+ get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) {
|
|
bio_wouldblock_error(bi);
|
|
if (rw == WRITE)
|
|
md_write_end(mddev);
|
|
@@ -9090,6 +9107,7 @@ static struct md_personality raid6_personality =
|
|
.quiesce = raid5_quiesce,
|
|
.takeover = raid6_takeover,
|
|
.change_consistency_policy = raid5_change_consistency_policy,
|
|
+ .bitmap_sector = raid5_bitmap_sector,
|
|
};
|
|
static struct md_personality raid5_personality =
|
|
{
|
|
@@ -9115,6 +9133,7 @@ static struct md_personality raid5_personality =
|
|
.quiesce = raid5_quiesce,
|
|
.takeover = raid5_takeover,
|
|
.change_consistency_policy = raid5_change_consistency_policy,
|
|
+ .bitmap_sector = raid5_bitmap_sector,
|
|
};
|
|
|
|
static struct md_personality raid4_personality =
|
|
@@ -9141,6 +9160,7 @@ static struct md_personality raid4_personality =
|
|
.quiesce = raid5_quiesce,
|
|
.takeover = raid4_takeover,
|
|
.change_consistency_policy = raid5_change_consistency_policy,
|
|
+ .bitmap_sector = raid5_bitmap_sector,
|
|
};
|
|
|
|
static int __init raid5_init(void)
|
|
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
|
|
index 97a795979a3502..fd617155388000 100644
|
|
--- a/drivers/md/raid5.h
|
|
+++ b/drivers/md/raid5.h
|
|
@@ -358,7 +358,6 @@ enum {
|
|
STRIPE_REPLACED,
|
|
STRIPE_PREREAD_ACTIVE,
|
|
STRIPE_DELAYED,
|
|
- STRIPE_DEGRADED,
|
|
STRIPE_BIT_DELAY,
|
|
STRIPE_EXPANDING,
|
|
STRIPE_EXPAND_SOURCE,
|
|
@@ -372,9 +371,6 @@ enum {
|
|
STRIPE_ON_RELEASE_LIST,
|
|
STRIPE_BATCH_READY,
|
|
STRIPE_BATCH_ERR,
|
|
- STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
|
|
- * to batch yet.
|
|
- */
|
|
STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
|
|
* this bit is used in two scenarios:
|
|
*
|
|
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
|
|
index d925ca24183b50..415f1f91cc3072 100644
|
|
--- a/drivers/media/dvb-frontends/cxd2841er.c
|
|
+++ b/drivers/media/dvb-frontends/cxd2841er.c
|
|
@@ -311,12 +311,8 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
|
|
|
|
static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz)
|
|
{
|
|
- u64 tmp;
|
|
-
|
|
- tmp = (u64) ifhz * 16777216;
|
|
- do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000));
|
|
-
|
|
- return (u32) tmp;
|
|
+ return div_u64(ifhz * 16777216ull,
|
|
+ (xtal == SONY_XTAL_24000) ? 48000000 : 41000000);
|
|
}
|
|
|
|
static u32 cxd2841er_calc_iffreq(u32 ifhz)
|
|
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
|
|
index 5a650facae4153..ae33d1ecf835df 100644
|
|
--- a/drivers/media/i2c/ds90ub913.c
|
|
+++ b/drivers/media/i2c/ds90ub913.c
|
|
@@ -8,6 +8,7 @@
|
|
* Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
|
|
*/
|
|
|
|
+#include <linux/bitfield.h>
|
|
#include <linux/clk-provider.h>
|
|
#include <linux/clk.h>
|
|
#include <linux/delay.h>
|
|
@@ -146,6 +147,19 @@ static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
|
|
return ret;
|
|
}
|
|
|
|
+static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
|
|
+ u8 val)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
|
|
+ if (ret < 0)
|
|
+ dev_err(&priv->client->dev,
|
|
+ "Cannot update register 0x%02x %d!\n", reg, ret);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
|
|
* GPIO chip
|
|
*/
|
|
@@ -733,10 +747,13 @@ static int ub913_hw_init(struct ub913_data *priv)
|
|
if (ret)
|
|
return dev_err_probe(dev, ret, "i2c master init failed\n");
|
|
|
|
- ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
|
|
- v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
|
|
- v |= priv->pclk_polarity_rising ? UB913_REG_GENERAL_CFG_PCLK_RISING : 0;
|
|
- ub913_write(priv, UB913_REG_GENERAL_CFG, v);
|
|
+ ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
|
|
+ UB913_REG_GENERAL_CFG_PCLK_RISING,
|
|
+ FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
|
|
+ priv->pclk_polarity_rising));
|
|
+
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
|
|
index 1dd29137d2d9f2..007c95ac34d931 100644
|
|
--- a/drivers/media/i2c/ds90ub953.c
|
|
+++ b/drivers/media/i2c/ds90ub953.c
|
|
@@ -398,8 +398,13 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
|
|
int ret;
|
|
|
|
/* Set all GPIOs to local input mode */
|
|
- ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
|
|
- ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
|
|
+ ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
gc->label = dev_name(dev);
|
|
gc->parent = dev;
|
|
@@ -961,10 +966,11 @@ static void ub953_calc_clkout_params(struct ub953_data *priv,
|
|
clkout_data->rate = clkout_rate;
|
|
}
|
|
|
|
-static void ub953_write_clkout_regs(struct ub953_data *priv,
|
|
- const struct ub953_clkout_data *clkout_data)
|
|
+static int ub953_write_clkout_regs(struct ub953_data *priv,
|
|
+ const struct ub953_clkout_data *clkout_data)
|
|
{
|
|
u8 clkout_ctrl0, clkout_ctrl1;
|
|
+ int ret;
|
|
|
|
if (priv->hw_data->is_ub971)
|
|
clkout_ctrl0 = clkout_data->m;
|
|
@@ -974,8 +980,15 @@ static void ub953_write_clkout_regs(struct ub953_data *priv,
|
|
|
|
clkout_ctrl1 = clkout_data->n;
|
|
|
|
- ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
|
|
- ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
|
|
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
|
|
@@ -1055,9 +1068,7 @@ static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
dev_dbg(&priv->client->dev, "%s %lu (requested %lu)\n", __func__,
|
|
clkout_data.rate, rate);
|
|
|
|
- ub953_write_clkout_regs(priv, &clkout_data);
|
|
-
|
|
- return 0;
|
|
+ return ub953_write_clkout_regs(priv, &clkout_data);
|
|
}
|
|
|
|
static const struct clk_ops ub953_clkout_ops = {
|
|
@@ -1082,7 +1093,9 @@ static int ub953_register_clkout(struct ub953_data *priv)
|
|
|
|
/* Initialize clkout to 25MHz by default */
|
|
ub953_calc_clkout_params(priv, UB953_DEFAULT_CLKOUT_RATE, &clkout_data);
|
|
- ub953_write_clkout_regs(priv, &clkout_data);
|
|
+ ret = ub953_write_clkout_regs(priv, &clkout_data);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
priv->clkout_clk_hw.init = &init;
|
|
|
|
@@ -1229,10 +1242,15 @@ static int ub953_hw_init(struct ub953_data *priv)
|
|
if (ret)
|
|
return dev_err_probe(dev, ret, "i2c init failed\n");
|
|
|
|
- ub953_write(priv, UB953_REG_GENERAL_CFG,
|
|
- (priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK) |
|
|
- ((priv->num_data_lanes - 1) << UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT) |
|
|
- UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE);
|
|
+ v = 0;
|
|
+ v |= priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK;
|
|
+ v |= (priv->num_data_lanes - 1) <<
|
|
+ UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
|
|
+ v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
|
|
+
|
|
+ ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
|
|
index 8b04e12af286cc..6e030584d598a7 100644
|
|
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
|
|
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
|
|
@@ -191,10 +191,11 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
|
|
|
|
mux_args.mux_buf_sz = mux_buf_sz;
|
|
|
|
- dvb->streaming = true;
|
|
dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
|
|
if (!dvb->mux)
|
|
return -ENOMEM;
|
|
+
|
|
+ dvb->streaming = true;
|
|
vidtv_mux_start_thread(dvb->mux);
|
|
|
|
dev_dbg_ratelimited(dev, "Started streaming\n");
|
|
@@ -205,6 +206,11 @@ static int vidtv_stop_streaming(struct vidtv_dvb *dvb)
|
|
{
|
|
struct device *dev = &dvb->pdev->dev;
|
|
|
|
+ if (!dvb->streaming) {
|
|
+ dev_warn_ratelimited(dev, "No streaming. Skipping.\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
dvb->streaming = false;
|
|
vidtv_mux_stop_thread(dvb->mux);
|
|
vidtv_mux_destroy(dvb->mux);
|
|
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
|
|
index 95c5b90f3e7c11..ae2e8bd2b3f73d 100644
|
|
--- a/drivers/media/usb/uvc/uvc_driver.c
|
|
+++ b/drivers/media/usb/uvc/uvc_driver.c
|
|
@@ -2886,6 +2886,15 @@ static const struct usb_device_id uvc_ids[] = {
|
|
.bInterfaceSubClass = 1,
|
|
.bInterfaceProtocol = 0,
|
|
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
|
|
+ /* Sonix Technology Co. Ltd. - 292A IPC AR0330 */
|
|
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
|
|
+ | USB_DEVICE_ID_MATCH_INT_INFO,
|
|
+ .idVendor = 0x0c45,
|
|
+ .idProduct = 0x6366,
|
|
+ .bInterfaceClass = USB_CLASS_VIDEO,
|
|
+ .bInterfaceSubClass = 1,
|
|
+ .bInterfaceProtocol = 0,
|
|
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
|
|
/* MT6227 */
|
|
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
|
|
| USB_DEVICE_ID_MATCH_INT_INFO,
|
|
@@ -2914,6 +2923,15 @@ static const struct usb_device_id uvc_ids[] = {
|
|
.bInterfaceSubClass = 1,
|
|
.bInterfaceProtocol = 0,
|
|
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
|
|
+ /* Kurokesu C1 PRO */
|
|
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
|
|
+ | USB_DEVICE_ID_MATCH_INT_INFO,
|
|
+ .idVendor = 0x16d0,
|
|
+ .idProduct = 0x0ed1,
|
|
+ .bInterfaceClass = USB_CLASS_VIDEO,
|
|
+ .bInterfaceSubClass = 1,
|
|
+ .bInterfaceProtocol = 0,
|
|
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
|
|
/* Syntek (HP Spartan) */
|
|
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
|
|
| USB_DEVICE_ID_MATCH_INT_INFO,
|
|
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
|
|
index a2504e1e991b93..9572fdfe74f246 100644
|
|
--- a/drivers/media/usb/uvc/uvc_video.c
|
|
+++ b/drivers/media/usb/uvc/uvc_video.c
|
|
@@ -20,6 +20,7 @@
|
|
#include <linux/atomic.h>
|
|
#include <asm/unaligned.h>
|
|
|
|
+#include <media/jpeg.h>
|
|
#include <media/v4l2-common.h>
|
|
|
|
#include "uvcvideo.h"
|
|
@@ -1114,6 +1115,7 @@ static void uvc_video_stats_stop(struct uvc_streaming *stream)
|
|
static int uvc_video_decode_start(struct uvc_streaming *stream,
|
|
struct uvc_buffer *buf, const u8 *data, int len)
|
|
{
|
|
+ u8 header_len;
|
|
u8 fid;
|
|
|
|
/*
|
|
@@ -1127,6 +1129,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ header_len = data[0];
|
|
fid = data[1] & UVC_STREAM_FID;
|
|
|
|
/*
|
|
@@ -1208,9 +1211,31 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
|
|
return -EAGAIN;
|
|
}
|
|
|
|
+ /*
|
|
+ * Some cameras, when running two parallel streams (one MJPEG alongside
|
|
+ * another non-MJPEG stream), are known to lose the EOF packet for a frame.
|
|
+ * We can detect the end of a frame by checking for a new SOI marker, as
|
|
+ * the SOI always lies on the packet boundary between two frames for
|
|
+ * these devices.
|
|
+ */
|
|
+ if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF &&
|
|
+ (stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG ||
|
|
+ stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) {
|
|
+ const u8 *packet = data + header_len;
|
|
+
|
|
+ if (len >= header_len + 2 &&
|
|
+ packet[0] == 0xff && packet[1] == JPEG_MARKER_SOI &&
|
|
+ buf->bytesused != 0) {
|
|
+ buf->state = UVC_BUF_STATE_READY;
|
|
+ buf->error = 1;
|
|
+ stream->last_fid ^= UVC_STREAM_FID;
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+ }
|
|
+
|
|
stream->last_fid = fid;
|
|
|
|
- return data[0];
|
|
+ return header_len;
|
|
}
|
|
|
|
static inline enum dma_data_direction uvc_stream_dir(
|
|
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
|
|
index 997f4b5b5e22ac..30fd056b2aec9d 100644
|
|
--- a/drivers/media/usb/uvc/uvcvideo.h
|
|
+++ b/drivers/media/usb/uvc/uvcvideo.h
|
|
@@ -76,6 +76,7 @@
|
|
#define UVC_QUIRK_NO_RESET_RESUME 0x00004000
|
|
#define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000
|
|
#define UVC_QUIRK_INVALID_DEVICE_SOF 0x00010000
|
|
+#define UVC_QUIRK_MJPEG_NO_EOF 0x00020000
|
|
|
|
/* Format flags */
|
|
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
|
|
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
|
|
index 4f4b7607eaa9b1..02f3748e46c144 100644
|
|
--- a/drivers/mmc/host/mtk-sd.c
|
|
+++ b/drivers/mmc/host/mtk-sd.c
|
|
@@ -260,6 +260,7 @@
|
|
#define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */
|
|
|
|
#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
|
|
+#define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */
|
|
#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
|
|
#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
|
|
#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
|
|
@@ -305,6 +306,7 @@
|
|
|
|
/* EMMC50_PAD_DS_TUNE mask */
|
|
#define PAD_DS_DLY_SEL BIT(16) /* RW */
|
|
+#define PAD_DS_DLY2_SEL BIT(15) /* RW */
|
|
#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
|
|
#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
|
|
|
|
@@ -2309,13 +2311,23 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
|
|
{
|
|
struct msdc_host *host = mmc_priv(mmc);
|
|
+
|
|
host->hs400_mode = true;
|
|
|
|
- if (host->top_base)
|
|
- writel(host->hs400_ds_delay,
|
|
- host->top_base + EMMC50_PAD_DS_TUNE);
|
|
- else
|
|
- writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
|
|
+ if (host->top_base) {
|
|
+ if (host->hs400_ds_dly3)
|
|
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
|
|
+ PAD_DS_DLY3, host->hs400_ds_dly3);
|
|
+ if (host->hs400_ds_delay)
|
|
+ writel(host->hs400_ds_delay,
|
|
+ host->top_base + EMMC50_PAD_DS_TUNE);
|
|
+ } else {
|
|
+ if (host->hs400_ds_dly3)
|
|
+ sdr_set_field(host->base + PAD_DS_TUNE,
|
|
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
|
|
+ if (host->hs400_ds_delay)
|
|
+ writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
|
|
+ }
|
|
/* hs400 mode must set it to 0 */
|
|
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
|
|
/* to improve read performance, set outstanding to 2 */
|
|
@@ -2335,14 +2347,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
|
|
if (host->top_base) {
|
|
sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
|
|
PAD_DS_DLY_SEL);
|
|
- if (host->hs400_ds_dly3)
|
|
- sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
|
|
- PAD_DS_DLY3, host->hs400_ds_dly3);
|
|
+ sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
|
|
+ PAD_DS_DLY2_SEL);
|
|
} else {
|
|
sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
|
|
- if (host->hs400_ds_dly3)
|
|
- sdr_set_field(host->base + PAD_DS_TUNE,
|
|
- PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
|
|
+ sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
|
|
}
|
|
|
|
host->hs400_tuning = true;
|
|
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
|
|
index f44ba2600415f6..7f405bcf11c23e 100644
|
|
--- a/drivers/net/can/c_can/c_can_platform.c
|
|
+++ b/drivers/net/can/c_can/c_can_platform.c
|
|
@@ -394,15 +394,16 @@ static int c_can_plat_probe(struct platform_device *pdev)
|
|
if (ret) {
|
|
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
|
|
KBUILD_MODNAME, ret);
|
|
- goto exit_free_device;
|
|
+ goto exit_pm_runtime;
|
|
}
|
|
|
|
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
|
|
KBUILD_MODNAME, priv->base, dev->irq);
|
|
return 0;
|
|
|
|
-exit_free_device:
|
|
+exit_pm_runtime:
|
|
pm_runtime_disable(priv->device);
|
|
+exit_free_device:
|
|
free_c_can_dev(dev);
|
|
exit:
|
|
dev_err(&pdev->dev, "probe failed\n");
|
|
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
|
|
index 64c349fd46007f..f65c1a1e05ccdf 100644
|
|
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
|
|
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
|
|
@@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
|
|
}
|
|
break;
|
|
case CAN_STATE_ERROR_ACTIVE:
|
|
- cf->can_id |= CAN_ERR_CNT;
|
|
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
|
|
- cf->data[6] = bec.txerr;
|
|
- cf->data[7] = bec.rxerr;
|
|
+ if (skb) {
|
|
+ cf->can_id |= CAN_ERR_CNT;
|
|
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
|
|
+ cf->data[6] = bec.txerr;
|
|
+ cf->data[7] = bec.rxerr;
|
|
+ }
|
|
break;
|
|
default:
|
|
netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
|
|
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
|
|
index 635edeb8f68cdf..e763a9904bedd0 100644
|
|
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
|
|
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
|
|
@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
|
|
return ret;
|
|
}
|
|
|
|
- return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
|
|
+ if (es58x_dev->udev->serial)
|
|
+ ret = devlink_info_serial_number_put(req,
|
|
+ es58x_dev->udev->serial);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
const struct devlink_ops es58x_dl_ops = {
|
|
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
|
|
index 91a4722460f66a..ae93b45cf55e8e 100644
|
|
--- a/drivers/net/ethernet/intel/igc/igc_main.c
|
|
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
|
|
@@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
+ buffer->type = IGC_TX_BUFFER_TYPE_SKB;
|
|
buffer->skb = skb;
|
|
buffer->protocol = 0;
|
|
buffer->bytecount = skb->len;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
|
|
index 472830d07ac12e..13b5281d676b45 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
|
|
@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
|
|
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
|
|
if (err)
|
|
return;
|
|
- mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
|
|
+ err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
|
|
+ if (err)
|
|
+ return;
|
|
for (i = 0; i < len; i++) {
|
|
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
|
|
if (!hw_stats[i].cells_bytes)
|
|
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
|
|
index 3612b0633bd177..88187dd4eb2d40 100644
|
|
--- a/drivers/net/netdevsim/ipsec.c
|
|
+++ b/drivers/net/netdevsim/ipsec.c
|
|
@@ -39,10 +39,14 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
|
|
if (!sap->used)
|
|
continue;
|
|
|
|
- p += scnprintf(p, bufsize - (p - buf),
|
|
- "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
|
|
- i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
|
|
- sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
|
|
+ if (sap->xs->props.family == AF_INET6)
|
|
+ p += scnprintf(p, bufsize - (p - buf),
|
|
+ "sa[%i] %cx ipaddr=%pI6c\n",
|
|
+ i, (sap->rx ? 'r' : 't'), &sap->ipaddr);
|
|
+ else
|
|
+ p += scnprintf(p, bufsize - (p - buf),
|
|
+ "sa[%i] %cx ipaddr=%pI4\n",
|
|
+ i, (sap->rx ? 'r' : 't'), &sap->ipaddr[3]);
|
|
p += scnprintf(p, bufsize - (p - buf),
|
|
"sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
|
|
i, be32_to_cpu(sap->xs->id.spi),
|
|
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
|
|
index 46a7c9fb6300e3..1ce3bccd4ebd4e 100644
|
|
--- a/drivers/net/team/team.c
|
|
+++ b/drivers/net/team/team.c
|
|
@@ -2657,7 +2657,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
ctx.data.u32_val = nla_get_u32(attr_data);
|
|
break;
|
|
case TEAM_OPTION_TYPE_STRING:
|
|
- if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
|
|
+ if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
|
|
+ !memchr(nla_data(attr_data), '\0',
|
|
+ nla_len(attr_data))) {
|
|
err = -EINVAL;
|
|
goto team_put;
|
|
}
|
|
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
|
|
index ee02a92338da1b..64db3e98a1b664 100644
|
|
--- a/drivers/net/vxlan/vxlan_core.c
|
|
+++ b/drivers/net/vxlan/vxlan_core.c
|
|
@@ -2966,8 +2966,11 @@ static int vxlan_init(struct net_device *dev)
|
|
struct vxlan_dev *vxlan = netdev_priv(dev);
|
|
int err;
|
|
|
|
- if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
|
|
- vxlan_vnigroup_init(vxlan);
|
|
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
|
|
+ err = vxlan_vnigroup_init(vxlan);
|
|
+ if (err)
|
|
+ return err;
|
|
+ }
|
|
|
|
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
|
if (!dev->tstats) {
|
|
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
|
|
index 9105fdd14c6671..c977dfbae0a464 100644
|
|
--- a/drivers/net/wireless/ath/ath12k/wmi.c
|
|
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
|
|
@@ -4418,6 +4418,22 @@ static struct ath12k_reg_rule
|
|
return reg_rule_ptr;
|
|
}
|
|
|
|
+static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
|
|
+ u32 num_reg_rules)
|
|
+{
|
|
+ u8 num_invalid_5ghz_rules = 0;
|
|
+ u32 count, start_freq;
|
|
+
|
|
+ for (count = 0; count < num_reg_rules; count++) {
|
|
+ start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
|
|
+
|
|
+ if (start_freq >= ATH12K_MIN_6G_FREQ)
|
|
+ num_invalid_5ghz_rules++;
|
|
+ }
|
|
+
|
|
+ return num_invalid_5ghz_rules;
|
|
+}
|
|
+
|
|
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
|
|
struct sk_buff *skb,
|
|
struct ath12k_reg_info *reg_info)
|
|
@@ -4428,6 +4444,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
|
|
u32 num_2g_reg_rules, num_5g_reg_rules;
|
|
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
|
|
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
|
|
+ u8 num_invalid_5ghz_ext_rules;
|
|
u32 total_reg_rules = 0;
|
|
int ret, i, j;
|
|
|
|
@@ -4521,20 +4538,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
|
|
|
|
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
|
|
|
|
- /* FIXME: Currently FW includes 6G reg rule also in 5G rule
|
|
- * list for country US.
|
|
- * Having same 6G reg rule in 5G and 6G rules list causes
|
|
- * intersect check to be true, and same rules will be shown
|
|
- * multiple times in iw cmd. So added hack below to avoid
|
|
- * parsing 6G rule from 5G reg rule list, and this can be
|
|
- * removed later, after FW updates to remove 6G reg rule
|
|
- * from 5G rules list.
|
|
- */
|
|
- if (memcmp(reg_info->alpha2, "US", 2) == 0) {
|
|
- reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
|
|
- num_5g_reg_rules = reg_info->num_5g_reg_rules;
|
|
- }
|
|
-
|
|
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
|
|
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
|
|
reg_info->num_phy = le32_to_cpu(ev->num_phy);
|
|
@@ -4636,8 +4639,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
|
|
}
|
|
}
|
|
|
|
+ ext_wmi_reg_rule += num_2g_reg_rules;
|
|
+
|
|
+	/* Firmware might include a 6 GHz reg rule in the 5 GHz rule list
+	 * for a few countries along with a separate 6 GHz rule.
+	 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
+	 * lists causes the intersect check to be true, and the same rules
+	 * will be shown multiple times in the iw cmd output.
+	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
|
|
+ */
|
|
+ num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
|
|
+ num_5g_reg_rules);
|
|
+
|
|
+ if (num_invalid_5ghz_ext_rules) {
|
|
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
|
|
+ "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
|
|
+ reg_info->alpha2, reg_info->num_5g_reg_rules,
|
|
+ num_invalid_5ghz_ext_rules);
|
|
+
|
|
+ num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
|
|
+ reg_info->num_5g_reg_rules = num_5g_reg_rules;
|
|
+ }
|
|
+
|
|
if (num_5g_reg_rules) {
|
|
- ext_wmi_reg_rule += num_2g_reg_rules;
|
|
reg_info->reg_rules_5g_ptr =
|
|
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
|
|
ext_wmi_reg_rule);
|
|
@@ -4649,7 +4673,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
|
|
}
|
|
}
|
|
|
|
- ext_wmi_reg_rule += num_5g_reg_rules;
|
|
+	/* We have adjusted the number of 5 GHz reg rules above, but those
+	 * invalid rules still need to be stepped over in ext_wmi_reg_rule.
+	 *
+	 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
|
|
+ */
|
|
+ ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
|
|
|
|
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
|
|
reg_info->reg_rules_6g_ap_ptr[i] =
|
|
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
|
|
index a19a2c29f2264a..4cfcc83f52269b 100644
|
|
--- a/drivers/net/wireless/ath/ath12k/wmi.h
|
|
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
|
|
@@ -3891,7 +3891,6 @@ struct ath12k_wmi_eht_rate_set_params {
|
|
#define MAX_REG_RULES 10
|
|
#define REG_ALPHA2_LEN 2
|
|
#define MAX_6G_REG_RULES 5
|
|
-#define REG_US_5G_NUM_REG_RULES 4
|
|
|
|
enum wmi_start_event_param {
|
|
WMI_VDEV_START_RESP_EVENT = 0,
|
|
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
|
|
index fd35ad0648a07b..70f484b811dea7 100644
|
|
--- a/drivers/pci/quirks.c
|
|
+++ b/drivers/pci/quirks.c
|
|
@@ -5978,6 +5978,17 @@ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
|
|
SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
|
|
SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
|
|
|
|
+#define SWITCHTEC_PCI100X_QUIRK(vid) \
|
|
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
|
|
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
|
|
+SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */
|
|
+SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */
|
|
+SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */
|
|
+SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */
|
|
+SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */
|
|
+SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */
|
|
+
|
|
+
|
|
/*
|
|
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
|
|
* These IDs are used to forward responses to the originator on the other
|
|
@@ -6247,6 +6258,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
|
|
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
|
|
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
|
|
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
|
|
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
|
|
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
|
|
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
|
|
#endif
|
|
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
|
|
index 5a4adf6c04cf89..455fa5035a2450 100644
|
|
--- a/drivers/pci/switch/switchtec.c
|
|
+++ b/drivers/pci/switch/switchtec.c
|
|
@@ -1737,6 +1737,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
|
|
.driver_data = gen, \
|
|
}
|
|
|
|
+#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
|
|
+ { \
|
|
+ .vendor = PCI_VENDOR_ID_EFAR, \
|
|
+ .device = device_id, \
|
|
+ .subvendor = PCI_ANY_ID, \
|
|
+ .subdevice = PCI_ANY_ID, \
|
|
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
|
|
+ .class_mask = 0xFFFFFFFF, \
|
|
+ .driver_data = gen, \
|
|
+ }, \
|
|
+ { \
|
|
+ .vendor = PCI_VENDOR_ID_EFAR, \
|
|
+ .device = device_id, \
|
|
+ .subvendor = PCI_ANY_ID, \
|
|
+ .subdevice = PCI_ANY_ID, \
|
|
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
|
|
+ .class_mask = 0xFFFFFFFF, \
|
|
+ .driver_data = gen, \
|
|
+ }
|
|
+
|
|
static const struct pci_device_id switchtec_pci_tbl[] = {
|
|
SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
|
|
SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
|
|
@@ -1831,6 +1851,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
|
|
SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
|
|
SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
|
|
SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
|
|
+ SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */
|
|
+ SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */
|
|
+ SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */
|
|
+ SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */
|
|
+ SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */
|
|
+ SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */
|
|
{0}
|
|
};
|
|
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
|
|
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
|
|
index f2b9db66fdb6a4..d2488d80912c9f 100644
|
|
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
|
|
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
|
|
@@ -1281,7 +1281,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
|
|
|
|
ret = devm_request_threaded_irq(chip->dev, irq,
|
|
NULL, cy8c95x0_irq_handler,
|
|
- IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
|
|
+ IRQF_ONESHOT | IRQF_SHARED,
|
|
dev_name(chip->dev), chip);
|
|
if (ret) {
|
|
dev_err(chip->dev, "failed to request irq %d\n", irq);
|
|
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
|
|
index e94d46372a6396..402cf939c03263 100644
|
|
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
|
|
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
|
|
@@ -646,15 +646,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
|
|
};
|
|
|
|
static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
|
|
- { .start = 0x01c, .end = 0x0c8 },
|
|
- { .start = 0x12c, .end = 0x184 },
|
|
+ { .start = 0x01c, .end = 0x064 },
|
|
+ { .start = 0x084, .end = 0x0a0 },
|
|
+ { .start = 0x0a4, .end = 0x0c8 },
|
|
+ { .start = 0x12c, .end = 0x164 },
|
|
+ { .start = 0x16c, .end = 0x184 },
|
|
{ .start = 0x190, .end = 0x198 },
|
|
{ .start = 0x1a0, .end = 0x204 },
|
|
- { .start = 0x21c, .end = 0x250 },
|
|
- { .start = 0x25c, .end = 0x2f0 },
|
|
+ { .start = 0x21c, .end = 0x2f0 },
|
|
{ .start = 0x310, .end = 0x3d8 },
|
|
- { .start = 0x400, .end = 0x4f0 },
|
|
- { .start = 0x4f8, .end = 0x7e8 },
|
|
+ { .start = 0x400, .end = 0x420 },
|
|
+ { .start = 0x444, .end = 0x490 },
|
|
+ { .start = 0x4bc, .end = 0x4f0 },
|
|
+ { .start = 0x4f8, .end = 0x54c },
|
|
+ { .start = 0x57c, .end = 0x7e8 },
|
|
{ .start = 0x8d0, .end = 0x8d8 },
|
|
{ .start = 0xacc, .end = 0xf00 }
|
|
};
|
|
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
|
|
index a7c3b3923b4af7..fd8c8eb37d01d6 100644
|
|
--- a/drivers/spi/spi-sn-f-ospi.c
|
|
+++ b/drivers/spi/spi-sn-f-ospi.c
|
|
@@ -116,6 +116,9 @@ struct f_ospi {
|
|
|
|
static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
|
|
{
|
|
+ if (!op->dummy.nbytes)
|
|
+ return 0;
|
|
+
|
|
return (op->dummy.nbytes * 8) / op->dummy.buswidth;
|
|
}
|
|
|
|
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
|
|
index 1aa3e55c8b47da..f76c9ecc51bcd6 100644
|
|
--- a/drivers/tty/serial/8250/8250.h
|
|
+++ b/drivers/tty/serial/8250/8250.h
|
|
@@ -350,6 +350,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
|
|
|
|
#ifdef CONFIG_SERIAL_8250_DMA
|
|
extern int serial8250_tx_dma(struct uart_8250_port *);
|
|
+extern void serial8250_tx_dma_flush(struct uart_8250_port *);
|
|
extern int serial8250_rx_dma(struct uart_8250_port *);
|
|
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
|
|
extern int serial8250_request_dma(struct uart_8250_port *);
|
|
@@ -382,6 +383,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
|
|
{
|
|
return -1;
|
|
}
|
|
+static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { }
|
|
static inline int serial8250_rx_dma(struct uart_8250_port *p)
|
|
{
|
|
return -1;
|
|
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
|
|
index 7fa66501792dd8..7f23037813bc85 100644
|
|
--- a/drivers/tty/serial/8250/8250_dma.c
|
|
+++ b/drivers/tty/serial/8250/8250_dma.c
|
|
@@ -139,6 +139,22 @@ int serial8250_tx_dma(struct uart_8250_port *p)
|
|
return ret;
|
|
}
|
|
|
|
+void serial8250_tx_dma_flush(struct uart_8250_port *p)
|
|
+{
|
|
+ struct uart_8250_dma *dma = p->dma;
|
|
+
|
|
+ if (!dma->tx_running)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * kfifo_reset() has been called by the serial core, avoid
|
|
+ * advancing and underflowing in __dma_tx_complete().
|
|
+ */
|
|
+ dma->tx_size = 0;
|
|
+
|
|
+ dmaengine_terminate_async(dma->rxchan);
|
|
+}
|
|
+
|
|
int serial8250_rx_dma(struct uart_8250_port *p)
|
|
{
|
|
struct uart_8250_dma *dma = p->dma;
|
|
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
|
|
index 2b1b2928ef7b7c..c2778300e15100 100644
|
|
--- a/drivers/tty/serial/8250/8250_port.c
|
|
+++ b/drivers/tty/serial/8250/8250_port.c
|
|
@@ -2557,6 +2557,14 @@ static unsigned int npcm_get_divisor(struct uart_8250_port *up,
|
|
return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
|
|
}
|
|
|
|
+static void serial8250_flush_buffer(struct uart_port *port)
|
|
+{
|
|
+ struct uart_8250_port *up = up_to_u8250p(port);
|
|
+
|
|
+ if (up->dma)
|
|
+ serial8250_tx_dma_flush(up);
|
|
+}
|
|
+
|
|
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
|
|
unsigned int baud,
|
|
unsigned int *frac)
|
|
@@ -3260,6 +3268,7 @@ static const struct uart_ops serial8250_pops = {
|
|
.break_ctl = serial8250_break_ctl,
|
|
.startup = serial8250_startup,
|
|
.shutdown = serial8250_shutdown,
|
|
+ .flush_buffer = serial8250_flush_buffer,
|
|
.set_termios = serial8250_set_termios,
|
|
.set_ldisc = serial8250_set_ldisc,
|
|
.pm = serial8250_pm,
|
|
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
|
|
index 469ad26cde4870..a21c287077039d 100644
|
|
--- a/drivers/tty/serial/serial_port.c
|
|
+++ b/drivers/tty/serial/serial_port.c
|
|
@@ -172,6 +172,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
|
|
* The caller is responsible to initialize the following fields of the @port
|
|
* ->dev (must be valid)
|
|
* ->flags
|
|
+ * ->iobase
|
|
* ->mapbase
|
|
* ->mapsize
|
|
* ->regshift (if @use_defaults is false)
|
|
@@ -213,7 +214,7 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
|
|
/* Read the registers I/O access type (default: MMIO 8-bit) */
|
|
ret = device_property_read_u32(dev, "reg-io-width", &value);
|
|
if (ret) {
|
|
- port->iotype = UPIO_MEM;
|
|
+ port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;
|
|
} else {
|
|
switch (value) {
|
|
case 1:
|
|
@@ -226,11 +227,11 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
|
|
port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
|
|
break;
|
|
default:
|
|
+ port->iotype = UPIO_UNKNOWN;
|
|
if (!use_defaults) {
|
|
dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
|
|
return -EINVAL;
|
|
}
|
|
- port->iotype = UPIO_UNKNOWN;
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
|
|
index f21423a7a6d7db..8fbd46cd8c2b8e 100644
|
|
--- a/drivers/ufs/core/ufs_bsg.c
|
|
+++ b/drivers/ufs/core/ufs_bsg.c
|
|
@@ -216,6 +216,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
|
|
return;
|
|
|
|
bsg_remove_queue(hba->bsg_queue);
|
|
+ hba->bsg_queue = NULL;
|
|
|
|
device_del(bsg_dev);
|
|
put_device(bsg_dev);
|
|
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
|
|
index 605fea4611029b..c1d7d87b32cc5a 100644
|
|
--- a/drivers/usb/class/cdc-acm.c
|
|
+++ b/drivers/usb/class/cdc-acm.c
|
|
@@ -371,7 +371,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
|
|
static void acm_ctrl_irq(struct urb *urb)
|
|
{
|
|
struct acm *acm = urb->context;
|
|
- struct usb_cdc_notification *dr = urb->transfer_buffer;
|
|
+ struct usb_cdc_notification *dr;
|
|
unsigned int current_size = urb->actual_length;
|
|
unsigned int expected_size, copy_size, alloc_size;
|
|
int retval;
|
|
@@ -398,14 +398,25 @@ static void acm_ctrl_irq(struct urb *urb)
|
|
|
|
usb_mark_last_busy(acm->dev);
|
|
|
|
- if (acm->nb_index)
|
|
+ if (acm->nb_index == 0) {
|
|
+ /*
|
|
+ * The first chunk of a message must contain at least the
|
|
+ * notification header with the length field, otherwise we
|
|
+ * can't get an expected_size.
|
|
+ */
|
|
+ if (current_size < sizeof(struct usb_cdc_notification)) {
|
|
+ dev_dbg(&acm->control->dev, "urb too short\n");
|
|
+ goto exit;
|
|
+ }
|
|
+ dr = urb->transfer_buffer;
|
|
+ } else {
|
|
dr = (struct usb_cdc_notification *)acm->notification_buffer;
|
|
-
|
|
+ }
|
|
/* size = notification-header + (optional) data */
|
|
expected_size = sizeof(struct usb_cdc_notification) +
|
|
le16_to_cpu(dr->wLength);
|
|
|
|
- if (current_size < expected_size) {
|
|
+ if (acm->nb_index != 0 || current_size < expected_size) {
|
|
/* notification is transmitted fragmented, reassemble */
|
|
if (acm->nb_size < expected_size) {
|
|
u8 *new_buffer;
|
|
@@ -1727,13 +1738,16 @@ static const struct usb_device_id acm_ids[] = {
|
|
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
|
|
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
|
|
},
|
|
- { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
|
|
+ { USB_DEVICE(0x045b, 0x023c), /* Renesas R-Car H3 USB Download mode */
|
|
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
|
|
+ },
|
|
+ { USB_DEVICE(0x045b, 0x0247), /* Renesas R-Car D3 USB Download mode */
|
|
.driver_info = DISABLE_ECHO, /* Don't echo banner */
|
|
},
|
|
- { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
|
|
+ { USB_DEVICE(0x045b, 0x0248), /* Renesas R-Car M3-N USB Download mode */
|
|
.driver_info = DISABLE_ECHO, /* Don't echo banner */
|
|
},
|
|
- { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
|
|
+ { USB_DEVICE(0x045b, 0x024D), /* Renesas R-Car E3 USB Download mode */
|
|
.driver_info = DISABLE_ECHO, /* Don't echo banner */
|
|
},
|
|
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
|
|
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
|
|
index 0944cfae8b5567..38f3f5a766dfdf 100644
|
|
--- a/drivers/usb/core/hub.c
|
|
+++ b/drivers/usb/core/hub.c
|
|
@@ -1818,6 +1818,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
|
|
desc = intf->cur_altsetting;
|
|
hdev = interface_to_usbdev(intf);
|
|
|
|
+ /*
|
|
+ * The USB 2.0 spec prohibits hubs from having more than one
|
|
+ * configuration or interface, and we rely on this prohibition.
|
|
+ * Refuse to accept a device that violates it.
|
|
+ */
|
|
+ if (hdev->descriptor.bNumConfigurations > 1 ||
|
|
+ hdev->actconfig->desc.bNumInterfaces > 1) {
|
|
+ dev_err(&intf->dev, "Invalid hub with more than one config or interface\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
/*
|
|
* Set default autosuspend delay as 0 to speedup bus suspend,
|
|
* based on the below considerations:
|
|
@@ -4666,7 +4677,6 @@ void usb_ep0_reinit(struct usb_device *udev)
|
|
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
|
|
|
|
#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
|
|
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
|
|
|
|
static int hub_set_address(struct usb_device *udev, int devnum)
|
|
{
|
|
@@ -4772,7 +4782,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev,
|
|
for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
|
|
/* Start with invalid values in case the transfer fails */
|
|
buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
|
|
- rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
|
|
+ rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
|
|
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
|
|
USB_DT_DEVICE << 8, 0,
|
|
buf, size,
|
|
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
|
|
index 13171454f9591a..027479179f09e9 100644
|
|
--- a/drivers/usb/core/quirks.c
|
|
+++ b/drivers/usb/core/quirks.c
|
|
@@ -432,6 +432,9 @@ static const struct usb_device_id usb_quirk_list[] = {
|
|
{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
|
|
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
|
|
|
|
+ /* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
|
|
+ { USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
|
|
+
|
|
/* Action Semiconductor flash disk */
|
|
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
|
|
USB_QUIRK_STRING_FETCH_255 },
|
|
@@ -522,6 +525,9 @@ static const struct usb_device_id usb_quirk_list[] = {
|
|
/* Blackmagic Design UltraStudio SDI */
|
|
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
|
|
|
|
+ /* Teclast disk */
|
|
+ { USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM },
|
|
+
|
|
/* Hauppauge HVR-950q */
|
|
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
|
|
USB_QUIRK_CONFIG_INTF_STRINGS },
|
|
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
|
|
index b26de09f6b6d5e..ce20c06a902531 100644
|
|
--- a/drivers/usb/dwc2/gadget.c
|
|
+++ b/drivers/usb/dwc2/gadget.c
|
|
@@ -4612,6 +4612,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
|
|
spin_lock_irqsave(&hsotg->lock, flags);
|
|
|
|
hsotg->driver = NULL;
|
|
+ hsotg->gadget.dev.of_node = NULL;
|
|
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
|
|
hsotg->enabled = 0;
|
|
|
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
|
index 9b8099cba41429..f6d9a9c67db4e2 100644
|
|
--- a/drivers/usb/dwc3/gadget.c
|
|
+++ b/drivers/usb/dwc3/gadget.c
|
|
@@ -2618,10 +2618,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
|
|
{
|
|
u32 reg;
|
|
u32 timeout = 2000;
|
|
+ u32 saved_config = 0;
|
|
|
|
if (pm_runtime_suspended(dwc->dev))
|
|
return 0;
|
|
|
|
+ /*
|
|
+ * When operating in USB 2.0 speeds (HS/FS), ensure that
|
|
+ * GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting
|
|
+ * or stopping the controller. This resolves timeout issues that occur
|
|
+ * during frequent role switches between host and device modes.
|
|
+ *
|
|
+ * Save and clear these settings, then restore them after completing the
|
|
+ * controller start or stop sequence.
|
|
+ *
|
|
+ * This solution was discovered through experimentation as it is not
|
|
+ * mentioned in the dwc3 programming guide. It has been tested on an
|
|
+ * Exynos platforms.
|
|
+ */
|
|
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
|
|
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
|
|
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
|
|
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
|
|
+ }
|
|
+
|
|
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
|
|
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
|
|
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
|
|
+ }
|
|
+
|
|
+ if (saved_config)
|
|
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
|
|
+
|
|
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
|
|
if (is_on) {
|
|
if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
|
|
@@ -2649,6 +2677,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
|
|
reg &= DWC3_DSTS_DEVCTRLHLT;
|
|
} while (--timeout && !(!is_on ^ !reg));
|
|
|
|
+ if (saved_config) {
|
|
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
|
|
+ reg |= saved_config;
|
|
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
|
|
+ }
|
|
+
|
|
if (!timeout)
|
|
return -ETIMEDOUT;
|
|
|
|
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
|
|
index 2d02f25f959791..49946af11a9058 100644
|
|
--- a/drivers/usb/gadget/function/f_midi.c
|
|
+++ b/drivers/usb/gadget/function/f_midi.c
|
|
@@ -906,6 +906,15 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
|
|
|
|
status = -ENODEV;
|
|
|
|
+ /*
|
|
+ * Reset wMaxPacketSize with maximum packet size of FS bulk transfer before
|
|
+ * endpoint claim. This ensures that the wMaxPacketSize does not exceed the
|
|
+ * limit during bind retries where configured dwc3 TX/RX FIFO's maxpacket
|
|
+ * size of 512 bytes for IN/OUT endpoints in support HS speed only.
|
|
+ */
|
|
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(64);
|
|
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(64);
|
|
+
|
|
/* allocate instance-specific endpoints */
|
|
midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
|
|
if (!midi->in_ep)
|
|
@@ -999,11 +1008,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
|
|
}
|
|
|
|
/* configure the endpoint descriptors ... */
|
|
- ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
|
|
- ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
|
|
+ ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
|
|
+ ms_out_desc.bNumEmbMIDIJack = midi->out_ports;
|
|
|
|
- ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
|
|
- ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
|
|
+ ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
|
|
+ ms_in_desc.bNumEmbMIDIJack = midi->in_ports;
|
|
|
|
/* ... and add them to the list */
|
|
endpoint_descriptor_index = i;
|
|
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
|
|
index 3b01734ce1b7e5..a93ad93390ba17 100644
|
|
--- a/drivers/usb/gadget/udc/renesas_usb3.c
|
|
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
|
|
@@ -310,7 +310,7 @@ struct renesas_usb3_request {
|
|
struct list_head queue;
|
|
};
|
|
|
|
-#define USB3_EP_NAME_SIZE 8
|
|
+#define USB3_EP_NAME_SIZE 16
|
|
struct renesas_usb3_ep {
|
|
struct usb_ep ep;
|
|
struct renesas_usb3 *usb3;
|
|
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
|
|
index 2665832f9addff..b96d9062a0837a 100644
|
|
--- a/drivers/usb/host/pci-quirks.c
|
|
+++ b/drivers/usb/host/pci-quirks.c
|
|
@@ -946,6 +946,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
|
|
* booting from USB disk or using a usb keyboard
|
|
*/
|
|
hcc_params = readl(base + EHCI_HCC_PARAMS);
|
|
+
|
|
+ /* LS7A EHCI controller doesn't have extended capabilities, the
|
|
+ * EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS
|
|
+ * register should be 0x0 but it reads as 0xa0. So clear it to
|
|
+ * avoid error messages on boot.
|
|
+ */
|
|
+ if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
|
|
+ hcc_params &= ~(0xffL << 8);
|
|
+
|
|
offset = (hcc_params >> 8) & 0xff;
|
|
while (offset && --count) {
|
|
pci_read_config_dword(pdev, offset, &cap);
|
|
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
|
|
index 70165dd86b5de9..8664449ca2ff8a 100644
|
|
--- a/drivers/usb/roles/class.c
|
|
+++ b/drivers/usb/roles/class.c
|
|
@@ -355,14 +355,15 @@ usb_role_switch_register(struct device *parent,
|
|
dev_set_name(&sw->dev, "%s-role-switch",
|
|
desc->name ? desc->name : dev_name(parent));
|
|
|
|
+ sw->registered = true;
|
|
+
|
|
ret = device_register(&sw->dev);
|
|
if (ret) {
|
|
+ sw->registered = false;
|
|
put_device(&sw->dev);
|
|
return ERR_PTR(ret);
|
|
}
|
|
|
|
- sw->registered = true;
|
|
-
|
|
/* TODO: Symlinks for the host port and the device controller. */
|
|
|
|
return sw;
|
|
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
|
|
index 86ac20e2874bab..37ff48702e43e1 100644
|
|
--- a/drivers/usb/serial/option.c
|
|
+++ b/drivers/usb/serial/option.c
|
|
@@ -619,15 +619,6 @@ static void option_instat_callback(struct urb *urb);
|
|
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
|
|
#define LUAT_PRODUCT_AIR720U 0x4e00
|
|
|
|
-/* MeiG Smart Technology products */
|
|
-#define MEIGSMART_VENDOR_ID 0x2dee
|
|
-/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
|
|
-#define MEIGSMART_PRODUCT_SRM825L 0x4d22
|
|
-/* MeiG Smart SLM320 based on UNISOC UIS8910 */
|
|
-#define MEIGSMART_PRODUCT_SLM320 0x4d41
|
|
-/* MeiG Smart SLM770A based on ASR1803 */
|
|
-#define MEIGSMART_PRODUCT_SLM770A 0x4d57
|
|
-
|
|
/* Device flags */
|
|
|
|
/* Highest interface number which can be used with NCTRL() and RSVD() */
|
|
@@ -1367,15 +1358,15 @@ static const struct usb_device_id option_ids[] = {
|
|
.driver_info = NCTRL(2) | RSVD(3) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
|
|
.driver_info = NCTRL(0) | RSVD(1) },
|
|
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
|
|
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990A (rmnet) */
|
|
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
|
|
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
|
|
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990A (MBIM) */
|
|
.driver_info = NCTRL(0) | RSVD(1) },
|
|
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
|
|
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990A (RNDIS) */
|
|
.driver_info = NCTRL(2) | RSVD(3) },
|
|
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
|
|
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990A (ECM) */
|
|
.driver_info = NCTRL(0) | RSVD(1) },
|
|
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
|
|
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
|
|
.driver_info = RSVD(0) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
|
|
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
|
|
@@ -1403,6 +1394,22 @@ static const struct usb_device_id option_ids[] = {
|
|
.driver_info = RSVD(0) | NCTRL(3) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
|
|
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
|
|
+ .driver_info = NCTRL(5) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
|
|
+ .driver_info = NCTRL(6) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
|
|
+ .driver_info = NCTRL(6) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
|
|
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
|
|
+ .driver_info = NCTRL(6) },
|
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
|
|
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
|
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
|
|
@@ -2347,6 +2354,14 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d41, 0xff, 0, 0) }, /* MeiG Smart SLM320 */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d57, 0xff, 0, 0) }, /* MeiG Smart SLM770A */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0, 0) }, /* MeiG Smart SRM815 */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x02) }, /* MeiG Smart SLM828 */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x03) }, /* MeiG Smart SLM828 */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) }, /* MeiG Smart SRM815 and SRM825L */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) }, /* MeiG Smart SRM825L */
|
|
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) }, /* MeiG Smart SRM825L */
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
|
|
@@ -2403,12 +2418,6 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
|
|
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
|
|
.driver_info = NCTRL(1) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */
|
|
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
|
|
index e27de61ac9fe75..8191c8fcfb2565 100644
|
|
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
|
|
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
|
|
@@ -16,6 +16,7 @@
|
|
#include <linux/io.h>
|
|
#include <linux/vfio.h>
|
|
#include <linux/vgaarb.h>
|
|
+#include <linux/io-64-nonatomic-lo-hi.h>
|
|
|
|
#include "vfio_pci_priv.h"
|
|
|
|
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
|
|
index d63c2d266d0735..3bf1043cd7957c 100644
|
|
--- a/drivers/vfio/platform/vfio_platform_common.c
|
|
+++ b/drivers/vfio/platform/vfio_platform_common.c
|
|
@@ -393,11 +393,6 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
|
|
|
|
count = min_t(size_t, count, reg->size - off);
|
|
|
|
- if (off >= reg->size)
|
|
- return -EINVAL;
|
|
-
|
|
- count = min_t(size_t, count, reg->size - off);
|
|
-
|
|
if (!reg->ioaddr) {
|
|
reg->ioaddr =
|
|
ioremap(reg->addr, reg->size);
|
|
@@ -482,11 +477,6 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
|
|
|
|
count = min_t(size_t, count, reg->size - off);
|
|
|
|
- if (off >= reg->size)
|
|
- return -EINVAL;
|
|
-
|
|
- count = min_t(size_t, count, reg->size - off);
|
|
-
|
|
if (!reg->ioaddr) {
|
|
reg->ioaddr =
|
|
ioremap(reg->addr, reg->size);
|
|
diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
|
|
index f85817635a8c2c..0da23c57e4757e 100644
|
|
--- a/drivers/video/fbdev/omap/lcd_dma.c
|
|
+++ b/drivers/video/fbdev/omap/lcd_dma.c
|
|
@@ -432,8 +432,8 @@ static int __init omap_init_lcd_dma(void)
|
|
|
|
spin_lock_init(&lcd_dma.lock);
|
|
|
|
- r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
|
|
- "LCD DMA", NULL);
|
|
+ r = request_threaded_irq(INT_DMA_LCD, NULL, lcd_dma_irq_handler,
|
|
+ IRQF_ONESHOT, "LCD DMA", NULL);
|
|
if (r != 0)
|
|
pr_err("unable to request IRQ for LCD DMA (error %d)\n", r);
|
|
|
|
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
|
|
index 6d0d1c8a508bf9..b6e54ab3b6f3bb 100644
|
|
--- a/drivers/xen/swiotlb-xen.c
|
|
+++ b/drivers/xen/swiotlb-xen.c
|
|
@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
|
|
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
|
|
}
|
|
|
|
+static inline bool range_requires_alignment(phys_addr_t p, size_t size)
|
|
+{
|
|
+ phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
|
|
+ phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
|
|
+
|
|
+ return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
|
|
+}
|
|
+
|
|
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
|
|
{
|
|
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
|
|
unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
|
|
- phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
|
|
|
|
next_bfn = pfn_to_bfn(xen_pfn);
|
|
|
|
- /* If buffer is physically aligned, ensure DMA alignment. */
|
|
- if (IS_ALIGNED(p, algn) &&
|
|
- !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
|
|
- return 1;
|
|
-
|
|
for (i = 1; i < nr_pages; i++)
|
|
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
|
|
return 1;
|
|
@@ -155,7 +157,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
|
|
|
|
*dma_handle = xen_phys_to_dma(dev, phys);
|
|
if (*dma_handle + size - 1 > dma_mask ||
|
|
- range_straddles_page_boundary(phys, size)) {
|
|
+ range_straddles_page_boundary(phys, size) ||
|
|
+ range_requires_alignment(phys, size)) {
|
|
if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
|
|
dma_handle) != 0)
|
|
goto out_free_pages;
|
|
@@ -181,7 +184,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
|
|
size = ALIGN(size, XEN_PAGE_SIZE);
|
|
|
|
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
|
|
- WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
|
|
+ WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
|
|
+ range_requires_alignment(phys, size)))
|
|
return;
|
|
|
|
if (TestClearPageXenRemapped(virt_to_page(vaddr)))
|
|
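The new range_requires_alignment() helper flags buffers whose physical address is suitably aligned but whose translated bus address is not. A standalone sketch of that check, assuming 4 KiB pages and using a hand-rolled stand-in for get_order(); the addresses and the phys-to-bus translation are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* Smallest power-of-two byte count covering `size`: a userspace stand-in
 * for 1ULL << (get_order(size) + PAGE_SHIFT) in the patch. */
static unsigned long long dma_alignment(unsigned long long size)
{
	unsigned long long algn = 1ULL << PAGE_SHIFT;

	while (algn < size)
		algn <<= 1;
	return algn;
}

static bool is_aligned(unsigned long long addr, unsigned long long algn)
{
	return (addr & (algn - 1)) == 0;
}

int main(void)
{
	/* Hypothetical addresses: the physical side is aligned, the translated
	 * bus address is not, which is exactly the case the helper flags. */
	unsigned long long size = 3 * 4096ULL;
	unsigned long long algn = dma_alignment(size);
	unsigned long long phys = 0x40000, bus = 0x41000;

	printf("algn=%#llx needs_contiguous_fixup=%d\n", algn,
	       is_aligned(phys, algn) && !is_aligned(bus, algn));
	return 0;
}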
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
|
|
index c2d0c62b087c22..68092b64e29eac 100644
|
|
--- a/fs/btrfs/file.c
|
|
+++ b/fs/btrfs/file.c
|
|
@@ -1134,7 +1134,6 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
|
|
loff_t pos = iocb->ki_pos;
|
|
int ret;
|
|
loff_t oldsize;
|
|
- loff_t start_pos;
|
|
|
|
/*
|
|
* Quickly bail out on NOWAIT writes if we don't have the nodatacow or
|
|
@@ -1158,9 +1157,8 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
|
|
*/
|
|
update_time_for_write(inode);
|
|
|
|
- start_pos = round_down(pos, fs_info->sectorsize);
|
|
oldsize = i_size_read(inode);
|
|
- if (start_pos > oldsize) {
|
|
+ if (pos > oldsize) {
|
|
/* Expand hole size to cover write data, preventing empty gap */
|
|
loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
|
|
|
|
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
|
|
index bf378ecd5d9fdd..7b59a40d40c061 100644
|
|
--- a/fs/nfs/sysfs.c
|
|
+++ b/fs/nfs/sysfs.c
|
|
@@ -280,9 +280,9 @@ void nfs_sysfs_link_rpc_client(struct nfs_server *server,
|
|
char name[RPC_CLIENT_NAME_SIZE];
|
|
int ret;
|
|
|
|
- strcpy(name, clnt->cl_program->name);
|
|
- strcat(name, uniq ? uniq : "");
|
|
- strcat(name, "_client");
|
|
+ strscpy(name, clnt->cl_program->name, sizeof(name));
|
|
+ strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1);
|
|
+ strncat(name, "_client", sizeof(name) - strlen(name) - 1);
|
|
|
|
ret = sysfs_create_link_nowarn(&server->kobj,
|
|
&clnt->cl_sysfs->kobject, name);
|
|
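The strscpy()/strncat() change above bounds every copy to the destination buffer. A rough standalone equivalent using only standard C, with snprintf() standing in for the kernel string helpers and a hypothetical 64-byte limit in place of RPC_CLIENT_NAME_SIZE.

#include <stdio.h>
#include <string.h>

#define NAME_SIZE 64	/* stand-in for RPC_CLIENT_NAME_SIZE */

/* Build "<program><uniq>_client" without ever writing past the buffer,
 * mirroring the bounded-copy pattern in the patch. */
static void build_link_name(char *name, size_t size,
			    const char *program, const char *uniq)
{
	snprintf(name, size, "%s%s_client", program, uniq ? uniq : "");
}

int main(void)
{
	char name[NAME_SIZE];

	build_link_name(name, sizeof(name), "nfs", "4");
	printf("%s\n", name);	/* prints "nfs4_client" */
	return 0;
}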
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
|
|
index 12b2b9bc07bfe3..f1f32ad4f42ca1 100644
|
|
--- a/fs/nfsd/nfs2acl.c
|
|
+++ b/fs/nfsd/nfs2acl.c
|
|
@@ -84,6 +84,8 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst *rqstp)
|
|
fail:
|
|
posix_acl_release(resp->acl_access);
|
|
posix_acl_release(resp->acl_default);
|
|
+ resp->acl_access = NULL;
|
|
+ resp->acl_default = NULL;
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
|
|
index 73adca47d37398..d7af28f445044e 100644
|
|
--- a/fs/nfsd/nfs3acl.c
|
|
+++ b/fs/nfsd/nfs3acl.c
|
|
@@ -76,6 +76,8 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst *rqstp)
|
|
fail:
|
|
posix_acl_release(resp->acl_access);
|
|
posix_acl_release(resp->acl_default);
|
|
+ resp->acl_access = NULL;
|
|
+ resp->acl_default = NULL;
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
|
|
index 875ea311ca3c20..d47173d98eef71 100644
|
|
--- a/fs/nfsd/nfs4callback.c
|
|
+++ b/fs/nfsd/nfs4callback.c
|
|
@@ -1410,8 +1410,11 @@ nfsd4_run_cb_work(struct work_struct *work)
|
|
nfsd4_process_cb_update(cb);
|
|
|
|
clnt = clp->cl_cb_client;
|
|
- if (!clnt) {
|
|
- /* Callback channel broken, or client killed; give up: */
|
|
+ if (!clnt || clp->cl_state == NFSD4_COURTESY) {
|
|
+ /*
|
|
+ * Callback channel broken, client killed or
|
|
+ * nfs4_client in courtesy state; give up.
|
|
+ */
|
|
nfsd41_destroy_cb(cb);
|
|
return;
|
|
}
|
|
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
|
|
index 1b508f5433846e..fa41db08848802 100644
|
|
--- a/fs/orangefs/orangefs-debugfs.c
|
|
+++ b/fs/orangefs/orangefs-debugfs.c
|
|
@@ -393,9 +393,9 @@ static ssize_t orangefs_debug_write(struct file *file,
|
|
* Thwart users who try to jamb a ridiculous number
|
|
* of bytes into the debug file...
|
|
*/
|
|
- if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) {
|
|
+ if (count > ORANGEFS_MAX_DEBUG_STRING_LEN) {
|
|
silly = count;
|
|
- count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1;
|
|
+ count = ORANGEFS_MAX_DEBUG_STRING_LEN;
|
|
}
|
|
|
|
buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
|
|
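The orangefs change fixes an off-by-one: the write size is now clamped to the buffer length itself rather than one byte past it. A small sketch of the clamp, assuming a 16-byte limit instead of the real ORANGEFS_MAX_DEBUG_STRING_LEN.

#include <stddef.h>
#include <stdio.h>

#define MAX_DEBUG_STRING_LEN 16	/* stand-in for ORANGEFS_MAX_DEBUG_STRING_LEN */

/* Clamp a user-supplied byte count so it never exceeds the buffer that
 * will be allocated with MAX_DEBUG_STRING_LEN bytes. */
static size_t clamp_debug_count(size_t count)
{
	return count > MAX_DEBUG_STRING_LEN ? MAX_DEBUG_STRING_LEN : count;
}

int main(void)
{
	printf("%zu\n", clamp_debug_count(8));	/* 8  */
	printf("%zu\n", clamp_debug_count(99));	/* 16 */
	return 0;
}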
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
|
|
index 958ed7e89b301e..1d482c2aabbdfa 100644
|
|
--- a/include/linux/blk-mq.h
|
|
+++ b/include/linux/blk-mq.h
|
|
@@ -849,12 +849,22 @@ static inline bool blk_mq_add_to_batch(struct request *req,
|
|
void (*complete)(struct io_comp_batch *))
|
|
{
|
|
/*
|
|
- * blk_mq_end_request_batch() can't end request allocated from
|
|
- * sched tags
|
|
+ * Check various conditions that exclude batch processing:
|
|
+ * 1) No batch container
|
|
+ * 2) Has scheduler data attached
|
|
+ * 3) Not a passthrough request and end_io set
|
|
+ * 4) Not a passthrough request and an ioerror
|
|
*/
|
|
- if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
|
|
- (req->end_io && !blk_rq_is_passthrough(req)))
|
|
+ if (!iob)
|
|
return false;
|
|
+ if (req->rq_flags & RQF_SCHED_TAGS)
|
|
+ return false;
|
|
+ if (!blk_rq_is_passthrough(req)) {
|
|
+ if (req->end_io)
|
|
+ return false;
|
|
+ if (ioerror < 0)
|
|
+ return false;
|
|
+ }
|
|
|
|
if (!iob->complete)
|
|
iob->complete = complete;
|
|
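The blk-mq hunk splits one compound condition into separate early returns and tightens the error check to ioerror < 0 for non-passthrough requests. Below is a simplified, self-contained version of the same predicate; the struct and field names are invented and are not the real struct request layout.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, heavily reduced request model for illustration only. */
struct req {
	bool has_sched_tags;
	bool is_passthrough;
	bool has_end_io;
	int  ioerror;
};

/* Mirror of the early-return structure in blk_mq_add_to_batch(): each
 * disqualifying condition is checked on its own line. */
static bool can_batch(const struct req *rq, bool have_batch)
{
	if (!have_batch)
		return false;
	if (rq->has_sched_tags)
		return false;
	if (!rq->is_passthrough) {
		if (rq->has_end_io)
			return false;
		if (rq->ioerror < 0)
			return false;
	}
	return true;
}

int main(void)
{
	struct req ok  = { .is_passthrough = false, .ioerror = 0 };
	struct req bad = { .is_passthrough = false, .ioerror = -5 };

	printf("%d %d\n", can_batch(&ok, true), can_batch(&bad, true));	/* 1 0 */
	return 0;
}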
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
|
|
index 6eefe5153a6ff7..c0c2b26725d0fc 100644
|
|
--- a/include/linux/cgroup-defs.h
|
|
+++ b/include/linux/cgroup-defs.h
|
|
@@ -71,9 +71,6 @@ enum {
|
|
|
|
/* Cgroup is frozen. */
|
|
CGRP_FROZEN,
|
|
-
|
|
- /* Control group has to be killed. */
|
|
- CGRP_KILL,
|
|
};
|
|
|
|
/* cgroup_root->flags */
|
|
@@ -438,6 +435,9 @@ struct cgroup {
|
|
|
|
int nr_threaded_children; /* # of live threaded child cgroups */
|
|
|
|
+ /* sequence number for cgroup.kill, serialized by css_set_lock. */
|
|
+ unsigned int kill_seq;
|
|
+
|
|
struct kernfs_node *kn; /* cgroup kernfs entry */
|
|
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
|
|
struct cgroup_file events_file; /* handle for "cgroup.events" */
|
|
diff --git a/include/linux/efi.h b/include/linux/efi.h
|
|
index 80b21d1c6eafaf..7db1c0759c0969 100644
|
|
--- a/include/linux/efi.h
|
|
+++ b/include/linux/efi.h
|
|
@@ -127,6 +127,7 @@ typedef struct {
|
|
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
|
|
#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
|
|
#define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */
|
|
+#define EFI_MEMORY_HOT_PLUGGABLE BIT_ULL(20) /* supports unplugging at runtime */
|
|
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
|
|
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
|
|
|
|
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
|
|
index 8336b2f6f83462..bf169cfef7f12d 100644
|
|
--- a/include/linux/i8253.h
|
|
+++ b/include/linux/i8253.h
|
|
@@ -24,6 +24,7 @@ extern raw_spinlock_t i8253_lock;
|
|
extern bool i8253_clear_counter_on_shutdown;
|
|
extern struct clock_event_device i8253_clockevent;
|
|
extern void clockevent_i8253_init(bool oneshot);
|
|
+extern void clockevent_i8253_disable(void);
|
|
|
|
extern void setup_pit_timer(void);
|
|
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index 8b5121eb8757ef..95ee88dfe0b9c6 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -2593,6 +2593,12 @@ struct net *dev_net(const struct net_device *dev)
|
|
return read_pnet(&dev->nd_net);
|
|
}
|
|
|
|
+static inline
|
|
+struct net *dev_net_rcu(const struct net_device *dev)
|
|
+{
|
|
+ return read_pnet_rcu(&dev->nd_net);
|
|
+}
|
|
+
|
|
static inline
|
|
void dev_net_set(struct net_device *dev, struct net *net)
|
|
{
|
|
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
|
|
index a23af225c89839..8dbecab4c4000f 100644
|
|
--- a/include/linux/sched/task.h
|
|
+++ b/include/linux/sched/task.h
|
|
@@ -41,6 +41,7 @@ struct kernel_clone_args {
|
|
void *fn_arg;
|
|
struct cgroup *cgrp;
|
|
struct css_set *cset;
|
|
+ unsigned int kill_seq;
|
|
};
|
|
|
|
/*
|
|
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
|
|
index 031c661aa14df7..bdfa9d414360c7 100644
|
|
--- a/include/net/l3mdev.h
|
|
+++ b/include/net/l3mdev.h
|
|
@@ -198,10 +198,12 @@ struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
|
|
if (netif_is_l3_slave(dev)) {
|
|
struct net_device *master;
|
|
|
|
+ rcu_read_lock();
|
|
master = netdev_master_upper_dev_get_rcu(dev);
|
|
if (master && master->l3mdev_ops->l3mdev_l3_out)
|
|
skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
|
|
skb, proto);
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
return skb;
|
|
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
|
|
index 1befad79a67349..ce3f84c6eb8eb3 100644
|
|
--- a/include/net/net_namespace.h
|
|
+++ b/include/net/net_namespace.h
|
|
@@ -369,21 +369,30 @@ static inline void put_net_track(struct net *net, netns_tracker *tracker)
|
|
|
|
typedef struct {
|
|
#ifdef CONFIG_NET_NS
|
|
- struct net *net;
|
|
+ struct net __rcu *net;
|
|
#endif
|
|
} possible_net_t;
|
|
|
|
static inline void write_pnet(possible_net_t *pnet, struct net *net)
|
|
{
|
|
#ifdef CONFIG_NET_NS
|
|
- pnet->net = net;
|
|
+ rcu_assign_pointer(pnet->net, net);
|
|
#endif
|
|
}
|
|
|
|
static inline struct net *read_pnet(const possible_net_t *pnet)
|
|
{
|
|
#ifdef CONFIG_NET_NS
|
|
- return pnet->net;
|
|
+ return rcu_dereference_protected(pnet->net, true);
|
|
+#else
|
|
+ return &init_net;
|
|
+#endif
|
|
+}
|
|
+
|
|
+static inline struct net *read_pnet_rcu(const possible_net_t *pnet)
|
|
+{
|
|
+#ifdef CONFIG_NET_NS
|
|
+ return rcu_dereference(pnet->net);
|
|
#else
|
|
return &init_net;
|
|
#endif
|
|
diff --git a/include/net/route.h b/include/net/route.h
|
|
index 51a45b1887b562..0171e9e1bbea3d 100644
|
|
--- a/include/net/route.h
|
|
+++ b/include/net/route.h
|
|
@@ -357,10 +357,15 @@ static inline int inet_iif(const struct sk_buff *skb)
|
|
static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
|
|
{
|
|
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
|
|
- struct net *net = dev_net(dst->dev);
|
|
|
|
- if (hoplimit == 0)
|
|
+ if (hoplimit == 0) {
|
|
+ const struct net *net;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ net = dev_net_rcu(dst->dev);
|
|
hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
|
|
+ rcu_read_unlock();
|
|
+ }
|
|
return hoplimit;
|
|
}
|
|
|
|
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
|
|
index 702c08c26cd4fa..b6fbae874f27f7 100644
|
|
--- a/io_uring/kbuf.c
|
|
+++ b/io_uring/kbuf.c
|
|
@@ -301,6 +301,12 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
|
|
}
|
|
}
|
|
|
|
+static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
|
|
+{
|
|
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
|
|
+ io_put_bl(ctx, bl);
|
|
+}
|
|
+
|
|
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
{
|
|
struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
|
|
@@ -642,12 +648,13 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
|
|
/* if mapped buffer ring OR classic exists, don't allow */
|
|
if (bl->is_mapped || !list_empty(&bl->buf_list))
|
|
return -EEXIST;
|
|
- } else {
|
|
- free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
|
|
- if (!bl)
|
|
- return -ENOMEM;
|
|
+ io_destroy_bl(ctx, bl);
|
|
}
|
|
|
|
+ free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
|
|
+ if (!bl)
|
|
+ return -ENOMEM;
|
|
+
|
|
if (!(reg.flags & IOU_PBUF_RING_MMAP))
|
|
ret = io_pin_pbuf_ring(®, bl);
|
|
else
|
|
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
|
|
index 36097e8c904fe5..3ccf80dfa587a3 100644
|
|
--- a/kernel/cgroup/cgroup.c
|
|
+++ b/kernel/cgroup/cgroup.c
|
|
@@ -3941,7 +3941,7 @@ static void __cgroup_kill(struct cgroup *cgrp)
|
|
lockdep_assert_held(&cgroup_mutex);
|
|
|
|
spin_lock_irq(&css_set_lock);
|
|
- set_bit(CGRP_KILL, &cgrp->flags);
|
|
+ cgrp->kill_seq++;
|
|
spin_unlock_irq(&css_set_lock);
|
|
|
|
css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
|
|
@@ -3957,10 +3957,6 @@ static void __cgroup_kill(struct cgroup *cgrp)
|
|
send_sig(SIGKILL, task, 0);
|
|
}
|
|
css_task_iter_end(&it);
|
|
-
|
|
- spin_lock_irq(&css_set_lock);
|
|
- clear_bit(CGRP_KILL, &cgrp->flags);
|
|
- spin_unlock_irq(&css_set_lock);
|
|
}
|
|
|
|
static void cgroup_kill(struct cgroup *cgrp)
|
|
@@ -6399,6 +6395,10 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
|
|
spin_lock_irq(&css_set_lock);
|
|
cset = task_css_set(current);
|
|
get_css_set(cset);
|
|
+ if (kargs->cgrp)
|
|
+ kargs->kill_seq = kargs->cgrp->kill_seq;
|
|
+ else
|
|
+ kargs->kill_seq = cset->dfl_cgrp->kill_seq;
|
|
spin_unlock_irq(&css_set_lock);
|
|
|
|
if (!(kargs->flags & CLONE_INTO_CGROUP)) {
|
|
@@ -6582,6 +6582,7 @@ void cgroup_post_fork(struct task_struct *child,
|
|
struct kernel_clone_args *kargs)
|
|
__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
|
|
{
|
|
+ unsigned int cgrp_kill_seq = 0;
|
|
unsigned long cgrp_flags = 0;
|
|
bool kill = false;
|
|
struct cgroup_subsys *ss;
|
|
@@ -6595,10 +6596,13 @@ void cgroup_post_fork(struct task_struct *child,
|
|
|
|
/* init tasks are special, only link regular threads */
|
|
if (likely(child->pid)) {
|
|
- if (kargs->cgrp)
|
|
+ if (kargs->cgrp) {
|
|
cgrp_flags = kargs->cgrp->flags;
|
|
- else
|
|
+ cgrp_kill_seq = kargs->cgrp->kill_seq;
|
|
+ } else {
|
|
cgrp_flags = cset->dfl_cgrp->flags;
|
|
+ cgrp_kill_seq = cset->dfl_cgrp->kill_seq;
|
|
+ }
|
|
|
|
WARN_ON_ONCE(!list_empty(&child->cg_list));
|
|
cset->nr_tasks++;
|
|
@@ -6633,7 +6637,7 @@ void cgroup_post_fork(struct task_struct *child,
|
|
* child down right after we finished preparing it for
|
|
* userspace.
|
|
*/
|
|
- kill = test_bit(CGRP_KILL, &cgrp_flags);
|
|
+ kill = kargs->kill_seq != cgrp_kill_seq;
|
|
}
|
|
|
|
spin_unlock_irq(&css_set_lock);
|
|
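The cgroup change replaces the CGRP_KILL flag with a kill_seq counter: the forking task snapshots the counter and later compares it, so a kill that lands anywhere in between is never missed even though a bit would already have been cleared. A toy illustration of that snapshot-and-compare pattern, using plain integers and no locking; the names are made up.

#include <stdbool.h>
#include <stdio.h>

/* Toy model: kill_seq is bumped for every kill request, as in the patch;
 * a forker records the value early and re-checks it when it finishes. */
struct group {
	unsigned int kill_seq;
};

static void group_kill(struct group *g)
{
	g->kill_seq++;		/* was: set_bit(CGRP_KILL) + clear_bit() later */
}

static bool fork_should_die(const struct group *g, unsigned int snapshot)
{
	return g->kill_seq != snapshot;	/* any kill since the snapshot counts */
}

int main(void)
{
	struct group g = { 0 };
	unsigned int snap = g.kill_seq;	/* taken early, as in cgroup_css_set_fork() */

	group_kill(&g);			/* a kill races with the fork */
	printf("%d\n", fork_should_die(&g, snap));	/* 1 */
	return 0;
}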
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
|
|
index d80d7a60814129..c32439b855f5dd 100644
|
|
--- a/kernel/cgroup/rstat.c
|
|
+++ b/kernel/cgroup/rstat.c
|
|
@@ -469,7 +469,6 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
|
|
|
|
cputime->sum_exec_runtime += user;
|
|
cputime->sum_exec_runtime += sys;
|
|
- cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
|
|
|
|
#ifdef CONFIG_SCHED_CORE
|
|
bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
|
|
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
|
|
index aa864999dc21be..3130f24daf5979 100644
|
|
--- a/kernel/time/clocksource.c
|
|
+++ b/kernel/time/clocksource.c
|
|
@@ -351,16 +351,18 @@ void clocksource_verify_percpu(struct clocksource *cs)
|
|
cpumask_clear(&cpus_ahead);
|
|
cpumask_clear(&cpus_behind);
|
|
cpus_read_lock();
|
|
- preempt_disable();
|
|
+ migrate_disable();
|
|
clocksource_verify_choose_cpus();
|
|
if (cpumask_empty(&cpus_chosen)) {
|
|
- preempt_enable();
|
|
+ migrate_enable();
|
|
cpus_read_unlock();
|
|
pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
|
|
return;
|
|
}
|
|
testcpu = smp_processor_id();
|
|
- pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
|
|
+ pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
|
|
+ cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
|
|
+ preempt_disable();
|
|
for_each_cpu(cpu, &cpus_chosen) {
|
|
if (cpu == testcpu)
|
|
continue;
|
|
@@ -380,6 +382,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
|
|
cs_nsec_min = cs_nsec;
|
|
}
|
|
preempt_enable();
|
|
+ migrate_enable();
|
|
cpus_read_unlock();
|
|
if (!cpumask_empty(&cpus_ahead))
|
|
pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
|
|
diff --git a/mm/gup.c b/mm/gup.c
|
|
index fdd75384160d8d..69d259f7bf37ec 100644
|
|
--- a/mm/gup.c
|
|
+++ b/mm/gup.c
|
|
@@ -1946,14 +1946,14 @@ struct page *get_dump_page(unsigned long addr)
|
|
/*
|
|
* Returns the number of collected pages. Return value is always >= 0.
|
|
*/
|
|
-static unsigned long collect_longterm_unpinnable_pages(
|
|
+static void collect_longterm_unpinnable_pages(
|
|
struct list_head *movable_page_list,
|
|
unsigned long nr_pages,
|
|
struct page **pages)
|
|
{
|
|
- unsigned long i, collected = 0;
|
|
struct folio *prev_folio = NULL;
|
|
bool drain_allow = true;
|
|
+ unsigned long i;
|
|
|
|
for (i = 0; i < nr_pages; i++) {
|
|
struct folio *folio = page_folio(pages[i]);
|
|
@@ -1965,8 +1965,6 @@ static unsigned long collect_longterm_unpinnable_pages(
|
|
if (folio_is_longterm_pinnable(folio))
|
|
continue;
|
|
|
|
- collected++;
|
|
-
|
|
if (folio_is_device_coherent(folio))
|
|
continue;
|
|
|
|
@@ -1988,8 +1986,6 @@ static unsigned long collect_longterm_unpinnable_pages(
|
|
NR_ISOLATED_ANON + folio_is_file_lru(folio),
|
|
folio_nr_pages(folio));
|
|
}
|
|
-
|
|
- return collected;
|
|
}
|
|
|
|
/*
|
|
@@ -2082,12 +2078,10 @@ static int migrate_longterm_unpinnable_pages(
|
|
static long check_and_migrate_movable_pages(unsigned long nr_pages,
|
|
struct page **pages)
|
|
{
|
|
- unsigned long collected;
|
|
LIST_HEAD(movable_page_list);
|
|
|
|
- collected = collect_longterm_unpinnable_pages(&movable_page_list,
|
|
- nr_pages, pages);
|
|
- if (!collected)
|
|
+ collect_longterm_unpinnable_pages(&movable_page_list, nr_pages, pages);
|
|
+ if (list_empty(&movable_page_list))
|
|
return 0;
|
|
|
|
return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
|
|
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
|
|
index 0f66dd8715bd87..4a16142ac58a94 100644
|
|
--- a/net/ax25/af_ax25.c
|
|
+++ b/net/ax25/af_ax25.c
|
|
@@ -685,6 +685,15 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
|
|
break;
|
|
}
|
|
|
|
+ if (ax25->ax25_dev) {
|
|
+ if (dev == ax25->ax25_dev->dev) {
|
|
+ rcu_read_unlock();
|
|
+ break;
|
|
+ }
|
|
+ netdev_put(ax25->ax25_dev->dev, &ax25->dev_tracker);
|
|
+ ax25_dev_put(ax25->ax25_dev);
|
|
+ }
|
|
+
|
|
ax25->ax25_dev = ax25_dev_ax25dev(dev);
|
|
if (!ax25->ax25_dev) {
|
|
rcu_read_unlock();
|
|
@@ -692,6 +701,8 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
|
|
break;
|
|
}
|
|
ax25_fillin_cb(ax25, ax25->ax25_dev);
|
|
+ netdev_hold(dev, &ax25->dev_tracker, GFP_ATOMIC);
|
|
+ ax25_dev_hold(ax25->ax25_dev);
|
|
rcu_read_unlock();
|
|
break;
|
|
|
|
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
|
|
index ac11f1f08db0f9..d35479c465e2c4 100644
|
|
--- a/net/batman-adv/bat_v.c
|
|
+++ b/net/batman-adv/bat_v.c
|
|
@@ -113,8 +113,6 @@ static void
|
|
batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
|
|
{
|
|
ewma_throughput_init(&hardif_neigh->bat_v.throughput);
|
|
- INIT_WORK(&hardif_neigh->bat_v.metric_work,
|
|
- batadv_v_elp_throughput_metric_update);
|
|
}
|
|
|
|
/**
|
|
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
|
|
index 1d704574e6bf54..b065578b4436ee 100644
|
|
--- a/net/batman-adv/bat_v_elp.c
|
|
+++ b/net/batman-adv/bat_v_elp.c
|
|
@@ -18,6 +18,7 @@
|
|
#include <linux/if_ether.h>
|
|
#include <linux/jiffies.h>
|
|
#include <linux/kref.h>
|
|
+#include <linux/list.h>
|
|
#include <linux/minmax.h>
|
|
#include <linux/netdevice.h>
|
|
#include <linux/nl80211.h>
|
|
@@ -26,6 +27,7 @@
|
|
#include <linux/rcupdate.h>
|
|
#include <linux/rtnetlink.h>
|
|
#include <linux/skbuff.h>
|
|
+#include <linux/slab.h>
|
|
#include <linux/stddef.h>
|
|
#include <linux/string.h>
|
|
#include <linux/types.h>
|
|
@@ -41,6 +43,18 @@
|
|
#include "routing.h"
|
|
#include "send.h"
|
|
|
|
+/**
|
|
+ * struct batadv_v_metric_queue_entry - list of hardif neighbors which require
|
|
+ * a metric update
|
|
+ */
|
|
+struct batadv_v_metric_queue_entry {
|
|
+ /** @hardif_neigh: hardif neighbor scheduled for metric update */
|
|
+ struct batadv_hardif_neigh_node *hardif_neigh;
|
|
+
|
|
+ /** @list: list node for metric_queue */
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
/**
|
|
* batadv_v_elp_start_timer() - restart timer for ELP periodic work
|
|
* @hard_iface: the interface for which the timer has to be reset
|
|
@@ -59,25 +73,36 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
|
|
/**
|
|
* batadv_v_elp_get_throughput() - get the throughput towards a neighbour
|
|
* @neigh: the neighbour for which the throughput has to be obtained
|
|
+ * @pthroughput: calculated throughput towards the given neighbour in multiples
|
|
+ * of 100kbps (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
|
|
*
|
|
- * Return: The throughput towards the given neighbour in multiples of 100kpbs
|
|
- * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
|
|
+ * Return: true when value behind @pthroughput was set
|
|
*/
|
|
-static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
|
|
+static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh,
|
|
+ u32 *pthroughput)
|
|
{
|
|
struct batadv_hard_iface *hard_iface = neigh->if_incoming;
|
|
+ struct net_device *soft_iface = hard_iface->soft_iface;
|
|
struct ethtool_link_ksettings link_settings;
|
|
struct net_device *real_netdev;
|
|
struct station_info sinfo;
|
|
u32 throughput;
|
|
int ret;
|
|
|
|
+ /* don't query throughput when no longer associated with any
|
|
+ * batman-adv interface
|
|
+ */
|
|
+ if (!soft_iface)
|
|
+ return false;
|
|
+
|
|
/* if the user specified a customised value for this interface, then
|
|
* return it directly
|
|
*/
|
|
throughput = atomic_read(&hard_iface->bat_v.throughput_override);
|
|
- if (throughput != 0)
|
|
- return throughput;
|
|
+ if (throughput != 0) {
|
|
+ *pthroughput = throughput;
|
|
+ return true;
|
|
+ }
|
|
|
|
/* if this is a wireless device, then ask its throughput through
|
|
* cfg80211 API
|
|
@@ -104,27 +129,39 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
|
|
* possible to delete this neighbor. For now set
|
|
* the throughput metric to 0.
|
|
*/
|
|
- return 0;
|
|
+ *pthroughput = 0;
|
|
+ return true;
|
|
}
|
|
if (ret)
|
|
goto default_throughput;
|
|
|
|
- if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT))
|
|
- return sinfo.expected_throughput / 100;
|
|
+ if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) {
|
|
+ *pthroughput = sinfo.expected_throughput / 100;
|
|
+ return true;
|
|
+ }
|
|
|
|
/* try to estimate the expected throughput based on reported tx
|
|
* rates
|
|
*/
|
|
- if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
|
|
- return cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
|
|
+ if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) {
|
|
+ *pthroughput = cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
|
|
+ return true;
|
|
+ }
|
|
|
|
goto default_throughput;
|
|
}
|
|
|
|
+ /* only use rtnl_trylock because the elp worker will be cancelled while
|
|
+ * the rtnl_lock is held. the cancel_delayed_work_sync() would otherwise
|
|
+ * wait forever when the elp work_item was started and it is then also
|
|
+ * trying to rtnl_lock
|
|
+ */
|
|
+ if (!rtnl_trylock())
|
|
+ return false;
|
|
+
|
|
/* if not a wifi interface, check if this device provides data via
|
|
* ethtool (e.g. an Ethernet adapter)
|
|
*/
|
|
- rtnl_lock();
|
|
ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
|
|
rtnl_unlock();
|
|
if (ret == 0) {
|
|
@@ -135,13 +172,15 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
|
|
hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
|
|
|
|
throughput = link_settings.base.speed;
|
|
- if (throughput && throughput != SPEED_UNKNOWN)
|
|
- return throughput * 10;
|
|
+ if (throughput && throughput != SPEED_UNKNOWN) {
|
|
+ *pthroughput = throughput * 10;
|
|
+ return true;
|
|
+ }
|
|
}
|
|
|
|
default_throughput:
|
|
if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
|
|
- batadv_info(hard_iface->soft_iface,
|
|
+ batadv_info(soft_iface,
|
|
"WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
|
|
hard_iface->net_dev->name,
|
|
BATADV_THROUGHPUT_DEFAULT_VALUE / 10,
|
|
@@ -150,31 +189,26 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
|
|
}
|
|
|
|
/* if none of the above cases apply, return the base_throughput */
|
|
- return BATADV_THROUGHPUT_DEFAULT_VALUE;
|
|
+ *pthroughput = BATADV_THROUGHPUT_DEFAULT_VALUE;
|
|
+ return true;
|
|
}
|
|
|
|
/**
|
|
* batadv_v_elp_throughput_metric_update() - worker updating the throughput
|
|
* metric of a single hop neighbour
|
|
- * @work: the work queue item
|
|
+ * @neigh: the neighbour to probe
|
|
*/
|
|
-void batadv_v_elp_throughput_metric_update(struct work_struct *work)
|
|
+static void
|
|
+batadv_v_elp_throughput_metric_update(struct batadv_hardif_neigh_node *neigh)
|
|
{
|
|
- struct batadv_hardif_neigh_node_bat_v *neigh_bat_v;
|
|
- struct batadv_hardif_neigh_node *neigh;
|
|
-
|
|
- neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,
|
|
- metric_work);
|
|
- neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node,
|
|
- bat_v);
|
|
+ u32 throughput;
|
|
+ bool valid;
|
|
|
|
- ewma_throughput_add(&neigh->bat_v.throughput,
|
|
- batadv_v_elp_get_throughput(neigh));
|
|
+ valid = batadv_v_elp_get_throughput(neigh, &throughput);
|
|
+ if (!valid)
|
|
+ return;
|
|
|
|
- /* decrement refcounter to balance increment performed before scheduling
|
|
- * this task
|
|
- */
|
|
- batadv_hardif_neigh_put(neigh);
|
|
+ ewma_throughput_add(&neigh->bat_v.throughput, throughput);
|
|
}
|
|
|
|
/**
|
|
@@ -248,14 +282,16 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
|
|
*/
|
|
static void batadv_v_elp_periodic_work(struct work_struct *work)
|
|
{
|
|
+ struct batadv_v_metric_queue_entry *metric_entry;
|
|
+ struct batadv_v_metric_queue_entry *metric_safe;
|
|
struct batadv_hardif_neigh_node *hardif_neigh;
|
|
struct batadv_hard_iface *hard_iface;
|
|
struct batadv_hard_iface_bat_v *bat_v;
|
|
struct batadv_elp_packet *elp_packet;
|
|
+ struct list_head metric_queue;
|
|
struct batadv_priv *bat_priv;
|
|
struct sk_buff *skb;
|
|
u32 elp_interval;
|
|
- bool ret;
|
|
|
|
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
|
|
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
|
|
@@ -291,6 +327,8 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
|
|
|
|
atomic_inc(&hard_iface->bat_v.elp_seqno);
|
|
|
|
+ INIT_LIST_HEAD(&metric_queue);
|
|
+
|
|
/* The throughput metric is updated on each sent packet. This way, if a
|
|
* node is dead and no longer sends packets, batman-adv is still able to
|
|
* react timely to its death.
|
|
@@ -315,16 +353,28 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
|
|
|
|
/* Reading the estimated throughput from cfg80211 is a task that
|
|
* may sleep and that is not allowed in an rcu protected
|
|
- * context. Therefore schedule a task for that.
|
|
+ * context. Therefore add it to metric_queue and process it
|
|
+ * outside rcu protected context.
|
|
*/
|
|
- ret = queue_work(batadv_event_workqueue,
|
|
- &hardif_neigh->bat_v.metric_work);
|
|
-
|
|
- if (!ret)
|
|
+ metric_entry = kzalloc(sizeof(*metric_entry), GFP_ATOMIC);
|
|
+ if (!metric_entry) {
|
|
batadv_hardif_neigh_put(hardif_neigh);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ metric_entry->hardif_neigh = hardif_neigh;
|
|
+ list_add(&metric_entry->list, &metric_queue);
|
|
}
|
|
rcu_read_unlock();
|
|
|
|
+ list_for_each_entry_safe(metric_entry, metric_safe, &metric_queue, list) {
|
|
+ batadv_v_elp_throughput_metric_update(metric_entry->hardif_neigh);
|
|
+
|
|
+ batadv_hardif_neigh_put(metric_entry->hardif_neigh);
|
|
+ list_del(&metric_entry->list);
|
|
+ kfree(metric_entry);
|
|
+ }
|
|
+
|
|
restart_timer:
|
|
batadv_v_elp_start_timer(hard_iface);
|
|
out:
|
|
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
|
|
index 9e2740195fa2d4..c9cb0a30710045 100644
|
|
--- a/net/batman-adv/bat_v_elp.h
|
|
+++ b/net/batman-adv/bat_v_elp.h
|
|
@@ -10,7 +10,6 @@
|
|
#include "main.h"
|
|
|
|
#include <linux/skbuff.h>
|
|
-#include <linux/workqueue.h>
|
|
|
|
int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface);
|
|
void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface);
|
|
@@ -19,6 +18,5 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
|
|
void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface);
|
|
int batadv_v_elp_packet_recv(struct sk_buff *skb,
|
|
struct batadv_hard_iface *if_incoming);
|
|
-void batadv_v_elp_throughput_metric_update(struct work_struct *work);
|
|
|
|
#endif /* _NET_BATMAN_ADV_BAT_V_ELP_H_ */
|
|
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
|
|
index 17d5ea1d8e84e3..d6854c109cd291 100644
|
|
--- a/net/batman-adv/types.h
|
|
+++ b/net/batman-adv/types.h
|
|
@@ -596,9 +596,6 @@ struct batadv_hardif_neigh_node_bat_v {
|
|
* neighbor
|
|
*/
|
|
unsigned long last_unicast_tx;
|
|
-
|
|
- /** @metric_work: work queue callback item for metric update */
|
|
- struct work_struct metric_work;
|
|
};
|
|
|
|
/**
|
|
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
|
|
index 1f49d6164ea1da..cf9a12d8da6f90 100644
|
|
--- a/net/can/j1939/socket.c
|
|
+++ b/net/can/j1939/socket.c
|
|
@@ -1132,7 +1132,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
|
|
|
|
todo_size = size;
|
|
|
|
- while (todo_size) {
|
|
+ do {
|
|
struct j1939_sk_buff_cb *skcb;
|
|
|
|
segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
|
|
@@ -1177,7 +1177,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
|
|
|
|
todo_size -= segment_size;
|
|
session->total_queued_size += segment_size;
|
|
- }
|
|
+ } while (todo_size);
|
|
|
|
switch (ret) {
|
|
case 0: /* OK */
|
|
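Switching the j1939 send loop from while to do-while guarantees at least one pass, so a zero-length send still queues one (empty) segment instead of falling straight through. A minimal sketch of that control-flow difference, with a made-up segment size in place of J1939_MAX_TP_PACKET_SIZE.

#include <stdio.h>

#define SEG_MAX 8	/* stand-in for J1939_MAX_TP_PACKET_SIZE */

/* Count how many segments a do-while loop emits for a given size; note
 * that size == 0 still yields one iteration, unlike a plain while loop. */
static unsigned int count_segments(unsigned int todo)
{
	unsigned int segments = 0;

	do {
		unsigned int seg = todo < SEG_MAX ? todo : SEG_MAX;

		segments++;
		todo -= seg;
	} while (todo);

	return segments;
}

int main(void)
{
	printf("%u %u %u\n", count_segments(0), count_segments(8),
	       count_segments(20));	/* 1 1 3 */
	return 0;
}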
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
|
|
index 95f7a7e65a73fa..9b72d118d756dd 100644
|
|
--- a/net/can/j1939/transport.c
|
|
+++ b/net/can/j1939/transport.c
|
|
@@ -382,8 +382,9 @@ sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
|
|
skb_queue_walk(&session->skb_queue, do_skb) {
|
|
do_skcb = j1939_skb_to_cb(do_skb);
|
|
|
|
- if (offset_start >= do_skcb->offset &&
|
|
- offset_start < (do_skcb->offset + do_skb->len)) {
|
|
+ if ((offset_start >= do_skcb->offset &&
|
|
+ offset_start < (do_skcb->offset + do_skb->len)) ||
|
|
+ (offset_start == 0 && do_skcb->offset == 0 && do_skb->len == 0)) {
|
|
skb = do_skb;
|
|
}
|
|
}
|
|
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
|
|
index b22d20cc417b21..00a5c41c1831df 100644
|
|
--- a/net/core/flow_dissector.c
|
|
+++ b/net/core/flow_dissector.c
|
|
@@ -1084,10 +1084,12 @@ bool __skb_flow_dissect(const struct net *net,
|
|
FLOW_DISSECTOR_KEY_BASIC,
|
|
target_container);
|
|
|
|
+ rcu_read_lock();
|
|
+
|
|
if (skb) {
|
|
if (!net) {
|
|
if (skb->dev)
|
|
- net = dev_net(skb->dev);
|
|
+ net = dev_net_rcu(skb->dev);
|
|
else if (skb->sk)
|
|
net = sock_net(skb->sk);
|
|
}
|
|
@@ -1098,7 +1100,6 @@ bool __skb_flow_dissect(const struct net *net,
|
|
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
|
|
struct bpf_prog_array *run_array;
|
|
|
|
- rcu_read_lock();
|
|
run_array = rcu_dereference(init_net.bpf.run_array[type]);
|
|
if (!run_array)
|
|
run_array = rcu_dereference(net->bpf.run_array[type]);
|
|
@@ -1126,17 +1127,17 @@ bool __skb_flow_dissect(const struct net *net,
|
|
prog = READ_ONCE(run_array->items[0].prog);
|
|
result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
|
|
hlen, flags);
|
|
- if (result == BPF_FLOW_DISSECTOR_CONTINUE)
|
|
- goto dissect_continue;
|
|
- __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
|
|
- target_container);
|
|
- rcu_read_unlock();
|
|
- return result == BPF_OK;
|
|
+ if (result != BPF_FLOW_DISSECTOR_CONTINUE) {
|
|
+ __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
|
|
+ target_container);
|
|
+ rcu_read_unlock();
|
|
+ return result == BPF_OK;
|
|
+ }
|
|
}
|
|
-dissect_continue:
|
|
- rcu_read_unlock();
|
|
}
|
|
|
|
+ rcu_read_unlock();
|
|
+
|
|
if (dissector_uses_key(flow_dissector,
|
|
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
|
|
struct ethhdr *eth = eth_hdr(skb);
|
|
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
|
|
index cb0c233e83962f..e44feb39d459a7 100644
|
|
--- a/net/core/neighbour.c
|
|
+++ b/net/core/neighbour.c
|
|
@@ -3508,10 +3508,12 @@ static const struct seq_operations neigh_stat_seq_ops = {
|
|
static void __neigh_notify(struct neighbour *n, int type, int flags,
|
|
u32 pid)
|
|
{
|
|
- struct net *net = dev_net(n->dev);
|
|
struct sk_buff *skb;
|
|
int err = -ENOBUFS;
|
|
+ struct net *net;
|
|
|
|
+ rcu_read_lock();
|
|
+ net = dev_net_rcu(n->dev);
|
|
skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
|
|
if (skb == NULL)
|
|
goto errout;
|
|
@@ -3524,10 +3526,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
|
|
goto errout;
|
|
}
|
|
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
|
|
- return;
|
|
+ goto out;
|
|
errout:
|
|
- if (err < 0)
|
|
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
|
|
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
|
|
+out:
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
void neigh_app_ns(struct neighbour *n)
|
|
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
|
|
index 0d0d725b46ad0c..02776453bf97a8 100644
|
|
--- a/net/ipv4/arp.c
|
|
+++ b/net/ipv4/arp.c
|
|
@@ -658,10 +658,12 @@ static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb
|
|
*/
|
|
void arp_xmit(struct sk_buff *skb)
|
|
{
|
|
+ rcu_read_lock();
|
|
/* Send it off, maybe filter it using firewalling first. */
|
|
NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
|
|
- dev_net(skb->dev), NULL, skb, NULL, skb->dev,
|
|
+ dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev,
|
|
arp_xmit_finish);
|
|
+ rcu_read_unlock();
|
|
}
|
|
EXPORT_SYMBOL(arp_xmit);
|
|
|
|
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
|
|
index 4822f68edbf08b..c33b1ecc591e4e 100644
|
|
--- a/net/ipv4/devinet.c
|
|
+++ b/net/ipv4/devinet.c
|
|
@@ -1341,10 +1341,11 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
|
|
__be32 addr = 0;
|
|
unsigned char localnet_scope = RT_SCOPE_HOST;
|
|
struct in_device *in_dev;
|
|
- struct net *net = dev_net(dev);
|
|
+ struct net *net;
|
|
int master_idx;
|
|
|
|
rcu_read_lock();
|
|
+ net = dev_net_rcu(dev);
|
|
in_dev = __in_dev_get_rcu(dev);
|
|
if (!in_dev)
|
|
goto no_in_dev;
|
|
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
|
|
index a6adf6a2ec4b57..a21d32b3ae6c36 100644
|
|
--- a/net/ipv4/icmp.c
|
|
+++ b/net/ipv4/icmp.c
|
|
@@ -403,10 +403,10 @@ static void icmp_push_reply(struct sock *sk,
|
|
|
|
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
|
|
{
|
|
- struct ipcm_cookie ipc;
|
|
struct rtable *rt = skb_rtable(skb);
|
|
- struct net *net = dev_net(rt->dst.dev);
|
|
+ struct net *net = dev_net_rcu(rt->dst.dev);
|
|
bool apply_ratelimit = false;
|
|
+ struct ipcm_cookie ipc;
|
|
struct flowi4 fl4;
|
|
struct sock *sk;
|
|
struct inet_sock *inet;
|
|
@@ -609,12 +609,14 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
|
|
struct sock *sk;
|
|
|
|
if (!rt)
|
|
- goto out;
|
|
+ return;
|
|
+
|
|
+ rcu_read_lock();
|
|
|
|
if (rt->dst.dev)
|
|
- net = dev_net(rt->dst.dev);
|
|
+ net = dev_net_rcu(rt->dst.dev);
|
|
else if (skb_in->dev)
|
|
- net = dev_net(skb_in->dev);
|
|
+ net = dev_net_rcu(skb_in->dev);
|
|
else
|
|
goto out;
|
|
|
|
@@ -783,7 +785,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
|
|
icmp_xmit_unlock(sk);
|
|
out_bh_enable:
|
|
local_bh_enable();
|
|
-out:;
|
|
+out:
|
|
+ rcu_read_unlock();
|
|
}
|
|
EXPORT_SYMBOL(__icmp_send);
|
|
|
|
@@ -832,7 +835,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
|
|
* avoid additional coding at protocol handlers.
|
|
*/
|
|
if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
|
|
- __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
|
|
+ __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
|
|
return;
|
|
}
|
|
|
|
@@ -866,7 +869,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
|
|
struct net *net;
|
|
u32 info = 0;
|
|
|
|
- net = dev_net(skb_dst(skb)->dev);
|
|
+ net = dev_net_rcu(skb_dst(skb)->dev);
|
|
|
|
/*
|
|
* Incomplete header ?
|
|
@@ -977,7 +980,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
|
|
static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
|
|
{
|
|
if (skb->len < sizeof(struct iphdr)) {
|
|
- __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
|
|
+ __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
|
|
return SKB_DROP_REASON_PKT_TOO_SMALL;
|
|
}
|
|
|
|
@@ -1009,7 +1012,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
|
|
struct icmp_bxm icmp_param;
|
|
struct net *net;
|
|
|
|
- net = dev_net(skb_dst(skb)->dev);
|
|
+ net = dev_net_rcu(skb_dst(skb)->dev);
|
|
/* should there be an ICMP stat for ignored echos? */
|
|
if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
|
|
return SKB_NOT_DROPPED_YET;
|
|
@@ -1038,9 +1041,9 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
|
|
|
|
bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
|
|
{
|
|
+ struct net *net = dev_net_rcu(skb->dev);
|
|
struct icmp_ext_hdr *ext_hdr, _ext_hdr;
|
|
struct icmp_ext_echo_iio *iio, _iio;
|
|
- struct net *net = dev_net(skb->dev);
|
|
struct inet6_dev *in6_dev;
|
|
struct in_device *in_dev;
|
|
struct net_device *dev;
|
|
@@ -1179,7 +1182,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
|
|
return SKB_NOT_DROPPED_YET;
|
|
|
|
out_err:
|
|
- __ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
|
|
+ __ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
|
|
return SKB_DROP_REASON_PKT_TOO_SMALL;
|
|
}
|
|
|
|
@@ -1196,7 +1199,7 @@ int icmp_rcv(struct sk_buff *skb)
|
|
{
|
|
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
|
|
struct rtable *rt = skb_rtable(skb);
|
|
- struct net *net = dev_net(rt->dst.dev);
|
|
+ struct net *net = dev_net_rcu(rt->dst.dev);
|
|
struct icmphdr *icmph;
|
|
|
|
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
|
|
@@ -1369,9 +1372,9 @@ int icmp_err(struct sk_buff *skb, u32 info)
|
|
struct iphdr *iph = (struct iphdr *)skb->data;
|
|
int offset = iph->ihl<<2;
|
|
struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
|
|
+ struct net *net = dev_net_rcu(skb->dev);
|
|
int type = icmp_hdr(skb)->type;
|
|
int code = icmp_hdr(skb)->code;
|
|
- struct net *net = dev_net(skb->dev);
|
|
|
|
/*
|
|
* Use ping_err to handle all icmp errors except those
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index 61fc2166a870e6..97dc30a03dbf26 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -393,7 +393,13 @@ static inline int ip_rt_proc_init(void)
|
|
|
|
static inline bool rt_is_expired(const struct rtable *rth)
|
|
{
|
|
- return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
|
|
+ bool res;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev));
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ return res;
|
|
}
|
|
|
|
void rt_cache_flush(struct net *net)
|
|
@@ -1014,9 +1020,9 @@ out: kfree_skb_reason(skb, reason);
|
|
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
|
|
{
|
|
struct dst_entry *dst = &rt->dst;
|
|
- struct net *net = dev_net(dst->dev);
|
|
struct fib_result res;
|
|
bool lock = false;
|
|
+ struct net *net;
|
|
u32 old_mtu;
|
|
|
|
if (ip_mtu_locked(dst))
|
|
@@ -1026,6 +1032,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
|
|
if (old_mtu < mtu)
|
|
return;
|
|
|
|
+ rcu_read_lock();
|
|
+ net = dev_net_rcu(dst->dev);
|
|
if (mtu < net->ipv4.ip_rt_min_pmtu) {
|
|
lock = true;
|
|
mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
|
|
@@ -1033,17 +1041,29 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
|
|
|
|
if (rt->rt_pmtu == mtu && !lock &&
|
|
time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
|
|
- return;
|
|
+ goto out;
|
|
|
|
- rcu_read_lock();
|
|
if (fib_lookup(net, fl4, &res, 0) == 0) {
|
|
struct fib_nh_common *nhc;
|
|
|
|
fib_select_path(net, &res, fl4, NULL);
|
|
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
|
+ if (fib_info_num_path(res.fi) > 1) {
|
|
+ int nhsel;
|
|
+
|
|
+ for (nhsel = 0; nhsel < fib_info_num_path(res.fi); nhsel++) {
|
|
+ nhc = fib_info_nhc(res.fi, nhsel);
|
|
+ update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
|
|
+ jiffies + net->ipv4.ip_rt_mtu_expires);
|
|
+ }
|
|
+ goto out;
|
|
+ }
|
|
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
|
|
nhc = FIB_RES_NHC(res);
|
|
update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
|
|
jiffies + net->ipv4.ip_rt_mtu_expires);
|
|
}
|
|
+out:
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
@@ -1306,10 +1326,15 @@ static void set_class_tag(struct rtable *rt, u32 tag)
|
|
|
|
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
|
|
{
|
|
- struct net *net = dev_net(dst->dev);
|
|
unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
|
|
- unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
|
|
- net->ipv4.ip_rt_min_advmss);
|
|
+ unsigned int advmss;
|
|
+ struct net *net;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ net = dev_net_rcu(dst->dev);
|
|
+ advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
|
|
+ net->ipv4.ip_rt_min_advmss);
|
|
+ rcu_read_unlock();
|
|
|
|
return min(advmss, IPV4_MAX_PMTU - header_size);
|
|
}
|
|
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
|
|
index 35df405ce1f753..fd91fd139d76cf 100644
|
|
--- a/net/ipv6/icmp.c
|
|
+++ b/net/ipv6/icmp.c
|
|
@@ -76,7 +76,7 @@ static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
|
{
|
|
/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
|
|
struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
|
|
- struct net *net = dev_net(skb->dev);
|
|
+ struct net *net = dev_net_rcu(skb->dev);
|
|
|
|
if (type == ICMPV6_PKT_TOOBIG)
|
|
ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
|
|
@@ -473,7 +473,10 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
|
|
|
|
if (!skb->dev)
|
|
return;
|
|
- net = dev_net(skb->dev);
|
|
+
|
|
+ rcu_read_lock();
|
|
+
|
|
+ net = dev_net_rcu(skb->dev);
|
|
mark = IP6_REPLY_MARK(net, skb->mark);
|
|
/*
|
|
* Make sure we respect the rules
|
|
@@ -496,7 +499,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
|
|
!(type == ICMPV6_PARAMPROB &&
|
|
code == ICMPV6_UNK_OPTION &&
|
|
(opt_unrec(skb, info))))
|
|
- return;
|
|
+ goto out;
|
|
|
|
saddr = NULL;
|
|
}
|
|
@@ -526,7 +529,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
|
|
if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
|
|
net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
|
|
&hdr->saddr, &hdr->daddr);
|
|
- return;
|
|
+ goto out;
|
|
}
|
|
|
|
/*
|
|
@@ -535,7 +538,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
|
|
if (is_ineligible(skb)) {
|
|
net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
|
|
&hdr->saddr, &hdr->daddr);
|
|
- return;
|
|
+ goto out;
|
|
}
|
|
|
|
/* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
|
|
@@ -582,7 +585,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
|
|
np = inet6_sk(sk);
|
|
|
|
if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
|
|
- goto out;
|
|
+ goto out_unlock;
|
|
|
|
tmp_hdr.icmp6_type = type;
tmp_hdr.icmp6_code = code;
@@ -600,7 +603,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,

dst = icmpv6_route_lookup(net, skb, sk, &fl6);
if (IS_ERR(dst))
- goto out;
+ goto out_unlock;

ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

@@ -616,7 +619,6 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
goto out_dst_release;
}

- rcu_read_lock();
idev = __in6_dev_get(skb->dev);

if (ip6_append_data(sk, icmpv6_getfrag, &msg,
@@ -630,13 +632,15 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
len + sizeof(struct icmp6hdr));
}
- rcu_read_unlock();
+
out_dst_release:
dst_release(dst);
-out:
+out_unlock:
icmpv6_xmit_unlock(sk);
out_bh_enable:
local_bh_enable();
+out:
+ rcu_read_unlock();
}
EXPORT_SYMBOL(icmp6_send);

@@ -679,8 +683,8 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
skb_pull(skb2, nhs);
skb_reset_network_header(skb2);

- rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
- skb, 0);
+ rt = rt6_lookup(dev_net_rcu(skb->dev), &ipv6_hdr(skb2)->saddr,
+ NULL, 0, skb, 0);

if (rt && rt->dst.dev)
skb2->dev = rt->dst.dev;
@@ -717,7 +721,7 @@ EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);

static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
{
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net_rcu(skb->dev);
struct sock *sk;
struct inet6_dev *idev;
struct ipv6_pinfo *np;
@@ -832,7 +836,7 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
u8 code, __be32 info)
{
struct inet6_skb_parm *opt = IP6CB(skb);
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net_rcu(skb->dev);
const struct inet6_protocol *ipprot;
enum skb_drop_reason reason;
int inner_offset;
@@ -889,7 +893,7 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
static int icmpv6_rcv(struct sk_buff *skb)
{
enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net_rcu(skb->dev);
struct net_device *dev = icmp6_dev(skb);
struct inet6_dev *idev = __in6_dev_get(dev);
const struct in6_addr *saddr, *daddr;
@@ -921,7 +925,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
skb_set_network_header(skb, nh);
}

- __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);
+ __ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INMSGS);

saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
@@ -939,7 +943,7 @@ static int icmpv6_rcv(struct sk_buff *skb)

type = hdr->icmp6_type;

- ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);
+ ICMP6MSGIN_INC_STATS(dev_net_rcu(dev), idev, type);

switch (type) {
case ICMPV6_ECHO_REQUEST:
@@ -1034,9 +1038,9 @@ static int icmpv6_rcv(struct sk_buff *skb)

csum_error:
reason = SKB_DROP_REASON_ICMP_CSUM;
- __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
+ __ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
- __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
+ __ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
kfree_skb_reason(skb, reason);
return 0;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6e2f77a95a657a..9bb246c09fcee8 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1729,21 +1729,19 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
struct net_device *dev = idev->dev;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- struct net *net = dev_net(dev);
const struct in6_addr *saddr;
struct in6_addr addr_buf;
struct mld2_report *pmr;
struct sk_buff *skb;
unsigned int size;
struct sock *sk;
- int err;
+ struct net *net;

- sk = net->ipv6.igmp_sk;
/* we assume size > sizeof(ra) here
* Also try to not allocate high-order pages for big MTU
*/
size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
- skb = sock_alloc_send_skb(sk, size, 1, &err);
+ skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
return NULL;

@@ -1751,6 +1749,12 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
skb_reserve(skb, hlen);
skb_tailroom_reserve(skb, mtu, tlen);

+ rcu_read_lock();
+
+ net = dev_net_rcu(dev);
+ sk = net->ipv6.igmp_sk;
+ skb_set_owner_w(skb, sk);
+
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
* use unspecified address as the source address
@@ -1762,6 +1766,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)

ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

+ rcu_read_unlock();
+
skb_put_data(skb, ra, sizeof(ra));

skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
@@ -2121,21 +2127,21 @@ static void mld_send_cr(struct inet6_dev *idev)

static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
- struct net *net = dev_net(dev);
- struct sock *sk = net->ipv6.igmp_sk;
+ const struct in6_addr *snd_addr, *saddr;
+ int err, len, payload_len, full_len;
+ struct in6_addr addr_buf;
struct inet6_dev *idev;
struct sk_buff *skb;
struct mld_msg *hdr;
- const struct in6_addr *snd_addr, *saddr;
- struct in6_addr addr_buf;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
IPV6_TLV_ROUTERALERT, 2, 0, 0,
IPV6_TLV_PADN, 0 };
- struct flowi6 fl6;
struct dst_entry *dst;
+ struct flowi6 fl6;
+ struct net *net;
+ struct sock *sk;

if (type == ICMPV6_MGM_REDUCTION)
snd_addr = &in6addr_linklocal_allrouters;
@@ -2146,19 +2152,21 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
payload_len = len + sizeof(ra);
full_len = sizeof(struct ipv6hdr) + payload_len;

- rcu_read_lock();
- IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
- rcu_read_unlock();
+ skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);

- skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
+ rcu_read_lock();

+ net = dev_net_rcu(dev);
+ idev = __in6_dev_get(dev);
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
if (!skb) {
- rcu_read_lock();
- IP6_INC_STATS(net, __in6_dev_get(dev),
- IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
rcu_read_unlock();
return;
}
+ sk = net->ipv6.igmp_sk;
+ skb_set_owner_w(skb, sk);
+
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);

@@ -2183,9 +2191,6 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
IPPROTO_ICMPV6,
csum_partial(hdr, len, 0));

- rcu_read_lock();
- idev = __in6_dev_get(skb->dev);
-
icmpv6_flow_init(sk, &fl6, type,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
skb->dev->ifindex);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2ad0ef47b07c24..8d853971f2f68e 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -418,15 +418,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
{
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
struct sk_buff *skb;

skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
- if (!skb) {
- ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
- __func__);
+ if (!skb)
return NULL;
- }

skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -437,7 +433,9 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
/* Manually assign socket ownership as we avoid calling
* sock_alloc_send_pskb() to bypass wmem buffer limits
*/
- skb_set_owner_w(skb, sk);
+ rcu_read_lock();
+ skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.ndisc_sk);
+ rcu_read_unlock();

return skb;
}
@@ -473,16 +471,20 @@ static void ip6_nd_hdr(struct sk_buff *skb,
void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
+ struct icmp6hdr *icmp6h = icmp6_hdr(skb);
struct dst_entry *dst = skb_dst(skb);
- struct net *net = dev_net(skb->dev);
- struct sock *sk = net->ipv6.ndisc_sk;
struct inet6_dev *idev;
+ struct net *net;
+ struct sock *sk;
int err;
- struct icmp6hdr *icmp6h = icmp6_hdr(skb);
u8 type;

type = icmp6h->icmp6_type;

+ rcu_read_lock();
+
+ net = dev_net_rcu(skb->dev);
+ sk = net->ipv6.ndisc_sk;
if (!dst) {
struct flowi6 fl6;
int oif = skb->dev->ifindex;
@@ -490,6 +492,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
dst = icmp6_dst_alloc(skb->dev, &fl6);
if (IS_ERR(dst)) {
+ rcu_read_unlock();
kfree_skb(skb);
return;
}
@@ -504,7 +507,6 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,

ip6_nd_hdr(skb, saddr, daddr, inet6_sk(sk)->hop_limit, skb->len);

- rcu_read_lock();
idev = __in6_dev_get(dst->dev);
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);

@@ -1680,7 +1682,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
bool ret;

if (netif_is_l3_master(skb->dev)) {
- dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
+ dev = dev_get_by_index_rcu(dev_net(skb->dev), IPCB(skb)->iif);
if (!dev)
return;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c5cee40a658b46..5715d54f3d0bed 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3188,13 +3188,18 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
unsigned int mtu = dst_mtu(dst);
- struct net *net = dev_net(dev);
+ struct net *net;

mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

+ rcu_read_lock();
+
+ net = dev_net_rcu(dev);
if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

+ rcu_read_unlock();
+
/*
* Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
* corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index b7232142c13f83..cb52fac7caa3cd 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2103,6 +2103,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
{
struct ovs_header *ovs_header;
struct ovs_vport_stats vport_stats;
+ struct net *net_vport;
int err;

ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
@@ -2119,12 +2120,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
goto nla_put_failure;

- if (!net_eq(net, dev_net(vport->dev))) {
- int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
+ rcu_read_lock();
+ net_vport = dev_net_rcu(vport->dev);
+ if (!net_eq(net, net_vport)) {
+ int id = peernet2id_alloc(net, net_vport, GFP_ATOMIC);

if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
- goto nla_put_failure;
+ goto nla_put_failure_unlock;
}
+ rcu_read_unlock();

ovs_vport_get_stats(vport, &vport_stats);
if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
@@ -2145,6 +2149,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
genlmsg_end(skb, ovs_header);
return 0;

+nla_put_failure_unlock:
+ rcu_read_unlock();
nla_put_failure:
err = -EMSGSIZE;
error:
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index f4dbf5f87962d9..618b18e80cea04 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -336,7 +336,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
- vsock_remove_bound(vsk);
+ /* Transport reassignment must not remove the binding. */
+ if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
+ vsock_remove_bound(vsk);
+
vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);
@@ -820,6 +823,13 @@ static void __vsock_release(struct sock *sk, int level)
*/
lock_sock_nested(sk, level);

+ /* Indicate to vsock_remove_sock() that the socket is being released and
+ * can be removed from the bound_table. Unlike transport reassignment
+ * case, where the socket must remain bound despite vsock_remove_sock()
+ * being called from the transport release() callback.
+ */
+ sock_set_flag(sk, SOCK_DEAD);
+
if (vsk->transport)
vsk->transport->release(vsk);
else if (sock_type_connectible(sk->sk_type))
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index ddf68be0af14a5..ce80adc30fe946 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1132,7 +1132,22 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF2 |
BYT_RT5640_MCLK_EN),
},
- { /* Vexia Edu Atla 10 tablet */
+ {
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ { /* Vexia Edu Atla 10 tablet 9V version */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
diff --git a/tools/testing/selftests/gpio/gpio-sim.sh b/tools/testing/selftests/gpio/gpio-sim.sh
index 6fb66a687f1737..bbc29ed9c60a91 100755
--- a/tools/testing/selftests/gpio/gpio-sim.sh
+++ b/tools/testing/selftests/gpio/gpio-sim.sh
@@ -46,12 +46,6 @@ remove_chip() {
rmdir $CONFIGFS_DIR/$CHIP || fail "Unable to remove the chip"
}

-configfs_cleanup() {
- for CHIP in `ls $CONFIGFS_DIR/`; do
- remove_chip $CHIP
- done
-}
-
create_chip() {
local CHIP=$1

@@ -105,6 +99,13 @@ disable_chip() {
echo 0 > $CONFIGFS_DIR/$CHIP/live || fail "Unable to disable the chip"
}

+configfs_cleanup() {
+ for CHIP in `ls $CONFIGFS_DIR/`; do
+ disable_chip $CHIP
+ remove_chip $CHIP
+ done
+}
+
configfs_chip_name() {
local CHIP=$1
local BANK=$2
@@ -181,6 +182,7 @@ create_chip chip
create_bank chip bank
enable_chip chip
test -n `cat $CONFIGFS_DIR/chip/bank/chip_name` || fail "chip_name doesn't work"
+disable_chip chip
remove_chip chip

echo "1.2. chip_name returns 'none' if the chip is still pending"
@@ -195,6 +197,7 @@ create_chip chip
create_bank chip bank
enable_chip chip
test -n `cat $CONFIGFS_DIR/chip/dev_name` || fail "dev_name doesn't work"
+disable_chip chip
remove_chip chip

echo "2. Creating and configuring simulated chips"
@@ -204,6 +207,7 @@ create_chip chip
create_bank chip bank
enable_chip chip
test "`get_chip_num_lines chip bank`" = "1" || fail "default number of lines is not 1"
+disable_chip chip
remove_chip chip

echo "2.2. Number of lines can be specified"
@@ -212,6 +216,7 @@ create_bank chip bank
set_num_lines chip bank 16
enable_chip chip
test "`get_chip_num_lines chip bank`" = "16" || fail "number of lines is not 16"
+disable_chip chip
remove_chip chip

echo "2.3. Label can be set"
@@ -220,6 +225,7 @@ create_bank chip bank
set_label chip bank foobar
enable_chip chip
test "`get_chip_label chip bank`" = "foobar" || fail "label is incorrect"
+disable_chip chip
remove_chip chip

echo "2.4. Label can be left empty"
@@ -227,6 +233,7 @@ create_chip chip
create_bank chip bank
enable_chip chip
test -z "`cat $CONFIGFS_DIR/chip/bank/label`" || fail "label is not empty"
+disable_chip chip
remove_chip chip

echo "2.5. Line names can be configured"
@@ -238,6 +245,7 @@ set_line_name chip bank 2 bar
enable_chip chip
test "`get_line_name chip bank 0`" = "foo" || fail "line name is incorrect"
test "`get_line_name chip bank 2`" = "bar" || fail "line name is incorrect"
+disable_chip chip
remove_chip chip

echo "2.6. Line config can remain unused if offset is greater than number of lines"
@@ -248,6 +256,7 @@ set_line_name chip bank 5 foobar
enable_chip chip
test "`get_line_name chip bank 0`" = "" || fail "line name is incorrect"
test "`get_line_name chip bank 1`" = "" || fail "line name is incorrect"
+disable_chip chip
remove_chip chip

echo "2.7. Line configfs directory names are sanitized"
@@ -267,6 +276,7 @@ for CHIP in $CHIPS; do
enable_chip $CHIP
done
for CHIP in $CHIPS; do
+ disable_chip $CHIP
remove_chip $CHIP
done

@@ -278,6 +288,7 @@ echo foobar > $CONFIGFS_DIR/chip/bank/label 2> /dev/null && \
fail "Setting label of a live chip should fail"
echo 8 > $CONFIGFS_DIR/chip/bank/num_lines 2> /dev/null && \
fail "Setting number of lines of a live chip should fail"
+disable_chip chip
remove_chip chip

echo "2.10. Can't create line items when chip is live"
@@ -285,6 +296,7 @@ create_chip chip
create_bank chip bank
enable_chip chip
mkdir $CONFIGFS_DIR/chip/bank/line0 2> /dev/null && fail "Creating line item should fail"
+disable_chip chip
remove_chip chip

echo "2.11. Probe errors are propagated to user-space"
@@ -316,6 +328,7 @@ mkdir -p $CONFIGFS_DIR/chip/bank/line4/hog
enable_chip chip
$BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 4 2> /dev/null && \
fail "Setting the value of a hogged line shouldn't succeed"
+disable_chip chip
remove_chip chip

echo "3. Controlling simulated chips"
@@ -331,6 +344,7 @@ test "$?" = "1" || fail "pull set incorrectly"
sysfs_set_pull chip bank 0 pull-down
$BASE_DIR/gpio-mockup-cdev /dev/`configfs_chip_name chip bank` 1
test "$?" = "0" || fail "pull set incorrectly"
+disable_chip chip
remove_chip chip

echo "3.2. Pull can be read from sysfs"
@@ -344,6 +358,7 @@ SYSFS_PATH=/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull
test `cat $SYSFS_PATH` = "pull-down" || fail "reading the pull failed"
sysfs_set_pull chip bank 0 pull-up
test `cat $SYSFS_PATH` = "pull-up" || fail "reading the pull failed"
+disable_chip chip
remove_chip chip

echo "3.3. Incorrect input in sysfs is rejected"
@@ -355,6 +370,7 @@ DEVNAME=`configfs_dev_name chip`
CHIPNAME=`configfs_chip_name chip bank`
SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull"
echo foobar > $SYSFS_PATH 2> /dev/null && fail "invalid input not detected"
+disable_chip chip
remove_chip chip

echo "3.4. Can't write to value"
@@ -365,6 +381,7 @@ DEVNAME=`configfs_dev_name chip`
CHIPNAME=`configfs_chip_name chip bank`
SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
echo 1 > $SYSFS_PATH 2> /dev/null && fail "writing to 'value' succeeded unexpectedly"
+disable_chip chip
remove_chip chip

echo "4. Simulated GPIO chips are functional"
@@ -382,6 +399,7 @@ $BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 0 &
sleep 0.1 # FIXME Any better way?
test `cat $SYSFS_PATH` = "1" || fail "incorrect value read from sysfs"
kill $!
+disable_chip chip
remove_chip chip

echo "4.2. Bias settings work correctly"
@@ -394,6 +412,7 @@ CHIPNAME=`configfs_chip_name chip bank`
SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
$BASE_DIR/gpio-mockup-cdev -b pull-up /dev/`configfs_chip_name chip bank` 0
test `cat $SYSFS_PATH` = "1" || fail "bias setting does not work"
+disable_chip chip
remove_chip chip

echo "GPIO $MODULE test PASS"
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 1c0dd2f7816782..771f237c43534e 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -197,6 +197,12 @@
#
# - pmtu_ipv6_route_change
# Same as above but with IPv6
+#
+# - pmtu_ipv4_mp_exceptions
+# Use the same topology as in pmtu_ipv4, but add routeable addresses
+# on host A and B on lo reachable via both routers. Host A and B
+# addresses have multipath routes to each other, b_r1 mtu = 1500.
+# Check that PMTU exceptions are created for both paths.

source lib.sh
source net_helper.sh
@@ -266,7 +272,8 @@ tests="
list_flush_ipv4_exception ipv4: list and flush cached exceptions 1
list_flush_ipv6_exception ipv6: list and flush cached exceptions 1
pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1
- pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1"
+ pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1
+ pmtu_ipv4_mp_exceptions ipv4: PMTU multipath nh exceptions 1"

# Addressing and routing for tests with routers: four network segments, with
# index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
@@ -343,6 +350,9 @@ tunnel6_a_addr="fd00:2::a"
tunnel6_b_addr="fd00:2::b"
tunnel6_mask="64"

+host4_a_addr="192.168.99.99"
+host4_b_addr="192.168.88.88"
+
dummy6_0_prefix="fc00:1000::"
dummy6_1_prefix="fc00:1001::"
dummy6_mask="64"
@@ -902,6 +912,52 @@ setup_ovs_bridge() {
run_cmd ip route add ${prefix6}:${b_r1}::1 via ${prefix6}:${a_r1}::2
}

+setup_multipath_new() {
+ # Set up host A with multipath routes to host B host4_b_addr
+ run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo
+ run_cmd ${ns_a} ip nexthop add id 401 via ${prefix4}.${a_r1}.2 dev veth_A-R1
+ run_cmd ${ns_a} ip nexthop add id 402 via ${prefix4}.${a_r2}.2 dev veth_A-R2
+ run_cmd ${ns_a} ip nexthop add id 403 group 401/402
+ run_cmd ${ns_a} ip route add ${host4_b_addr} src ${host4_a_addr} nhid 403
+
+ # Set up host B with multipath routes to host A host4_a_addr
+ run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo
+ run_cmd ${ns_b} ip nexthop add id 401 via ${prefix4}.${b_r1}.2 dev veth_B-R1
+ run_cmd ${ns_b} ip nexthop add id 402 via ${prefix4}.${b_r2}.2 dev veth_B-R2
+ run_cmd ${ns_b} ip nexthop add id 403 group 401/402
+ run_cmd ${ns_b} ip route add ${host4_a_addr} src ${host4_b_addr} nhid 403
+}
+
+setup_multipath_old() {
+ # Set up host A with multipath routes to host B host4_b_addr
+ run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo
+ run_cmd ${ns_a} ip route add ${host4_b_addr} \
+ src ${host4_a_addr} \
+ nexthop via ${prefix4}.${a_r1}.2 weight 1 \
+ nexthop via ${prefix4}.${a_r2}.2 weight 1
+
+ # Set up host B with multipath routes to host A host4_a_addr
+ run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo
+ run_cmd ${ns_b} ip route add ${host4_a_addr} \
+ src ${host4_b_addr} \
+ nexthop via ${prefix4}.${b_r1}.2 weight 1 \
+ nexthop via ${prefix4}.${b_r2}.2 weight 1
+}
+
+setup_multipath() {
+ if [ "$USE_NH" = "yes" ]; then
+ setup_multipath_new
+ else
+ setup_multipath_old
+ fi
+
+ # Set up routers with routes to dummies
+ run_cmd ${ns_r1} ip route add ${host4_a_addr} via ${prefix4}.${a_r1}.1
+ run_cmd ${ns_r2} ip route add ${host4_a_addr} via ${prefix4}.${a_r2}.1
+ run_cmd ${ns_r1} ip route add ${host4_b_addr} via ${prefix4}.${b_r1}.1
+ run_cmd ${ns_r2} ip route add ${host4_b_addr} via ${prefix4}.${b_r2}.1
+}
+
setup() {
[ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip

@@ -982,23 +1038,15 @@ link_get_mtu() {
}

route_get_dst_exception() {
- ns_cmd="${1}"
- dst="${2}"
- dsfield="${3}"
+ ns_cmd="${1}"; shift

- if [ -z "${dsfield}" ]; then
- dsfield=0
- fi
-
- ${ns_cmd} ip route get "${dst}" dsfield "${dsfield}"
+ ${ns_cmd} ip route get "$@"
}

route_get_dst_pmtu_from_exception() {
- ns_cmd="${1}"
- dst="${2}"
- dsfield="${3}"
+ ns_cmd="${1}"; shift

- mtu_parse "$(route_get_dst_exception "${ns_cmd}" "${dst}" "${dsfield}")"
+ mtu_parse "$(route_get_dst_exception "${ns_cmd}" "$@")"
}

check_pmtu_value() {
@@ -1141,10 +1189,10 @@ test_pmtu_ipv4_dscp_icmp_exception() {
run_cmd "${ns_a}" ping -q -M want -Q "${dsfield}" -c 1 -w 1 -s "${len}" "${dst2}"

# Check that exceptions have been created with the correct PMTU
- pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")"
+ pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")"
check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1

- pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")"
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")"
check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
}

@@ -1191,9 +1239,9 @@ test_pmtu_ipv4_dscp_udp_exception() {
UDP:"${dst2}":50000,tos="${dsfield}"

# Check that exceptions have been created with the correct PMTU
- pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")"
+ pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")"
check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
- pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")"
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")"
check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
}

@@ -2234,6 +2282,36 @@ test_pmtu_ipv6_route_change() {
test_pmtu_ipvX_route_change 6
}

+test_pmtu_ipv4_mp_exceptions() {
+ setup namespaces routing multipath || return $ksft_skip
+
+ trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \
+ "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \
+ "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2
+
+ # Set up initial MTU values
+ mtu "${ns_a}" veth_A-R1 2000
+ mtu "${ns_r1}" veth_R1-A 2000
+ mtu "${ns_r1}" veth_R1-B 1500
+ mtu "${ns_b}" veth_B-R1 1500
+
+ mtu "${ns_a}" veth_A-R2 2000
+ mtu "${ns_r2}" veth_R2-A 2000
+ mtu "${ns_r2}" veth_R2-B 1500
+ mtu "${ns_b}" veth_B-R2 1500
+
+ # Ping and expect two nexthop exceptions for two routes
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -c 1 -s 1800 "${host4_b_addr}"
+
+ # Check that exceptions have been created with the correct PMTU
+ pmtu_a_R1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R1)"
+ pmtu_a_R2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R2)"
+
+ check_pmtu_value "1500" "${pmtu_a_R1}" "exceeding MTU (veth_A-R1)" || return 1
+ check_pmtu_value "1500" "${pmtu_a_R2}" "exceeding MTU (veth_A-R2)" || return 1
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 488f4964365e73..855505c40ed8eb 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -921,10 +921,10 @@ kci_test_ipsec_offload()
# does driver have correct offload info
diff $sysfsf - << EOF
SA count=2 tx=3
-sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000
+sa[0] tx ipaddr=$dstip
sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
sa[0] key=0x34333231 38373635 32313039 36353433
-sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0
+sa[1] rx ipaddr=$srcip
sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
sa[1] key=0x34333231 38373635 32313039 36353433
EOF
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index 1525e88c6cf968..a985e57954820e 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -952,6 +952,14 @@ static int stop_tracing;
static struct trace_instance *hist_inst = NULL;
static void stop_hist(int sig)
{
+ if (stop_tracing) {
+ /*
+ * Stop requested twice in a row; abort event processing and
+ * exit immediately
+ */
+ tracefs_iterate_stop(hist_inst->inst);
+ return;
+ }
stop_tracing = 1;
if (hist_inst)
trace_instance_stop(hist_inst);
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 5a33789a375e3c..1fed4c8d8520f9 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -731,6 +731,14 @@ static int stop_tracing;
static struct trace_instance *top_inst = NULL;
static void stop_top(int sig)
{
+ if (stop_tracing) {
+ /*
+ * Stop requested twice in a row; abort event processing and
+ * exit immediately
+ */
+ tracefs_iterate_stop(top_inst->inst);
+ return;
+ }
stop_tracing = 1;
if (top_inst)
trace_instance_stop(top_inst);