mirror of https://github.com/armbian/build.git (synced 2025-09-20 05:01:25 +02:00)
diff --git a/Makefile b/Makefile
index 82c958299e982..bdb965177db52 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 45
+SUBLEVEL = 46
EXTRAVERSION =
NAME = Curry Ramen

diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 33bf3a6270027..45a920ba4921d 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -385,8 +385,7 @@ setup_memory(void *kernel_end)
#endif /* CONFIG_BLK_DEV_INITRD */
}

-int __init
-page_is_ram(unsigned long pfn)
+int page_is_ram(unsigned long pfn)
{
struct memclust_struct * cluster;
struct memdesc_struct * memdesc;
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 270742cc3ca49..e737dc8cd660c 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -10,7 +10,6 @@ config LOONGARCH
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
- select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 29725b37b35ca..ae436def7ee98 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -12,7 +12,6 @@
*/
#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
@@ -81,11 +80,6 @@ const char *get_system_type(void)
return "generic-loongson-machine";
}

-void __init arch_cpu_finalize_init(void)
-{
- alternative_instructions();
-}
-
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index aff6c33ab0c08..4c58ee7f95ecf 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* Relaxed I/O memory access primitives. These follow the Device memory
* ordering rules but do not guarantee any ordering relative to Normal memory
* accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses. Since the platform specification
- * defines that all I/O regions are strongly ordered on channel 2, no explicit
- * fences are required to enforce this ordering.
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
*/
/* FIXME: These are now the same as asm-generic */
#define __io_rbr() do {} while (0)
@@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#endif

/*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access. The memory barriers here are necessary as RISC-V
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory")
-#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory")
+#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
#define __io_aw() mmiowb_set_pending()

#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
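
Note: in RISC-V fence notation, r/w name normal memory reads/writes and i/o name device input/output, so "fence w,o" orders prior memory writes before a subsequent MMIO store, and "fence i,ir" orders a completed MMIO read before later memory reads. A minimal illustrative fragment of the write side (regs and DOORBELL are hypothetical names, not from this patch):

    buf[0] = 0xab;                                   /* normal memory write */
    __asm__ __volatile__ ("fence w,o" : : : "memory");
    writel_cpu(1, regs + DOORBELL);                  /* MMIO write cannot pass buf[0] */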
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 5372b708fae21..c08bb5c3b3857 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -281,7 +281,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
kbuf.buffer = initrd;
kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE;
- kbuf.top_down = false;
+ kbuf.top_down = true;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
@@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
* sym, instead of searching the whole relsec.
*/
case R_RISCV_PCREL_HI20:
+ case R_RISCV_CALL_PLT:
case R_RISCV_CALL:
*(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
ENCODE_UJTYPE_IMM(val - addr);
diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
index 6debb816e83dc..3cdf94b414567 100644
--- a/arch/x86/boot/compressed/idt_64.c
+++ b/arch/x86/boot/compressed/idt_64.c
@@ -63,7 +63,14 @@ void load_stage2_idt(void)
set_idt_entry(X86_TRAP_PF, boot_page_fault);

#ifdef CONFIG_AMD_MEM_ENCRYPT
- set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+ /*
+ * Clear the second stage #VC handler in case guest types
+ * needing #VC have not been detected.
+ */
+ if (sev_status & BIT(1))
+ set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
+ else
+ set_idt_entry(X86_TRAP_VC, NULL);
#endif

load_boot_idt(&boot_idt_desc);
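
Note: sev_status caches the SEV status MSR, and in mainline bit 1 is the SEV-ES-enabled bit, so the BIT(1) test keeps the stage-2 #VC handler only for guest types that actually raise #VC. Assumed bit layout (mirrors arch/x86/include/asm/msr-index.h, not part of this patch):

    /* Assumed layout of sev_status (MSR 0xC0010131): */
    #define MSR_AMD64_SEV_ENABLED_BIT	0	/* SEV */
    #define MSR_AMD64_SEV_ES_ENABLED_BIT	1	/* SEV-ES: #VC exceptions in use */
    #define MSR_AMD64_SEV_SNP_ENABLED_BIT	2	/* SEV-SNP */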
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index d63ad8f99f83a..e65f0968e0d9d 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -354,13 +354,46 @@ void sev_enable(struct boot_params *bp)
if (bp)
bp->cc_blob_address = 0;

+ /*
+ * Do an initial SEV capability check before snp_init() which
+ * loads the CPUID page and the same checks afterwards are done
+ * without the hypervisor and are trustworthy.
+ *
+ * If the HV fakes SEV support, the guest will crash'n'burn
+ * which is good enough.
+ */
+
+ /* Check for the SME/SEV support leaf */
+ eax = 0x80000000;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (eax < 0x8000001f)
+ return;
+
+ /*
+ * Check for the SME/SEV feature:
+ * CPUID Fn8000_001F[EAX]
+ * - Bit 0 - Secure Memory Encryption support
+ * - Bit 1 - Secure Encrypted Virtualization support
+ * CPUID Fn8000_001F[EBX]
+ * - Bits 5:0 - Pagetable bit position used to indicate encryption
+ */
+ eax = 0x8000001f;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ /* Check whether SEV is supported */
+ if (!(eax & BIT(1)))
+ return;
+
/*
* Setup/preliminary detection of SNP. This will be sanity-checked
* against CPUID/MSR values later.
*/
snp = snp_init(bp);

- /* Check for the SME/SEV support leaf */
+ /* Now repeat the checks with the SNP CPUID table. */
+
+ /* Recheck the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
@@ -368,7 +401,7 @@ void sev_enable(struct boot_params *bp)
return;

/*
- * Check for the SME/SEV feature:
+ * Recheck for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
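
Note: the same two-step probe (max extended leaf, then Fn8000_001F) can be reproduced from user space to see what the platform advertises. A stand-alone sketch using GCC's <cpuid.h>, not kernel code; the kernel-side recheck is stronger because after snp_init() the values come from the SNP CPUID table rather than the untrusted hypervisor:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        __get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if (eax < 0x8000001f)
            return 0;                        /* SME/SEV leaf absent */
        __get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
        printf("SME:%u SEV:%u C-bit:%u\n",
               eax & 1, (eax >> 1) & 1, ebx & 0x3f);
        return 0;
    }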
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 311eae30e0894..1288661397f09 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -322,8 +322,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)

/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
- if (end >= TASK_SIZE_MAX)
- end = TASK_SIZE_MAX;
+ if (end >= DEFAULT_MAP_WINDOW)
+ end = DEFAULT_MAP_WINDOW;
end -= len;

if (end > start) {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 45bf26862b99b..94ea13adb724a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -867,4 +867,6 @@ bool arch_is_platform_page(u64 paddr);
#define arch_is_platform_page arch_is_platform_page
#endif

+extern bool gds_ucode_mitigated(void);
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 43910eb55b2e9..239b302973d7a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -73,6 +73,7 @@ static const int amd_erratum_1054[] =
static const int amd_zenbleed[] =
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));

static const int amd_div0[] =
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index fd03f5a1f0ef0..e6939ebb606ab 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -514,11 +514,17 @@ INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_CPU_SRSO
/*
- * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
+ * GNU ld cannot do XOR until 2.41.
+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
+ *
+ * LLVM lld cannot do XOR until lld-17.
+ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
+ *
+ * Instead do: (A | B) - (A & B) in order to compute the XOR
* of the two function addresses:
*/
-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
- (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+ (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
"SRSO function pair won't alias");
#endif

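
Note: the linker-script workaround relies on the identity a ^ b == (a | b) - (a & b), which holds for all unsigned values because a | b == (a ^ b) + (a & b) and the two terms never carry into each other. Quick self-check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x1234, b = 0xabcd;    /* arbitrary example values */
        assert((a ^ b) == ((a | b) - (a & b)));
        return 0;
    }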
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 4cb2e483db533..e0437acb5cf75 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2410,15 +2410,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
*/
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));

- vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
- vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
- vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
- vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
- vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
+ BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
+ memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));

- svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
+ vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
+ vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
+ vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
+ vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
+ vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);

- if (ghcb_xcr0_is_valid(ghcb)) {
+ svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
+
+ if (kvm_ghcb_xcr0_is_valid(svm)) {
vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
kvm_update_cpuid_runtime(vcpu);
}
@@ -2429,14 +2432,21 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
control->exit_code_hi = upper_32_bits(exit_code);
control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
+ svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);

/* Clear the valid entries fields */
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

+static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+{
+ return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+}
+
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
- struct kvm_vcpu *vcpu;
+ struct vmcb_control_area *control = &svm->vmcb->control;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
struct ghcb *ghcb;
u64 exit_code;
u64 reason;
@@ -2447,7 +2457,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
* Retrieve the exit code now even though it may not be marked valid
* as it could help with debugging.
*/
- exit_code = ghcb_get_sw_exit_code(ghcb);
+ exit_code = kvm_ghcb_get_sw_exit_code(control);

/* Only GHCB Usage code 0 is supported */
if (ghcb->ghcb_usage) {
@@ -2457,56 +2467,56 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)

reason = GHCB_ERR_MISSING_INPUT;

- if (!ghcb_sw_exit_code_is_valid(ghcb) ||
- !ghcb_sw_exit_info_1_is_valid(ghcb) ||
- !ghcb_sw_exit_info_2_is_valid(ghcb))
+ if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
+ !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
+ !kvm_ghcb_sw_exit_info_2_is_valid(svm))
goto vmgexit_err;

- switch (ghcb_get_sw_exit_code(ghcb)) {
+ switch (exit_code) {
case SVM_EXIT_READ_DR7:
break;
case SVM_EXIT_WRITE_DR7:
- if (!ghcb_rax_is_valid(ghcb))
+ if (!kvm_ghcb_rax_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_RDTSC:
break;
case SVM_EXIT_RDPMC:
- if (!ghcb_rcx_is_valid(ghcb))
+ if (!kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_CPUID:
- if (!ghcb_rax_is_valid(ghcb) ||
- !ghcb_rcx_is_valid(ghcb))
+ if (!kvm_ghcb_rax_is_valid(svm) ||
+ !kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
- if (ghcb_get_rax(ghcb) == 0xd)
- if (!ghcb_xcr0_is_valid(ghcb))
+ if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
+ if (!kvm_ghcb_xcr0_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_INVD:
break;
case SVM_EXIT_IOIO:
- if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
- if (!ghcb_sw_scratch_is_valid(ghcb))
+ if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
+ if (!kvm_ghcb_sw_scratch_is_valid(svm))
goto vmgexit_err;
} else {
- if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
- if (!ghcb_rax_is_valid(ghcb))
+ if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
+ if (!kvm_ghcb_rax_is_valid(svm))
goto vmgexit_err;
}
break;
case SVM_EXIT_MSR:
- if (!ghcb_rcx_is_valid(ghcb))
+ if (!kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
- if (ghcb_get_sw_exit_info_1(ghcb)) {
- if (!ghcb_rax_is_valid(ghcb) ||
- !ghcb_rdx_is_valid(ghcb))
+ if (control->exit_info_1) {
+ if (!kvm_ghcb_rax_is_valid(svm) ||
+ !kvm_ghcb_rdx_is_valid(svm))
goto vmgexit_err;
}
break;
case SVM_EXIT_VMMCALL:
- if (!ghcb_rax_is_valid(ghcb) ||
- !ghcb_cpl_is_valid(ghcb))
+ if (!kvm_ghcb_rax_is_valid(svm) ||
+ !kvm_ghcb_cpl_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_RDTSCP:
@@ -2514,19 +2524,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
case SVM_EXIT_WBINVD:
break;
case SVM_EXIT_MONITOR:
- if (!ghcb_rax_is_valid(ghcb) ||
- !ghcb_rcx_is_valid(ghcb) ||
- !ghcb_rdx_is_valid(ghcb))
+ if (!kvm_ghcb_rax_is_valid(svm) ||
+ !kvm_ghcb_rcx_is_valid(svm) ||
+ !kvm_ghcb_rdx_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_MWAIT:
- if (!ghcb_rax_is_valid(ghcb) ||
- !ghcb_rcx_is_valid(ghcb))
+ if (!kvm_ghcb_rax_is_valid(svm) ||
+ !kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
break;
case SVM_VMGEXIT_MMIO_READ:
case SVM_VMGEXIT_MMIO_WRITE:
- if (!ghcb_sw_scratch_is_valid(ghcb))
+ if (!kvm_ghcb_sw_scratch_is_valid(svm))
goto vmgexit_err;
break;
case SVM_VMGEXIT_NMI_COMPLETE:
@@ -2542,8 +2552,6 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
return 0;

vmgexit_err:
- vcpu = &svm->vcpu;
-
if (reason == GHCB_ERR_INVALID_USAGE) {
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
ghcb->ghcb_usage);
@@ -2556,9 +2564,6 @@ vmgexit_err:
dump_ghcb(svm);
}

- /* Clear the valid entries fields */
- memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
-
ghcb_set_sw_exit_info_1(ghcb, 2);
ghcb_set_sw_exit_info_2(ghcb, reason);

@@ -2579,7 +2584,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
*/
if (svm->sev_es.ghcb_sa_sync) {
kvm_write_guest(svm->vcpu.kvm,
- ghcb_get_sw_scratch(svm->sev_es.ghcb),
+ svm->sev_es.sw_scratch,
svm->sev_es.ghcb_sa,
svm->sev_es.ghcb_sa_len);
svm->sev_es.ghcb_sa_sync = false;
@@ -2630,7 +2635,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
u64 scratch_gpa_beg, scratch_gpa_end;
void *scratch_va;

- scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
+ scratch_gpa_beg = svm->sev_es.sw_scratch;
if (!scratch_gpa_beg) {
pr_err("vmgexit: scratch gpa not provided\n");
goto e_scratch;
@@ -2844,16 +2849,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)

trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);

- exit_code = ghcb_get_sw_exit_code(ghcb);
-
+ sev_es_sync_from_ghcb(svm);
ret = sev_es_validate_vmgexit(svm);
if (ret)
return ret;

- sev_es_sync_from_ghcb(svm);
ghcb_set_sw_exit_info_1(ghcb, 0);
ghcb_set_sw_exit_info_2(ghcb, 0);

+ exit_code = kvm_ghcb_get_sw_exit_code(control);
switch (exit_code) {
case SVM_VMGEXIT_MMIO_READ:
ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index d0ed3f5952295..62f87492763e0 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -196,10 +196,12 @@ struct vcpu_sev_es_state {
/* SEV-ES support */
struct sev_es_save_area *vmsa;
struct ghcb *ghcb;
+ u8 valid_bitmap[16];
struct kvm_host_map ghcb_map;
bool received_first_sipi;

/* SEV-ES scratch area support */
+ u64 sw_scratch;
void *ghcb_sa;
u32 ghcb_sa_len;
bool ghcb_sa_sync;
@@ -688,4 +690,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

+#define DEFINE_KVM_GHCB_ACCESSORS(field) \
+ static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
+ { \
+ return test_bit(GHCB_BITMAP_IDX(field), \
+ (unsigned long *)&svm->sev_es.valid_bitmap); \
+ } \
+ \
+ static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
+ { \
+ return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
+ } \
+
+DEFINE_KVM_GHCB_ACCESSORS(cpl)
+DEFINE_KVM_GHCB_ACCESSORS(rax)
+DEFINE_KVM_GHCB_ACCESSORS(rcx)
+DEFINE_KVM_GHCB_ACCESSORS(rdx)
+DEFINE_KVM_GHCB_ACCESSORS(rbx)
+DEFINE_KVM_GHCB_ACCESSORS(rsi)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
+DEFINE_KVM_GHCB_ACCESSORS(xcr0)
+
#endif
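
Note: each DEFINE_KVM_GHCB_ACCESSORS(field) line above generates a validity test against KVM's snapshotted bitmap plus a guarded getter, so validation no longer re-reads the guest-writable GHCB page between checks. The mechanical expansion for rax is roughly:

    static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
    {
        /* Tests KVM's private copy of the bitmap, not the shared GHCB page. */
        return test_bit(GHCB_BITMAP_IDX(rax),
                        (unsigned long *)&svm->sev_es.valid_bitmap);
    }

    static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb)
    {
        return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
    }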
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f4b12c3c30a01..1931d3fcbbe09 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -311,8 +311,6 @@ u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;

-extern bool gds_ucode_mitigated(void);
-
/*
* When called, it means the previous get/set msr reached an invalid msr.
* Return true if we want to ignore/silent this failed msr access.
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index dbfa58e799e28..a0e347f6f97eb 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1712,6 +1712,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"BSG1160", },
{"BSG2150", },
{"CSC3551", },
+ {"CSC3556", },
{"INT33FE", },
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e374a8a2da46e..e4a6da81cd4b3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6602,6 +6602,7 @@ err_init_binder_device_failed:

err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+ binder_alloc_shrinker_exit();

return ret;
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 4fb89ef067d57..cd87f12733f27 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1087,6 +1087,12 @@ int binder_alloc_shrinker_init(void)
return ret;
}

+void binder_alloc_shrinker_exit(void)
+{
+ unregister_shrinker(&binder_shrinker);
+ list_lru_destroy(&binder_alloc_lru);
+}
+
/**
* check_buffer() - verify that buffer/offset is safe to access
* @alloc: binder_alloc for this proc
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 138d1d5af9ce3..dc1e2b01dd64d 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -129,6 +129,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
+extern void binder_alloc_shrinker_exit(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index d7ef4d0a7409f..c0759d49fd145 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -507,70 +507,6 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}

-/*
- * Some AMD fTPM versions may cause stutter
- * https://www.amd.com/en/support/kb/faq/pa-410
- *
- * Fixes are available in two series of fTPM firmware:
- * 6.x.y.z series: 6.0.18.6 +
- * 3.x.y.z series: 3.57.y.5 +
- */
-#ifdef CONFIG_X86
-static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
-{
- u32 val1, val2;
- u64 version;
- int ret;
-
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
- return false;
-
- ret = tpm_request_locality(chip);
- if (ret)
- return false;
-
- ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
- if (ret)
- goto release;
- if (val1 != 0x414D4400U /* AMD */) {
- ret = -ENODEV;
- goto release;
- }
- ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
- if (ret)
- goto release;
- ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
-
-release:
- tpm_relinquish_locality(chip);
-
- if (ret)
- return false;
-
- version = ((u64)val1 << 32) | val2;
- if ((version >> 48) == 6) {
- if (version >= 0x0006000000180006ULL)
- return false;
- } else if ((version >> 48) == 3) {
- if (version >= 0x0003005700000005ULL)
- return false;
- } else {
- return false;
- }
-
- dev_warn(&chip->dev,
- "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
- version);
-
- return true;
-}
-#else
-static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
-{
- return false;
-}
-#endif /* CONFIG_X86 */
-
static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
@@ -582,10 +518,20 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return tpm_get_random(chip, data, max);
}

+static bool tpm_is_hwrng_enabled(struct tpm_chip *chip)
+{
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ return false;
+ if (tpm_is_firmware_upgrade(chip))
+ return false;
+ if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
+ return false;
+ return true;
+}
+
static int tpm_add_hwrng(struct tpm_chip *chip)
{
- if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
- tpm_amd_is_rng_defective(chip))
+ if (!tpm_is_hwrng_enabled(chip))
return 0;

snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
@@ -690,7 +636,7 @@ int tpm_chip_register(struct tpm_chip *chip)
return 0;

out_hwrng:
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
+ if (tpm_is_hwrng_enabled(chip))
hwrng_unregister(&chip->hwrng);
out_ppi:
tpm_bios_log_teardown(chip);
@@ -715,8 +661,7 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
- !tpm_amd_is_rng_defective(chip))
+ if (tpm_is_hwrng_enabled(chip))
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index cbbedf52607c0..7f7f3bded4535 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -463,6 +463,28 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
}

+static int crb_check_flags(struct tpm_chip *chip)
+{
+ u32 val;
+ int ret;
+
+ ret = crb_request_locality(chip, 0);
+ if (ret)
+ return ret;
+
+ ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
+ if (ret)
+ goto release;
+
+ if (val == 0x414D4400U /* AMD */)
+ chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+
+release:
+ crb_relinquish_locality(chip, 0);
+
+ return ret;
+}
+
static const struct tpm_class_ops tpm_crb = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = crb_status,
@@ -800,6 +822,14 @@ static int crb_acpi_add(struct acpi_device *device)
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;

+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ goto out;
+
+ rc = crb_check_flags(chip);
+ if (rc)
+ goto out;
+
rc = tpm_chip_register(chip);

out:
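
Note: TPM2_PT_MANUFACTURER packs a four-character ASCII vendor ID into one 32-bit word; 0x414D4400 decodes to "AMD" plus a NUL. Stand-alone decode sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x414D4400U;
        printf("%c%c%c\n",
               (val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff);
        return 0;    /* prints: AMD */
    }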
diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
index b37165514d4e7..1af63c189039e 100644
--- a/drivers/cpuidle/dt_idle_genpd.c
+++ b/drivers/cpuidle/dt_idle_genpd.c
@@ -152,6 +152,30 @@ int dt_idle_pd_init_topology(struct device_node *np)
return 0;
}

+int dt_idle_pd_remove_topology(struct device_node *np)
+{
+ struct device_node *node;
+ struct of_phandle_args child, parent;
+ int ret;
+
+ for_each_child_of_node(np, node) {
+ if (of_parse_phandle_with_args(node, "power-domains",
+ "#power-domain-cells", 0, &parent))
+ continue;
+
+ child.np = node;
+ child.args_count = 0;
+ ret = of_genpd_remove_subdomain(&parent, &child);
+ of_node_put(parent.np);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
struct device *dt_idle_attach_cpu(int cpu, const char *name)
{
struct device *dev;
diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
index a95483d08a02a..3be1f70f55b5c 100644
--- a/drivers/cpuidle/dt_idle_genpd.h
+++ b/drivers/cpuidle/dt_idle_genpd.h
@@ -14,6 +14,8 @@ struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,

int dt_idle_pd_init_topology(struct device_node *np);

+int dt_idle_pd_remove_topology(struct device_node *np);
+
struct device *dt_idle_attach_cpu(int cpu, const char *name);

void dt_idle_detach_cpu(struct device *dev);
@@ -36,6 +38,11 @@ static inline int dt_idle_pd_init_topology(struct device_node *np)
return 0;
}

+static inline int dt_idle_pd_remove_topology(struct device_node *np)
+{
+ return 0;
+}
+
static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
{
return NULL;
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index e12b754e6398d..60d3c5f09ad67 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev)
return -EINVAL;
}

- chans = pdata->dma_channels;
+ if (!pdata->dma_channels) {
+ dev_info(&pdev->dev, "setting default channel number to 64");
+ chans = 64;
+ } else {
+ chans = pdata->dma_channels;
+ }
+
len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
if (!mcf_edma)
@@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_edma->drvdata = &mcf_data;
mcf_edma->big_endian = 1;

- if (!mcf_edma->n_chans) {
- dev_info(&pdev->dev, "setting default channel number to 64");
- mcf_edma->n_chans = 64;
- }
-
mutex_init(&mcf_edma->fsl_edma_mutex);

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 95a462a1f5111..b6e0ac8314e5c 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -192,7 +192,7 @@ struct owl_dma_pchan {
};

/**
- * struct owl_dma_pchan - Wrapper for DMA ENGINE channel
+ * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
* @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index b4731fe6bbc14..3cf0b38387ae5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -403,6 +403,12 @@ enum desc_status {
* of a channel can be BUSY at any time.
*/
BUSY,
+ /*
+ * Pause was called while descriptor was BUSY. Due to hardware
+ * limitations, only termination is possible for descriptors
+ * that have been paused.
+ */
+ PAUSED,
/*
* Sitting on the channel work_list but xfer done
* by PL330 core
@@ -2041,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
list_for_each_entry(desc, &pch->work_list, node) {

/* If already submitted */
- if (desc->status == BUSY)
+ if (desc->status == BUSY || desc->status == PAUSED)
continue;

ret = pl330_submit_req(pch->thread, desc);
@@ -2326,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
+ struct dma_pl330_desc *desc;
unsigned long flags;

pm_runtime_get_sync(pl330->ddma.dev);
@@ -2335,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan)
_stop(pch->thread);
spin_unlock(&pl330->lock);

+ list_for_each_entry(desc, &pch->work_list, node) {
+ if (desc->status == BUSY)
+ desc->status = PAUSED;
+ }
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
@@ -2425,7 +2436,7 @@
else if (running && desc == running)
transferred =
pl330_get_current_xferred_count(pch, desc);
- else if (desc->status == BUSY)
+ else if (desc->status == BUSY || desc->status == PAUSED)
/*
* Busy but not running means either just enqueued,
* or finished and not yet marked done
@@ -2442,6 +2453,9 @@
case DONE:
ret = DMA_COMPLETE;
break;
+ case PAUSED:
+ ret = DMA_PAUSED;
+ break;
case PREP:
case BUSY:
ret = DMA_IN_PROGRESS;
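
Note: the client-visible effect of the new PAUSED state is that polling a paused transfer now reports DMA_PAUSED instead of DMA_IN_PROGRESS. A hedged consumer-side sketch; chan and cookie come from the usual slave-DMA setup, which is not shown here:

    #include <linux/dmaengine.h>
    #include <linux/printk.h>

    static void report_pause(struct dma_chan *chan, dma_cookie_t cookie)
    {
        struct dma_tx_state state;

        if (dmaengine_pause(chan))
            return;                 /* pause unsupported or failed */
        if (dmaengine_tx_status(chan, cookie, &state) == DMA_PAUSED)
            pr_info("transfer paused; pl330 only allows terminate from here\n");
    }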
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 803676e307d73..fef12e57b1f13 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -425,6 +425,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->set_config = gpio_sim_set_config;
gc->to_irq = gpio_sim_to_irq;
gc->free = gpio_sim_free;
+ gc->can_sleep = true;

ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index e73885a4dc328..afb42a8e916fe 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>

-#define WS16C48_EXTENT 10
+#define WS16C48_EXTENT 11
#define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)

static unsigned int base[MAX_NUM_WS16C48];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c0e782a95e72e..0c962f996aff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -242,6 +242,7 @@ extern int amdgpu_num_kcq;

#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;

#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -283,6 +284,9 @@ extern int amdgpu_vcnfw_log;
#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)

+/* Extra time delay(in ms) to eliminate the influence of temperature momentary fluctuation */
+#define AMDGPU_SWCTF_EXTRA_DELAY 50
+
struct amdgpu_device;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
@@ -1262,6 +1266,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b60b6e6149bf7..fdb53d4394f30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -287,7 +287,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,

if (!p->gang_size) {
ret = -EINVAL;
- goto free_partial_kdata;
+ goto free_all_kdata;
}

for (i = 0; i < p->gang_size; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30c97ee375636..773383e660e8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1333,6 +1333,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}

+/*
+ * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+ * Disable S/G on such systems until we have a proper fix.
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+ */
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+{
+ switch (amdgpu_sg_display) {
+ case -1:
+ break;
+ case 0:
+ return false;
+ case 1:
+ return true;
+ default:
+ return false;
+ }
+ if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+ (adev->gmc.real_vram_size / 1024) >= 64000000) {
+ DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+ return false;
+ }
+ return true;
+}
+
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
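
Note: totalram_pages() << (PAGE_SHIFT - 10) converts a page count to KiB (with 4 KiB pages PAGE_SHIFT is 12, so the shift is 2), VRAM is divided down to KiB as well, and 64000000 KiB is the decimal "64 GB" cutoff. Worked check with assumed values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pages = 16UL * 1024 * 1024;  /* 16Mi 4-KiB pages = 64 GiB */
        unsigned long kib = pages << (12 - 10);    /* pages -> KiB */
        printf("%lu KiB vs threshold 64000000 KiB\n", kib);  /* 67108864 KiB */
        return 0;
    }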
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 49a023f59b2fc..6e5bc74846952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -185,6 +185,7 @@ int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */

static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);

@@ -929,6 +930,16 @@ module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = enable)");
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);

+/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 86e07cc1d3dcc..9f718b98da1f7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1634,6 +1634,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
+ if (init_data.flags.gpu_vm_support)
+ init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);

if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8f9c60ed6f8b8..674ab6d9b31e4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1079,6 +1079,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
struct pipe_ctx *pipe;
+ struct timing_generator *tg;

if (dangling_context == NULL)
return;
@@ -1122,6 +1123,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)

if (should_disable && old_stream) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
/* When disabling plane for a phantom pipe, we must turn on the
* phantom OTG so the disable programming gets the double buffer
* update. Otherwise the pipe will be left in a partially disabled
@@ -1129,7 +1131,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* again for different use.
*/
if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
- pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
}
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1146,6 +1149,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
dc->hwss.post_unlock_program_front_end(dc, dangling_context);
}
+ /* We need to put the phantom OTG back into it's default (disabled) state or we
+ * can get corruption when transition from one SubVP config to a different one.
+ * The OTG is set to disable on falling edge of VUPDATE so the plane disable
+ * will still get it's double buffer update.
+ */
+ if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+ }
}
}

@@ -1942,6 +1954,9 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct pipe_ctx *pipe;
bool handle_exit_odm2to1 = false;

+ if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
+ return res;
+
if (!streams_changed(dc, streams, stream_count))
return res;

@@ -1984,21 +1999,33 @@

dc_resource_state_copy_construct_current(dc, context);

- /*
- * Previous validation was perfomred with fast_validation = true and
- * the full DML state required for hardware programming was skipped.
- *
- * Re-validate here to calculate these parameters / watermarks.
- */
- res = dc_validate_global_state(dc, context, false);
+ res = dc_validate_with_context(dc, set, stream_count, context, false);
if (res != DC_OK) {
- DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
- dc_status_to_str(res), res);
- return res;
+ BREAK_TO_DEBUGGER();
+ goto fail;
}

res = dc_commit_state_no_check(dc, context);

+ for (i = 0; i < stream_count; i++) {
+ for (j = 0; j < context->stream_count; j++) {
+ if (streams[i]->stream_id == context->streams[j]->stream_id)
+ streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
+
+ if (dc_is_embedded_signal(streams[i]->signal)) {
+ struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+
+ if (dc->hwss.is_abm_supported)
+ status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
+ else
+ status->is_abm_supported = true;
+ }
+ }
+ }
+
+fail:
+ dc_release_state(context);
+
context_alloc_fail:

DC_LOG_DC("%s Finished.\n", __func__);
@@ -3122,6 +3149,19 @@ static bool update_planes_and_stream_state(struct dc *dc,

if (update_type == UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ /* For phantom pipes we remove and create a new set of phantom pipes
+ * for each full update (because we don't know if we'll need phantom
+ * pipes until after the first round of validation). However, if validation
+ * fails we need to keep the existing phantom pipes (because we don't update
+ * the dc->current_state).
+ *
+ * The phantom stream/plane refcount is decremented for validation because
+ * we assume it'll be removed (the free comes when the dc_state is freed),
+ * but if validation fails we have to increment back the refcount so it's
+ * consistent.
+ */
+ if (dc->res_pool->funcs->retain_phantom_pipes)
+ dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3987,6 +4027,18 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_context *dc_ctx = dc->ctx;
int i, j;

+ /* TODO: Since change commit sequence can have a huge impact,
+ * we decided to only enable it for DCN3x. However, as soon as
+ * we get more confident about this change we'll need to enable
+ * the new sequence for all ASICs.
+ */
+ if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ dc_update_planes_and_stream(dc, srf_updates,
+ surface_count, stream,
+ stream_update);
+ return;
+ }
+
stream_status = dc_stream_get_status(stream);
context = dc->current_state;

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index c2c6c4587a5ce..bbaeb6c567d0d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1141,6 +1141,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_caps.dongle_type !=
DISPLAY_DONGLE_DP_HDMI_CONVERTER))
converter_disable_audio = true;
+
+ /* limited link rate to HBR3 for DPIA until we implement USB4 V2 */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
+ link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
break;
}

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a26e52abc9898..66923f51037a3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2616,15 +2616,241 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
return dc->res_pool->res_cap->num_dsc > 0;
}

+static bool planes_changed_for_existing_stream(struct dc_state *context,
+ struct dc_stream_state *stream,
+ const struct dc_validation_set set[],
+ int set_count)
+{
+ int i, j;
+ struct dc_stream_status *stream_status = NULL;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+ }
+
+ if (!stream_status)
+ ASSERT(0);
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+ break;
+
+ if (i == set_count)
+ ASSERT(0);
+
+ if (set[i].plane_count != stream_status->plane_count)
+ return true;
+
+ for (j = 0; j < set[i].plane_count; j++)
+ if (set[i].plane_states[j] != stream_status->plane_states[j])
+ return true;
+
+ return false;
+}
+
+/**
+ * dc_validate_with_context - Validate and update the potential new stream in the context object
+ *
+ * @dc: Used to get the current state status
+ * @set: An array of dc_validation_set with all the current streams reference
+ * @set_count: Total of streams
+ * @context: New context
+ * @fast_validate: Enable or disable fast validation
+ *
+ * This function updates the potential new stream in the context object. It
+ * creates multiple lists for the add, remove, and unchanged streams. In
+ * particular, if the unchanged streams have a plane that changed, it is
+ * necessary to remove all planes from the unchanged streams. In summary, this
+ * function is responsible for validating the new context.
+ *
+ * Return:
+ * In case of success, return DC_OK (1), otherwise, return a DC error.
+ */
+enum dc_status dc_validate_with_context(struct dc *dc,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
+ struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
+ struct dc_stream_state *add_streams[MAX_PIPES] = { 0 };
+ int old_stream_count = context->stream_count;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int unchanged_streams_count = 0;
+ int del_streams_count = 0;
+ int add_streams_count = 0;
+ bool found = false;
+ int i, j, k;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* First build a list of streams to be remove from current context */
+ for (i = 0; i < old_stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ for (j = 0; j < set_count; j++) {
+ if (stream == set[j].stream) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ del_streams[del_streams_count++] = stream;
+
+ found = false;
+ }
+
+ /* Second, build a list of new streams */
+ for (i = 0; i < set_count; i++) {
+ struct dc_stream_state *stream = set[i].stream;
+
+ for (j = 0; j < old_stream_count; j++) {
+ if (stream == context->streams[j]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ add_streams[add_streams_count++] = stream;
+
+ found = false;
+ }
+
+ /* Build a list of unchanged streams which is necessary for handling
+ * planes change such as added, removed, and updated.
+ */
+ for (i = 0; i < set_count; i++) {
+ /* Check if stream is part of the delete list */
+ for (j = 0; j < del_streams_count; j++) {
+ if (set[i].stream == del_streams[j]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Check if stream is part of the add list */
+ for (j = 0; j < add_streams_count; j++) {
+ if (set[i].stream == add_streams[j]) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found)
+ unchanged_streams[unchanged_streams_count++] = set[i].stream;
+
+ found = false;
+ }
+
+ /* Remove all planes for unchanged streams if planes changed */
+ for (i = 0; i < unchanged_streams_count; i++) {
+ if (planes_changed_for_existing_stream(context,
+ unchanged_streams[i],
+ set,
+ set_count)) {
+ if (!dc_rem_all_planes_for_stream(dc,
+ unchanged_streams[i],
+ context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+ }
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ /* Need to cpy the dwb data from the old stream in order to efc to work */
+ if (del_streams[i]->num_wb_info > 0) {
+ for (j = 0; j < add_streams_count; j++) {
+ if (del_streams[i]->sink == add_streams[j]->sink) {
+ add_streams[j]->num_wb_info = del_streams[i]->num_wb_info;
+ for (k = 0; k < del_streams[i]->num_wb_info; k++)
+ add_streams[j]->writeback_info[k] = del_streams[i]->writeback_info[k];
+ }
+ }
+ }
+
+ if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+ /* Swap seamless boot stream to pipe 0 (if needed) to ensure pipe_ctx
+ * matches. This may change in the future if seamless_boot_stream can be
+ * multiple.
+ */
+ for (i = 0; i < add_streams_count; i++) {
+ mark_seamless_boot_stream(dc, add_streams[i]);
+ if (add_streams[i]->apply_seamless_boot_optimization && i != 0) {
+ struct dc_stream_state *temp = add_streams[0];
+
+ add_streams[0] = add_streams[i];
+ add_streams[i] = temp;
+ break;
+ }
+ }
+
+ /* Add new streams and then add all planes for the new stream */
+ for (i = 0; i < add_streams_count; i++) {
+ calculate_phy_pix_clks(add_streams[i]);
+ res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+
+ if (!add_all_planes_for_stream(dc, add_streams[i], set, set_count, context)) {
+ res = DC_FAIL_ATTACH_SURFACES;
+ goto fail;
+ }
+ }
+
+ /* Add all planes for unchanged streams if planes changed */
+ for (i = 0; i < unchanged_streams_count; i++) {
+ if (planes_changed_for_existing_stream(context,
+ unchanged_streams[i],
+ set,
+ set_count)) {
+ if (!add_all_planes_for_stream(dc, unchanged_streams[i], set, set_count, context)) {
+ res = DC_FAIL_ATTACH_SURFACES;
+ goto fail;
+ }
+ }
+ }
+
+ res = dc_validate_global_state(dc, context, fast_validate);
+
+fail:
+ if (res != DC_OK)
+ DC_LOG_WARNING("%s:resource validation failed, dc_status:%d\n",
+ __func__,
+ res);
+
+ return res;
+}

/**
- * dc_validate_global_state() - Determine if HW can support a given state
- * Checks HW resource availability and bandwidth requirement.
+ * dc_validate_global_state() - Determine if hardware can support a given state
+ *
* @dc: dc struct for this driver
* @new_ctx: state to be validated
* @fast_validate: set to true if only yes/no to support matters
*
- * Return: DC_OK if the result can be programmed. Otherwise, an error code.
+ * Checks hardware resource availability and bandwidth requirement.
+ *
+ * Return:
+ * DC_OK if the result can be programmed. Otherwise, an error code.
*/
enum dc_status dc_validate_global_state(
struct dc *dc,
@@ -3757,4 +3983,4 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
}

return true;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 6409b8d8ff71e..a4540f83aae59 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1298,6 +1298,12 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);

+enum dc_status dc_validate_with_context(struct dc *dc,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context,
+ bool fast_validate);
+
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service);

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a6fde27d13479..3940271189632 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2284,6 +2284,12 @@ void dcn10_enable_timing_synchronization(
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
+
+ if (!tg->funcs->is_tg_enabled(tg)) {
+ DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
+ return;
+ }
+
if (opp->funcs->opp_program_dpg_dimensions)
opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index e5b7ef7422b83..50dc834046446 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
int cur_rom_en = 0;

if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
- cur_rom_en = 1;
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
+ }

REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index fe941b103de81..a974f86e718a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -167,6 +167,13 @@ static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}

+static void optc32_disable_phantom_otg(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
+}
+
static void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
@@ -260,6 +267,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.enable_crtc = optc32_enable_crtc,
.disable_crtc = optc32_disable_crtc,
.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable,
+ .disable_phantom_crtc = optc32_disable_phantom_otg,
/* used by enable_timing_synchronization. Not need for FPGA */
.is_counter_moving = optc1_is_counter_moving,
.get_position = optc1_get_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 814620e6638fd..2b8700b291a45 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1719,6 +1719,27 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
return phantom_stream;
}

+void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct dc_plane_state *phantom_plane = NULL;
+ struct dc_stream_state *phantom_stream = NULL;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe &&
+ pipe->plane_state && pipe->stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ phantom_plane = pipe->plane_state;
+ phantom_stream = pipe->stream;
+
+ dc_plane_state_retain(phantom_plane);
+ dc_stream_retain(phantom_stream);
+ }
+ }
+}
+
// return true if removed piped from ctx, false otherwise
bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
{
@@ -2035,6 +2056,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.remove_phantom_pipes = dcn32_remove_phantom_pipes,
+ .retain_phantom_pipes = dcn32_retain_phantom_pipes,
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 615244a1f95d5..026cf13d203fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -83,6 +83,9 @@ bool dcn32_release_post_bldn_3dlut(
bool dcn32_remove_phantom_pipes(struct dc *dc,
struct dc_state *context);

+void dcn32_retain_phantom_pipes(struct dc *dc,
+ struct dc_state *context);
+
void dcn32_add_phantom_pipes(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 213ff3672bd54..aed92ced7b762 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -1619,6 +1619,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
.remove_phantom_pipes = dcn32_remove_phantom_pipes,
+ .retain_phantom_pipes = dcn32_retain_phantom_pipes,
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
|
|
index 6409b8d8ff71e..a4540f83aae59 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dc.h
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
|
|
@@ -1298,6 +1298,12 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
|
|
|
|
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
|
|
|
|
+enum dc_status dc_validate_with_context(struct dc *dc,
|
|
+ const struct dc_validation_set set[],
|
|
+ int set_count,
|
|
+ struct dc_state *context,
|
|
+ bool fast_validate);
|
|
+
|
|
bool dc_set_generic_gpio_for_stereo(bool enable,
|
|
struct gpio_service *gpio_service);
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
index a6fde27d13479..3940271189632 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
@@ -2284,6 +2284,12 @@ void dcn10_enable_timing_synchronization(
|
|
opp = grouped_pipes[i]->stream_res.opp;
|
|
tg = grouped_pipes[i]->stream_res.tg;
|
|
tg->funcs->get_otg_active_size(tg, &width, &height);
|
|
+
|
|
+ if (!tg->funcs->is_tg_enabled(tg)) {
|
|
+ DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
if (opp->funcs->opp_program_dpg_dimensions)
|
|
opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
|
|
index e5b7ef7422b83..50dc834046446 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
|
|
@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
|
|
int cur_rom_en = 0;
|
|
|
|
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
|
|
- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
|
|
- cur_rom_en = 1;
|
|
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
|
|
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
|
|
+ cur_rom_en = 1;
|
|
+ }
|
|
+ }
|
|
|
|
REG_UPDATE_3(CURSOR0_CONTROL,
|
|
CUR0_MODE, color_format,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
|
|
index fe941b103de81..a974f86e718a8 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
|
|
@@ -167,6 +167,13 @@ static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
|
|
REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
|
|
}
|
|
|
|
+static void optc32_disable_phantom_otg(struct timing_generator *optc)
|
|
+{
|
|
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
|
|
+
|
|
+ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
|
|
+}
|
|
+
|
|
static void optc32_set_odm_bypass(struct timing_generator *optc,
|
|
const struct dc_crtc_timing *dc_crtc_timing)
|
|
{
|
|
@@ -260,6 +267,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
|
|
.enable_crtc = optc32_enable_crtc,
|
|
.disable_crtc = optc32_disable_crtc,
|
|
.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable,
|
|
+ .disable_phantom_crtc = optc32_disable_phantom_otg,
|
|
/* used by enable_timing_synchronization. Not need for FPGA */
|
|
.is_counter_moving = optc1_is_counter_moving,
|
|
.get_position = optc1_get_position,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
|
|
index 814620e6638fd..2b8700b291a45 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
|
|
@@ -1719,6 +1719,27 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
|
|
return phantom_stream;
|
|
}
|
|
|
|
+void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
|
|
+{
|
|
+ int i;
|
|
+ struct dc_plane_state *phantom_plane = NULL;
|
|
+ struct dc_stream_state *phantom_stream = NULL;
|
|
+
|
|
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
|
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
|
+
|
|
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe &&
|
|
+ pipe->plane_state && pipe->stream &&
|
|
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
|
|
+ phantom_plane = pipe->plane_state;
|
|
+ phantom_stream = pipe->stream;
|
|
+
|
|
+ dc_plane_state_retain(phantom_plane);
|
|
+ dc_stream_retain(phantom_stream);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
// return true if removed piped from ctx, false otherwise
|
|
bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
|
|
{
|
|
@@ -2035,6 +2056,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
|
|
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
|
|
.add_phantom_pipes = dcn32_add_phantom_pipes,
|
|
.remove_phantom_pipes = dcn32_remove_phantom_pipes,
|
|
+ .retain_phantom_pipes = dcn32_retain_phantom_pipes,
|
|
};
|
|
|
|
static uint32_t read_pipe_fuses(struct dc_context *ctx)
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
|
|
index 615244a1f95d5..026cf13d203fc 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
|
|
@@ -83,6 +83,9 @@ bool dcn32_release_post_bldn_3dlut(
|
|
bool dcn32_remove_phantom_pipes(struct dc *dc,
|
|
struct dc_state *context);
|
|
|
|
+void dcn32_retain_phantom_pipes(struct dc *dc,
|
|
+ struct dc_state *context);
|
|
+
|
|
void dcn32_add_phantom_pipes(struct dc *dc,
|
|
struct dc_state *context,
|
|
display_e2e_pipe_params_st *pipes,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
|
|
index 213ff3672bd54..aed92ced7b762 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
|
|
@@ -1619,6 +1619,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
|
|
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
|
|
.add_phantom_pipes = dcn32_add_phantom_pipes,
|
|
.remove_phantom_pipes = dcn32_remove_phantom_pipes,
|
|
+ .retain_phantom_pipes = dcn32_retain_phantom_pipes,
|
|
};
|
|
|
|
static uint32_t read_pipe_fuses(struct dc_context *ctx)
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
|
|
index 9498105c98ab3..5fa7c4772af4f 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
|
|
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
|
|
@@ -234,6 +234,7 @@ struct resource_funcs {
|
|
unsigned int index);
|
|
|
|
bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
|
|
+ void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);
|
|
void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
|
|
};
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
|
|
index f96fb425345e4..789cf9406ca5b 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
|
|
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
|
|
@@ -185,6 +185,7 @@ struct timing_generator_funcs {
|
|
#ifdef CONFIG_DRM_AMD_DC_DCN
|
|
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
|
|
#endif
|
|
+ void (*disable_phantom_crtc)(struct timing_generator *tg);
|
|
bool (*immediate_disable_crtc)(struct timing_generator *tg);
|
|
bool (*is_counter_moving)(struct timing_generator *tg);
|
|
void (*get_position)(struct timing_generator *tg,
|
|
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
|
|
index d18162e9ed1da..f3d64c78feaa8 100644
|
|
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
|
|
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
|
|
@@ -139,6 +139,8 @@ enum amd_pp_sensors {
|
|
AMDGPU_PP_SENSOR_MIN_FAN_RPM,
|
|
AMDGPU_PP_SENSOR_MAX_FAN_RPM,
|
|
AMDGPU_PP_SENSOR_VCN_POWER_STATE,
|
|
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
|
|
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
|
|
};
|
|
|
|
enum amd_pp_task {
|
|
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
|
|
index cb5b9df78b4db..338fce249f5ab 100644
|
|
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
|
|
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
|
|
@@ -89,6 +89,8 @@ struct amdgpu_dpm_thermal {
|
|
int max_mem_crit_temp;
|
|
/* memory max emergency(shutdown) temp */
|
|
int max_mem_emergency_temp;
|
|
+ /* SWCTF threshold */
|
|
+ int sw_ctf_threshold;
|
|
/* was last interrupt low to high or high to low */
|
|
bool high_to_low;
|
|
/* interrupt source */
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
|
|
index 1159ae114dd02..179e1c593a53f 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/gfp.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/firmware.h>
|
|
+#include <linux/reboot.h>
|
|
#include "amd_shared.h"
|
|
#include "amd_powerplay.h"
|
|
#include "power_state.h"
|
|
@@ -91,6 +92,45 @@ static int pp_early_init(void *handle)
|
|
return 0;
|
|
}
|
|
|
|
+static void pp_swctf_delayed_work_handler(struct work_struct *work)
|
|
+{
|
|
+ struct pp_hwmgr *hwmgr =
|
|
+ container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
|
|
+ struct amdgpu_device *adev = hwmgr->adev;
|
|
+ struct amdgpu_dpm_thermal *range =
|
|
+ &adev->pm.dpm.thermal;
|
|
+ uint32_t gpu_temperature, size;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * If the hotspot/edge temperature is confirmed as below SW CTF setting point
|
|
+ * after the delay enforced, nothing will be done.
|
|
+ * Otherwise, a graceful shutdown will be performed to prevent further damage.
|
|
+ */
|
|
+ if (range->sw_ctf_threshold &&
|
|
+ hwmgr->hwmgr_func->read_sensor) {
|
|
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
|
|
+ AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
|
|
+ &gpu_temperature,
|
|
+ &size);
|
|
+ /*
|
|
+ * For some legacy ASICs, hotspot temperature retrieving might be not
|
|
+ * supported. Check the edge temperature instead then.
|
|
+ */
|
|
+ if (ret == -EOPNOTSUPP)
|
|
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
|
|
+ AMDGPU_PP_SENSOR_EDGE_TEMP,
|
|
+ &gpu_temperature,
|
|
+ &size);
|
|
+ if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
|
|
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
|
|
+ orderly_poweroff(true);
|
|
+}
|
|
+
|
|
static int pp_sw_init(void *handle)
|
|
{
|
|
struct amdgpu_device *adev = handle;
|
|
@@ -101,6 +141,10 @@ static int pp_sw_init(void *handle)
|
|
|
|
pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
|
|
|
|
+ if (!ret)
|
|
+ INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
|
|
+ pp_swctf_delayed_work_handler);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -136,6 +180,8 @@ static int pp_hw_fini(void *handle)
|
|
struct amdgpu_device *adev = handle;
|
|
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
|
|
|
|
+ cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
|
|
+
|
|
hwmgr_hw_fini(hwmgr);
|
|
|
|
return 0;
|
|
@@ -222,6 +268,8 @@ static int pp_suspend(void *handle)
|
|
struct amdgpu_device *adev = handle;
|
|
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
|
|
|
|
+ cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
|
|
+
|
|
return hwmgr_suspend(hwmgr);
|
|
}
|
|
|
|
@@ -769,10 +817,16 @@ static int pp_dpm_read_sensor(void *handle, int idx,
|
|
|
|
switch (idx) {
|
|
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
|
|
- *((uint32_t *)value) = hwmgr->pstate_sclk;
|
|
+ *((uint32_t *)value) = hwmgr->pstate_sclk * 100;
|
|
return 0;
|
|
case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
|
|
- *((uint32_t *)value) = hwmgr->pstate_mclk;
|
|
+ *((uint32_t *)value) = hwmgr->pstate_mclk * 100;
|
|
+ return 0;
|
|
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
|
|
+ *((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
|
|
+ return 0;
|
|
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
|
|
+ *((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
|
|
return 0;
|
|
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
|
|
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
|
|
index 981dc8c7112d6..90452b66e1071 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
|
|
@@ -241,7 +241,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
|
|
TEMP_RANGE_MAX,
|
|
TEMP_RANGE_MIN,
|
|
TEMP_RANGE_MAX,
|
|
- TEMP_RANGE_MAX};
|
|
+ TEMP_RANGE_MAX,
|
|
+ 0};
|
|
struct amdgpu_device *adev = hwmgr->adev;
|
|
|
|
if (!hwmgr->not_vf)
|
|
@@ -265,6 +266,7 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
|
|
adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
|
|
adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
|
|
adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
|
|
+ adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold;
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
|
|
index ede71de2343dc..86d6e88c73862 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
|
|
@@ -375,6 +375,17 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
|
|
return 0;
|
|
}
|
|
|
|
+static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
|
|
+{
|
|
+ hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
|
|
+ hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
|
|
+
|
|
+ smum_send_msg_to_smc(hwmgr,
|
|
+ PPSMC_MSG_GetMaxGfxclkFrequency,
|
|
+ &hwmgr->pstate_sclk_peak);
|
|
+ hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK;
|
|
+}
|
|
+
|
|
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
{
|
|
struct amdgpu_device *adev = hwmgr->adev;
|
|
@@ -398,6 +409,8 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
return ret;
|
|
}
|
|
|
|
+ smu10_populate_umdpstate_clocks(hwmgr);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -574,9 +587,6 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
|
|
|
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
|
|
|
|
- hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
|
|
- hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
|
|
-
|
|
/* enable the pp_od_clk_voltage sysfs file */
|
|
hwmgr->od_enabled = 1;
|
|
/* disabled fine grain tuning function by default */
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
|
|
index 7ef7e81525a30..a31a62a1ce0b2 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
|
|
@@ -1501,6 +1501,67 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
|
|
return ret;
|
|
}
|
|
|
|
+static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
|
|
+{
|
|
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
|
|
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
|
|
+ int32_t tmp_sclk, count, percentage;
|
|
+
|
|
+ if (golden_dpm_table->mclk_table.count == 1) {
|
|
+ percentage = 70;
|
|
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
|
|
+ } else {
|
|
+ percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
|
|
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
|
|
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
|
|
+ }
|
|
+
|
|
+ tmp_sclk = hwmgr->pstate_mclk * percentage / 100;
|
|
+
|
|
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
|
|
+ struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
|
|
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
|
|
+
|
|
+ for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
|
|
+ if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
|
|
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (count < 0)
|
|
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;
|
|
+
|
|
+ hwmgr->pstate_sclk_peak =
|
|
+ vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
|
|
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
|
|
+ struct phm_ppt_v1_information *table_info =
|
|
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
|
|
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
|
|
+ table_info->vdd_dep_on_sclk;
|
|
+
|
|
+ for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
|
|
+ if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
|
|
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (count < 0)
|
|
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;
|
|
+
|
|
+ hwmgr->pstate_sclk_peak =
|
|
+ vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
|
|
+ }
|
|
+
|
|
+ hwmgr->pstate_mclk_peak =
|
|
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
|
|
+
|
|
+ /* make sure the output is in Mhz */
|
|
+ hwmgr->pstate_sclk /= 100;
|
|
+ hwmgr->pstate_mclk /= 100;
|
|
+ hwmgr->pstate_sclk_peak /= 100;
|
|
+ hwmgr->pstate_mclk_peak /= 100;
|
|
+}
|
|
+
|
|
static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
{
|
|
int tmp_result = 0;
|
|
@@ -1625,6 +1686,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
PP_ASSERT_WITH_CODE((0 == tmp_result),
|
|
"pcie performance request failed!", result = tmp_result);
|
|
|
|
+ smu7_populate_umdpstate_clocks(hwmgr);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -3143,15 +3206,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
|
|
for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
|
|
count >= 0; count--) {
|
|
if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
|
|
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
|
|
*sclk_mask = count;
|
|
break;
|
|
}
|
|
}
|
|
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
|
|
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
|
|
*sclk_mask = 0;
|
|
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
|
|
- }
|
|
|
|
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
|
|
*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
|
|
@@ -3161,15 +3221,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
|
|
|
|
for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
|
|
if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
|
|
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
|
|
*sclk_mask = count;
|
|
break;
|
|
}
|
|
}
|
|
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
|
|
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
|
|
*sclk_mask = 0;
|
|
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
|
|
- }
|
|
|
|
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
|
|
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
|
|
@@ -3181,8 +3238,6 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
|
|
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
|
|
|
|
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
|
|
- hwmgr->pstate_sclk = tmp_sclk;
|
|
- hwmgr->pstate_mclk = tmp_mclk;
|
|
|
|
return 0;
|
|
}
|
|
@@ -3195,9 +3250,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
|
|
uint32_t mclk_mask = 0;
|
|
uint32_t pcie_mask = 0;
|
|
|
|
- if (hwmgr->pstate_sclk == 0)
|
|
- smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
|
|
-
|
|
switch (level) {
|
|
case AMD_DPM_FORCED_LEVEL_HIGH:
|
|
ret = smu7_force_dpm_highest(hwmgr);
|
|
@@ -5381,6 +5433,8 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
|
|
thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
|
|
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
|
|
+ thermal_data->sw_ctf_threshold = thermal_data->max;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
|
|
index b50fd4a4a3d1a..b015a601b385a 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
|
|
@@ -1016,6 +1016,18 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
|
|
data->acp_boot_level = 0xff;
|
|
}
|
|
|
|
+static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
|
|
+{
|
|
+ struct phm_clock_voltage_dependency_table *table =
|
|
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
|
|
+
|
|
+ hwmgr->pstate_sclk = table->entries[0].clk / 100;
|
|
+ hwmgr->pstate_mclk = 0;
|
|
+
|
|
+ hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
|
|
+ hwmgr->pstate_mclk_peak = 0;
|
|
+}
|
|
+
|
|
static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
{
|
|
smu8_program_voting_clients(hwmgr);
|
|
@@ -1024,6 +1036,8 @@ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
smu8_program_bootup_state(hwmgr);
|
|
smu8_reset_acp_boot_level(hwmgr);
|
|
|
|
+ smu8_populate_umdpstate_clocks(hwmgr);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -1167,8 +1181,6 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
|
|
|
|
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
|
|
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
|
|
- hwmgr->pstate_sclk = table->entries[0].clk;
|
|
- hwmgr->pstate_mclk = 0;
|
|
|
|
level = smu8_get_max_sclk_level(hwmgr) - 1;
|
|
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
|
|
index bfe80ac0ad8c8..d0b1ab6c45231 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
|
|
@@ -603,21 +603,17 @@ int phm_irq_process(struct amdgpu_device *adev,
|
|
struct amdgpu_irq_src *source,
|
|
struct amdgpu_iv_entry *entry)
|
|
{
|
|
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
|
|
uint32_t client_id = entry->client_id;
|
|
uint32_t src_id = entry->src_id;
|
|
|
|
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
|
|
if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
|
|
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
|
|
- /*
|
|
- * SW CTF just occurred.
|
|
- * Try to do a graceful shutdown to prevent further damage.
|
|
- */
|
|
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
|
|
- orderly_poweroff(true);
|
|
- } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
|
|
+ schedule_delayed_work(&hwmgr->swctf_delayed_work,
|
|
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
|
|
+ } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
|
|
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
|
|
- else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
|
|
+ } else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
|
|
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
|
|
/*
|
|
* HW CTF just occurred. Shutdown to prevent further damage.
|
|
@@ -626,15 +622,10 @@ int phm_irq_process(struct amdgpu_device *adev,
|
|
orderly_poweroff(true);
|
|
}
|
|
} else if (client_id == SOC15_IH_CLIENTID_THM) {
|
|
- if (src_id == 0) {
|
|
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
|
|
- /*
|
|
- * SW CTF just occurred.
|
|
- * Try to do a graceful shutdown to prevent further damage.
|
|
- */
|
|
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
|
|
- orderly_poweroff(true);
|
|
- } else
|
|
+ if (src_id == 0)
|
|
+ schedule_delayed_work(&hwmgr->swctf_delayed_work,
|
|
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
|
|
+ else
|
|
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
|
|
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
|
|
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
|
|
index c8c9fb827bda1..d8cd23438b762 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
|
|
@@ -3008,6 +3008,30 @@ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool
|
|
return 0;
|
|
}
|
|
|
|
+static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
|
|
+{
|
|
+ struct phm_ppt_v2_information *table_info =
|
|
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
|
|
+
|
|
+ if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
|
|
+ table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
|
|
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
|
|
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
|
|
+ } else {
|
|
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
|
|
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk;
|
|
+ }
|
|
+
|
|
+ hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk;
|
|
+ hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk;
|
|
+
|
|
+ /* make sure the output is in Mhz */
|
|
+ hwmgr->pstate_sclk /= 100;
|
|
+ hwmgr->pstate_mclk /= 100;
|
|
+ hwmgr->pstate_sclk_peak /= 100;
|
|
+ hwmgr->pstate_mclk_peak /= 100;
|
|
+}
|
|
+
|
|
static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
{
|
|
struct vega10_hwmgr *data = hwmgr->backend;
|
|
@@ -3082,6 +3106,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
result = tmp_result);
|
|
}
|
|
|
|
+ vega10_populate_umdpstate_clocks(hwmgr);
|
|
+
|
|
return result;
|
|
}
|
|
|
|
@@ -4169,8 +4195,6 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
|
|
*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
|
|
*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
|
|
*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
|
|
- hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
|
|
- hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
|
|
}
|
|
|
|
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
|
|
@@ -4281,9 +4305,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
|
|
uint32_t mclk_mask = 0;
|
|
uint32_t soc_mask = 0;
|
|
|
|
- if (hwmgr->pstate_sclk == 0)
|
|
- vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
|
|
-
|
|
switch (level) {
|
|
case AMD_DPM_FORCED_LEVEL_HIGH:
|
|
ret = vega10_force_dpm_highest(hwmgr);
|
|
@@ -5221,6 +5242,9 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
|
|
{
|
|
struct vega10_hwmgr *data = hwmgr->backend;
|
|
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
|
|
+ struct phm_ppt_v2_information *pp_table_info =
|
|
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
|
|
+ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
|
|
|
|
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
|
|
|
|
@@ -5237,6 +5261,13 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
|
|
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
|
|
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
|
|
+ if (tdp_table->usSoftwareShutdownTemp > pp_table->ThotspotLimit &&
|
|
+ tdp_table->usSoftwareShutdownTemp < VEGA10_THERMAL_MAXIMUM_ALERT_TEMP)
|
|
+ thermal_data->sw_ctf_threshold = tdp_table->usSoftwareShutdownTemp;
|
|
+ else
|
|
+ thermal_data->sw_ctf_threshold = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
|
|
+ thermal_data->sw_ctf_threshold *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
|
|
index a2f4d6773d458..1069eaaae2f82 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
|
|
@@ -1026,6 +1026,25 @@ static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
|
|
return 0;
|
|
}
|
|
|
|
+static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
|
|
+{
|
|
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
|
|
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
|
|
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
|
|
+
|
|
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
|
|
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
|
|
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
|
|
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
|
|
+ } else {
|
|
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
|
|
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
|
|
+ }
|
|
+
|
|
+ hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count].value;
|
|
+ hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count].value;
|
|
+}
|
|
+
|
|
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
{
|
|
int tmp_result, result = 0;
|
|
@@ -1077,6 +1096,9 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
PP_ASSERT_WITH_CODE(!result,
|
|
"Failed to setup default DPM tables!",
|
|
return result);
|
|
+
|
|
+ vega12_populate_umdpstate_clocks(hwmgr);
|
|
+
|
|
return result;
|
|
}
|
|
|
|
@@ -2742,6 +2764,8 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
|
|
static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
|
|
struct PP_TemperatureRange *thermal_data)
|
|
{
|
|
+ struct phm_ppt_v3_information *pptable_information =
|
|
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
|
|
struct vega12_hwmgr *data =
|
|
(struct vega12_hwmgr *)(hwmgr->backend);
|
|
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
|
|
@@ -2760,6 +2784,8 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
|
|
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
|
|
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
+ thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
|
|
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
|
|
index b30684c84e20e..ff77a3683efd5 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
|
|
@@ -1555,26 +1555,23 @@ static int vega20_set_mclk_od(
|
|
return 0;
|
|
}
|
|
|
|
-static int vega20_populate_umdpstate_clocks(
|
|
- struct pp_hwmgr *hwmgr)
|
|
+static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
|
|
{
|
|
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
|
|
struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
|
|
struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
|
|
|
|
- hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
|
|
- hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
|
|
-
|
|
if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
|
|
mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
|
|
hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
|
|
hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
|
|
+ } else {
|
|
+ hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
|
|
+ hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
|
|
}
|
|
|
|
- hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
|
|
- hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
|
|
-
|
|
- return 0;
|
|
+ hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
|
|
+ hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
|
|
}
|
|
|
|
static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
|
|
@@ -1753,10 +1750,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
|
|
"[EnableDPMTasks] Failed to initialize odn settings!",
|
|
return result);
|
|
|
|
- result = vega20_populate_umdpstate_clocks(hwmgr);
|
|
- PP_ASSERT_WITH_CODE(!result,
|
|
- "[EnableDPMTasks] Failed to populate umdpstate clocks!",
|
|
- return result);
|
|
+ vega20_populate_umdpstate_clocks(hwmgr);
|
|
|
|
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
|
|
POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
|
|
@@ -4213,6 +4207,8 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
|
|
static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
|
|
struct PP_TemperatureRange *thermal_data)
|
|
{
|
|
+ struct phm_ppt_v3_information *pptable_information =
|
|
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
|
|
struct vega20_hwmgr *data =
|
|
(struct vega20_hwmgr *)(hwmgr->backend);
|
|
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
|
|
@@ -4231,6 +4227,8 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
|
|
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
|
|
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
+ thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
|
|
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
|
|
index 27f8d0e0e6a8c..ec10643edea3e 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
|
|
@@ -809,6 +809,10 @@ struct pp_hwmgr {
|
|
uint32_t workload_prority[Workload_Policy_Max];
|
|
uint32_t workload_setting[Workload_Policy_Max];
|
|
bool gfxoff_state_changed_by_workload;
|
|
+ uint32_t pstate_sclk_peak;
|
|
+ uint32_t pstate_mclk_peak;
|
|
+
|
|
+ struct delayed_work swctf_delayed_work;
|
|
};
|
|
|
|
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
|
|
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
|
|
index a5f2227a3971c..0ffc2347829d0 100644
|
|
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
|
|
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
|
|
@@ -131,6 +131,7 @@ struct PP_TemperatureRange {
|
|
int mem_min;
|
|
int mem_crit_max;
|
|
int mem_emergency_max;
|
|
+ int sw_ctf_threshold;
|
|
};
|
|
|
|
struct PP_StateValidationBlock {
|
|
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
|
|
index 91dfc229e34d7..d191ff52d4f06 100644
|
|
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
|
|
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
|
|
@@ -24,6 +24,7 @@
|
|
|
|
#include <linux/firmware.h>
|
|
#include <linux/pci.h>
|
|
+#include <linux/reboot.h>
|
|
|
|
#include "amdgpu.h"
|
|
#include "amdgpu_smu.h"
|
|
@@ -1061,6 +1062,34 @@ static void smu_interrupt_work_fn(struct work_struct *work)
|
|
smu->ppt_funcs->interrupt_work(smu);
|
|
}
|
|
|
|
+static void smu_swctf_delayed_work_handler(struct work_struct *work)
|
|
+{
|
|
+ struct smu_context *smu =
|
|
+ container_of(work, struct smu_context, swctf_delayed_work.work);
|
|
+ struct smu_temperature_range *range =
|
|
+ &smu->thermal_range;
|
|
+ struct amdgpu_device *adev = smu->adev;
|
|
+ uint32_t hotspot_tmp, size;
|
|
+
|
|
+ /*
|
|
+ * If the hotspot temperature is confirmed as below SW CTF setting point
|
|
+ * after the delay enforced, nothing will be done.
|
|
+ * Otherwise, a graceful shutdown will be performed to prevent further damage.
|
|
+ */
|
|
+ if (range->software_shutdown_temp &&
|
|
+ smu->ppt_funcs->read_sensor &&
|
|
+ !smu->ppt_funcs->read_sensor(smu,
|
|
+ AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
|
|
+ &hotspot_tmp,
|
|
+ &size) &&
|
|
+ hotspot_tmp / 1000 < range->software_shutdown_temp)
|
|
+ return;
|
|
+
|
|
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
|
|
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
|
|
+ orderly_poweroff(true);
|
|
+}
|
|
+
|
|
static int smu_sw_init(void *handle)
|
|
{
|
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
|
@@ -1109,6 +1138,9 @@ static int smu_sw_init(void *handle)
|
|
return ret;
|
|
}
|
|
|
|
+ INIT_DELAYED_WORK(&smu->swctf_delayed_work,
|
|
+ smu_swctf_delayed_work_handler);
|
|
+
|
|
ret = smu_smc_table_sw_init(smu);
|
|
if (ret) {
|
|
dev_err(adev->dev, "Failed to sw init smc table!\n");
|
|
@@ -1581,6 +1613,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
|
|
return ret;
|
|
}
|
|
|
|
+ cancel_delayed_work_sync(&smu->swctf_delayed_work);
|
|
+
|
|
ret = smu_disable_dpms(smu);
|
|
if (ret) {
|
|
dev_err(adev->dev, "Fail to disable dpm features!\n");
|
|
@@ -2520,6 +2554,14 @@ static int smu_read_sensor(void *handle,
|
|
*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
|
|
*size = 4;
|
|
break;
|
|
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
|
|
+ *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
|
|
+ *size = 4;
|
|
+ break;
|
|
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
|
|
+ *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
|
|
+ *size = 4;
|
|
+ break;
|
|
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
|
|
ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
|
|
*size = 8;
|
|
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
|
|
index 3bc4128a22ac2..1ab77a6cdb653 100644
|
|
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
|
|
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
|
|
@@ -573,6 +573,8 @@ struct smu_context
|
|
u32 debug_param_reg;
|
|
u32 debug_msg_reg;
|
|
u32 debug_resp_reg;
|
|
+
|
|
+ struct delayed_work swctf_delayed_work;
|
|
};
|
|
|
|
struct i2c_adapter;
|
|
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
|
|
index ad5f6a15a1d7d..d490b571c8ffa 100644
|
|
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
|
|
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
|
|
@@ -1438,13 +1438,8 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
|
|
if (client_id == SOC15_IH_CLIENTID_THM) {
|
|
switch (src_id) {
|
|
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
|
|
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
|
|
- /*
|
|
- * SW CTF just occurred.
|
|
- * Try to do a graceful shutdown to prevent further damage.
|
|
- */
|
|
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
|
|
- orderly_poweroff(true);
|
|
+ schedule_delayed_work(&smu->swctf_delayed_work,
|
|
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
|
|
break;
|
|
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
|
|
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
|
|
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
|
|
index 47fafb1fa6088..3104d49379090 100644
|
|
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
|
|
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
|
|
@@ -1386,13 +1386,8 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
|
|
if (client_id == SOC15_IH_CLIENTID_THM) {
|
|
switch (src_id) {
|
|
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
|
|
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
|
|
- /*
|
|
- * SW CTF just occurred.
|
|
- * Try to do a graceful shutdown to prevent further damage.
|
|
- */
|
|
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
|
|
- orderly_poweroff(true);
|
|
+ schedule_delayed_work(&smu->swctf_delayed_work,
|
|
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
|
|
break;
|
|
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
|
|
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
|
|
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
|
|
index 5fdc608043e76..e33f06bb66eb4 100644
|
|
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
|
|
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
|
|
@@ -622,7 +622,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
|
|
int ret;
|
|
|
|
if (obj->import_attach) {
|
|
+ /* Reset both vm_ops and vm_private_data, so we don't end up with
|
|
+ * vm_ops pointing to our implementation if the dma-buf backend
|
|
+ * doesn't set those fields.
|
|
+ */
|
|
vma->vm_private_data = NULL;
|
|
+ vma->vm_ops = NULL;
|
|
+
|
|
ret = dma_buf_mmap(obj->dma_buf, vma, 0);
|
|
|
|
/* Drop the reference drm_gem_mmap_obj() acquired.*/
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
|
|
index f40310559d13f..49c5451cdfb16 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
|
|
@@ -967,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
|
|
/* Determine display colour depth for everything except LVDS now,
|
|
* DP requires this before mode_valid() is called.
|
|
*/
|
|
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
|
|
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
|
|
nouveau_connector_detect_depth(connector);
|
|
|
|
/* Find the native mode if this is a digital panel, if we didn't
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
|
|
index c1b3206f27e64..458f8efb19c6c 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
|
|
@@ -26,6 +26,8 @@
|
|
#include "head.h"
|
|
#include "ior.h"
|
|
|
|
+#include <drm/display/drm_dp.h>
|
|
+
|
|
#include <subdev/bios.h>
|
|
#include <subdev/bios/init.h>
|
|
#include <subdev/gpio.h>
|
|
@@ -474,6 +476,50 @@ nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
|
|
return ret;
|
|
}
|
|
|
|
+/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
|
|
+ * converted to work inside nvkm. This is a temporary holdover until we start
|
|
+ * passing the drm_dp_aux device through NVKM
|
|
+ */
|
|
+static int
|
|
+nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
|
|
+{
|
|
+ struct nvkm_i2c_aux *aux = outp->dp.aux;
|
|
+ u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
|
|
+ int ret;
|
|
+
|
|
+ ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ /*
|
|
+ * Prior to DP1.3 the bit represented by
|
|
+ * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
|
|
+ * If it is set DP_DPCD_REV at 0000h could be at a value less than
|
|
+ * the true capability of the panel. The only way to check is to
|
|
+ * then compare 0000h and 2200h.
|
|
+ */
|
|
+ if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
|
|
+ DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
|
|
+ return 0;
|
|
+
|
|
+ ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
|
|
+ OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
|
|
+ outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
|
|
+ return 0;
|
|
+
|
|
+ memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
void
|
|
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
|
|
{
|
|
@@ -630,7 +676,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool enable)
|
|
memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
|
|
}
|
|
|
|
- if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
|
|
+ if (!nvkm_dp_read_dpcd_caps(outp)) {
|
|
const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
|
|
const u8 *rate;
|
|
int rate_max;
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
|
|
index 32bbddc0993e8..679aff79f4d6b 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
|
|
@@ -123,6 +123,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
|
|
|
|
extern const struct gf100_grctx_func gk110_grctx;
|
|
void gk110_grctx_generate_r419eb0(struct gf100_gr *);
|
|
+void gk110_grctx_generate_r419f78(struct gf100_gr *);
|
|
|
|
extern const struct gf100_grctx_func gk110b_grctx;
|
|
extern const struct gf100_grctx_func gk208_grctx;
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
|
|
index 304e9d268bad4..f894f82548242 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
|
|
@@ -916,7 +916,9 @@ static void
|
|
gk104_grctx_generate_r419f78(struct gf100_gr *gr)
|
|
{
|
|
struct nvkm_device *device = gr->base.engine.subdev.device;
|
|
- nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
|
|
+
|
|
+ /* bit 3 set disables loads in fp helper invocations, we need it enabled */
|
|
+ nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
|
|
}
|
|
|
|
void
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
|
|
index 86547cfc38dce..e88740d4e54d4 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
|
|
@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
|
|
nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
|
|
}
|
|
|
|
+void
|
|
+gk110_grctx_generate_r419f78(struct gf100_gr *gr)
|
|
+{
|
|
+ struct nvkm_device *device = gr->base.engine.subdev.device;
|
|
+
|
|
+ /* bit 3 set disables loads in fp helper invocations, we need it enabled */
|
|
+ nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
|
|
+}
|
|
+
|
|
const struct gf100_grctx_func
|
|
gk110_grctx = {
|
|
.main = gf100_grctx_generate_main,
|
|
@@ -852,4 +861,5 @@ gk110_grctx = {
|
|
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
|
|
.r418800 = gk104_grctx_generate_r418800,
|
|
.r419eb0 = gk110_grctx_generate_r419eb0,
|
|
+ .r419f78 = gk110_grctx_generate_r419f78,
|
|
};
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
|
|
index ebb947bd1446b..086e4d49e1121 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
|
|
@@ -101,4 +101,5 @@ gk110b_grctx = {
|
|
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
|
|
.r418800 = gk104_grctx_generate_r418800,
|
|
.r419eb0 = gk110_grctx_generate_r419eb0,
|
|
+ .r419f78 = gk110_grctx_generate_r419f78,
|
|
};
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
|
|
index 4d40512b5c998..0bf438c3f7cbc 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
|
|
@@ -566,4 +566,5 @@ gk208_grctx = {
|
|
.dist_skip_table = gf117_grctx_generate_dist_skip_table,
|
|
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
|
|
.r418800 = gk104_grctx_generate_r418800,
|
|
+ .r419f78 = gk110_grctx_generate_r419f78,
|
|
};
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
|
|
index 0b3964e6b36e2..acdf0932a99e1 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
|
|
@@ -991,4 +991,5 @@ gm107_grctx = {
|
|
.r406500 = gm107_grctx_generate_r406500,
|
|
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
|
|
.r419e00 = gm107_grctx_generate_r419e00,
|
|
+ .r419f78 = gk110_grctx_generate_r419f78,
|
|
};
|
|
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
|
|
index 9fea03121247e..2e2e08f4359a8 100644
|
|
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
|
|
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
|
|
@@ -836,12 +836,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
|
|
* need align with 2 pixel.
|
|
*/
|
|
if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
|
|
- DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
|
|
+ DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
|
|
- DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
|
|
+ DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -849,7 +849,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
|
|
struct vop *vop = to_vop(crtc);
|
|
|
|
if (!vop->data->afbc) {
|
|
- DRM_ERROR("vop does not support AFBC\n");
|
|
+ DRM_DEBUG_KMS("vop does not support AFBC\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -858,15 +858,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
|
|
return ret;
|
|
|
|
if (new_plane_state->src.x1 || new_plane_state->src.y1) {
|
|
- DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n",
|
|
- new_plane_state->src.x1,
|
|
- new_plane_state->src.y1, fb->offsets[0]);
|
|
+ DRM_DEBUG_KMS("AFBC does not support offset display, " \
|
|
+ "xpos=%d, ypos=%d, offset=%d\n",
|
|
+ new_plane_state->src.x1, new_plane_state->src.y1,
|
|
+ fb->offsets[0]);
|
|
return -EINVAL;
|
|
}
|
|
|
|
if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
|
|
- DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
|
|
- new_plane_state->rotation);
|
|
+ DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n",
|
|
+ new_plane_state->rotation);
|
|
return -EINVAL;
|
|
}
|
|
}
|
|
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
|
|
index 4100eefb7ac32..61c195f8fd3b8 100644
|
|
--- a/drivers/hwmon/pmbus/bel-pfe.c
|
|
+++ b/drivers/hwmon/pmbus/bel-pfe.c
|
|
@@ -17,12 +17,13 @@
|
|
enum chips {pfe1100, pfe3000};
|
|
|
|
/*
|
|
- * Disable status check for pfe3000 devices, because some devices report
|
|
- * communication error (invalid command) for VOUT_MODE command (0x20)
|
|
- * although correct VOUT_MODE (0x16) is returned: it leads to incorrect
|
|
- * exponent in linear mode.
|
|
+ * Disable status check because some devices report communication error
|
|
+ * (invalid command) for VOUT_MODE command (0x20) although the correct
|
|
+ * VOUT_MODE (0x16) is returned: it leads to incorrect exponent in linear
|
|
+ * mode.
|
|
+ * This affects both pfe3000 and pfe1100.
|
|
*/
|
|
-static struct pmbus_platform_data pfe3000_plat_data = {
|
|
+static struct pmbus_platform_data pfe_plat_data = {
|
|
.flags = PMBUS_SKIP_STATUS_CHECK,
|
|
};
|
|
|
|
@@ -94,16 +95,15 @@ static int pfe_pmbus_probe(struct i2c_client *client)
|
|
int model;
|
|
|
|
model = (int)i2c_match_id(pfe_device_id, client)->driver_data;
|
|
+ client->dev.platform_data = &pfe_plat_data;
|
|
|
|
/*
|
|
* PFE3000-12-069RA devices may not stay in page 0 during device
|
|
* probe which leads to probe failure (read status word failed).
|
|
* So let's set the device to page 0 at the beginning.
|
|
*/
|
|
- if (model == pfe3000) {
|
|
- client->dev.platform_data = &pfe3000_plat_data;
|
|
+ if (model == pfe3000)
|
|
i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
|
|
- }
|
|
|
|
return pmbus_do_probe(client, &pfe_driver_info[model]);
|
|
}
|
|
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
|
|
index 8720ac43a4a4a..80eff7090f14a 100644
|
|
--- a/drivers/iio/adc/ad7192.c
|
|
+++ b/drivers/iio/adc/ad7192.c
|
|
@@ -62,7 +62,6 @@
|
|
#define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */
|
|
#define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
|
|
#define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */
|
|
-#define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/
|
|
#define AD7192_MODE_ENPAR BIT(13) /* Parity Enable */
|
|
#define AD7192_MODE_CLKDIV BIT(12) /* Clock divide by 2 (AD7190/2 only)*/
|
|
#define AD7192_MODE_SCYCLE BIT(11) /* Single cycle conversion */
|
|
@@ -91,6 +90,7 @@
|
|
/* Configuration Register Bit Designations (AD7192_REG_CONF) */
|
|
|
|
#define AD7192_CONF_CHOP BIT(23) /* CHOP enable */
|
|
+#define AD7192_CONF_ACX BIT(22) /* AC excitation enable(AD7195 only) */
|
|
#define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */
|
|
#define AD7192_CONF_CHAN(x) ((x) << 8) /* Channel select */
|
|
#define AD7192_CONF_CHAN_MASK (0x7FF << 8) /* Channel select mask */
|
|
@@ -473,7 +473,7 @@ static ssize_t ad7192_show_ac_excitation(struct device *dev,
|
|
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
|
|
struct ad7192_state *st = iio_priv(indio_dev);
|
|
|
|
- return sysfs_emit(buf, "%d\n", !!(st->mode & AD7192_MODE_ACX));
|
|
+ return sysfs_emit(buf, "%d\n", !!(st->conf & AD7192_CONF_ACX));
|
|
}
|
|
|
|
static ssize_t ad7192_show_bridge_switch(struct device *dev,
|
|
@@ -514,13 +514,13 @@ static ssize_t ad7192_set(struct device *dev,
|
|
|
|
ad_sd_write_reg(&st->sd, AD7192_REG_GPOCON, 1, st->gpocon);
|
|
break;
|
|
- case AD7192_REG_MODE:
|
|
+ case AD7192_REG_CONF:
|
|
if (val)
|
|
- st->mode |= AD7192_MODE_ACX;
|
|
+ st->conf |= AD7192_CONF_ACX;
|
|
else
|
|
- st->mode &= ~AD7192_MODE_ACX;
|
|
+ st->conf &= ~AD7192_CONF_ACX;
|
|
|
|
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
|
|
+ ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
|
|
break;
|
|
default:
|
|
ret = -EINVAL;
|
|
@@ -580,12 +580,11 @@ static IIO_DEVICE_ATTR(bridge_switch_en, 0644,
|
|
|
|
static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
|
|
ad7192_show_ac_excitation, ad7192_set,
|
|
- AD7192_REG_MODE);
|
|
+ AD7192_REG_CONF);
|
|
|
|
static struct attribute *ad7192_attributes[] = {
|
|
&iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
|
|
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
|
|
- &iio_dev_attr_ac_excitation_en.dev_attr.attr,
|
|
NULL
|
|
};
|
|
|
|
@@ -596,6 +595,7 @@ static const struct attribute_group ad7192_attribute_group = {
|
|
static struct attribute *ad7195_attributes[] = {
|
|
&iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
|
|
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
|
|
+ &iio_dev_attr_ac_excitation_en.dev_attr.attr,
|
|
NULL
|
|
};
|
|
|
|
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
|
|
index 910e7e965fc48..74092f3836b83 100644
|
|
--- a/drivers/iio/adc/ina2xx-adc.c
|
|
+++ b/drivers/iio/adc/ina2xx-adc.c
|
|
@@ -124,6 +124,7 @@ static const struct regmap_config ina2xx_regmap_config = {
|
|
enum ina2xx_ids { ina219, ina226 };
|
|
|
|
struct ina2xx_config {
|
|
+ const char *name;
|
|
u16 config_default;
|
|
int calibration_value;
|
|
int shunt_voltage_lsb; /* nV */
|
|
@@ -155,6 +156,7 @@ struct ina2xx_chip_info {
|
|
|
|
static const struct ina2xx_config ina2xx_config[] = {
|
|
[ina219] = {
|
|
+ .name = "ina219",
|
|
.config_default = INA219_CONFIG_DEFAULT,
|
|
.calibration_value = 4096,
|
|
.shunt_voltage_lsb = 10000,
|
|
@@ -164,6 +166,7 @@ static const struct ina2xx_config ina2xx_config[] = {
|
|
.chip_id = ina219,
|
|
},
|
|
[ina226] = {
|
|
+ .name = "ina226",
|
|
.config_default = INA226_CONFIG_DEFAULT,
|
|
.calibration_value = 2048,
|
|
.shunt_voltage_lsb = 2500,
|
|
@@ -996,7 +999,7 @@ static int ina2xx_probe(struct i2c_client *client,
|
|
/* Patch the current config register with default. */
|
|
val = chip->config->config_default;
|
|
|
|
- if (id->driver_data == ina226) {
|
|
+ if (type == ina226) {
|
|
ina226_set_average(chip, INA226_DEFAULT_AVG, &val);
|
|
ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val);
|
|
ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val);
|
|
@@ -1015,7 +1018,7 @@ static int ina2xx_probe(struct i2c_client *client,
|
|
}
|
|
|
|
indio_dev->modes = INDIO_DIRECT_MODE;
|
|
- if (id->driver_data == ina226) {
|
|
+ if (type == ina226) {
|
|
indio_dev->channels = ina226_channels;
|
|
indio_dev->num_channels = ARRAY_SIZE(ina226_channels);
|
|
indio_dev->info = &ina226_info;
|
|
@@ -1024,7 +1027,7 @@ static int ina2xx_probe(struct i2c_client *client,
|
|
indio_dev->num_channels = ARRAY_SIZE(ina219_channels);
|
|
indio_dev->info = &ina219_info;
|
|
}
|
|
- indio_dev->name = id->name;
|
|
+ indio_dev->name = id ? id->name : chip->config->name;
|
|
|
|
ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
|
|
&ina2xx_setup_ops);
|
|
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
|
|
index 05a28d353e343..d98f7e4d202c1 100644
|
|
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
|
|
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
|
|
@@ -253,7 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
|
|
platform_set_drvdata(pdev, indio_dev);
|
|
|
|
state->ec = ec->ec_dev;
|
|
- state->msg = devm_kzalloc(&pdev->dev,
|
|
+ state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
|
|
max((u16)sizeof(struct ec_params_motion_sense),
|
|
state->ec->max_response), GFP_KERNEL);
|
|
if (!state->msg)
|
|
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
|
|
index ed81672713586..e6311213f3e89 100644
|
|
--- a/drivers/iio/frequency/admv1013.c
|
|
+++ b/drivers/iio/frequency/admv1013.c
|
|
@@ -344,9 +344,12 @@ static int admv1013_update_quad_filters(struct admv1013_state *st)
|
|
|
|
static int admv1013_update_mixer_vgate(struct admv1013_state *st)
|
|
{
|
|
- unsigned int vcm, mixer_vgate;
|
|
+ unsigned int mixer_vgate;
|
|
+ int vcm;
|
|
|
|
vcm = regulator_get_voltage(st->reg);
|
|
+ if (vcm < 0)
|
|
+ return vcm;
|
|
|
|
if (vcm < 1800000)
|
|
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
|
|
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 151ff39933548..f3f8392623a46 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1916,7 +1916,7 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops;
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- struct fwnode_handle *fwnode;
+ struct fwnode_handle *fwnode = NULL;
int ret;

if (!indio_dev->info)
@@ -1927,7 +1927,8 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
/* If the calling driver did not initialize firmware node, do it here */
if (dev_fwnode(&indio_dev->dev))
fwnode = dev_fwnode(&indio_dev->dev);
- else
+ /* The default dummy IIO device has no parent */
+ else if (indio_dev->dev.parent)
fwnode = dev_fwnode(indio_dev->dev.parent);
device_set_node(&indio_dev->dev, fwnode);

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 86d479772fbc6..957634eceba8f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -85,6 +85,8 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
dma_addr_t mask;
int i;

+ umem->iova = va = virt;
+
if (umem->is_odp) {
unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

@@ -100,7 +102,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
*/
pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);

- umem->iova = va = virt;
/* The best result is the smallest page size that results in the minimum
* number of required pages. Compute the largest page size that could
* work based on VA address bits that don't change.
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 90b672feed83d..194cac40da653 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12307,6 +12307,7 @@ static void free_cntrs(struct hfi1_devdata *dd)

if (dd->synth_stats_timer.function)
del_timer_sync(&dd->synth_stats_timer);
+ cancel_work_sync(&dd->update_cntr_work);
ppd = (struct hfi1_pportdata *)(dd + 1);
for (i = 0; i < dd->num_pports; i++, ppd++) {
kfree(ppd->cntrs);
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index 8f385f9c2dd38..d5f2a6b5376bd 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -83,6 +83,11 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)

temp = agg_peak[bucket] * bcm->vote_scale;
bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
+
+ if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
+ bcm->vote_x[bucket] = 0;
+ bcm->vote_y[bucket] = bcm->enable_mask;
+ }
}

if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 04391c1ba465c..7843d8864d6ba 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -81,6 +81,7 @@ struct qcom_icc_node {
* @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
* @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
* @vote_scale: scaling factor for vote_x and vote_y
+ * @enable_mask: optional mask to send as vote instead of vote_x/vote_y
* @dirty: flag used to indicate whether the bcm needs to be committed
* @keepalive: flag used to indicate whether a keepalive is required
* @aux_data: auxiliary data used when calculating threshold values and
@@ -97,6 +98,7 @@ struct qcom_icc_bcm {
u64 vote_x[QCOM_ICC_NUM_BUCKETS];
u64 vote_y[QCOM_ICC_NUM_BUCKETS];
u64 vote_scale;
+ u32 enable_mask;
bool dirty;
bool keepalive;
struct bcm_db aux_data;
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index 2d7a8e7b85ec2..e64c214b40209 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1337,6 +1337,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = {

static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = 0x8,
.num_nodes = 1,
.nodes = { &ebi },
};
@@ -1349,6 +1350,7 @@ static struct qcom_icc_bcm bcm_ce0 = {

static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
+ .enable_mask = 0x1,
.keepalive = true,
.num_nodes = 55,
.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
@@ -1383,6 +1385,7 @@ static struct qcom_icc_bcm bcm_cn0 = {

static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
+ .enable_mask = 0x1,
.num_nodes = 2,
.nodes = { &qxm_nsp, &qns_nsp_gemnoc },
};
@@ -1403,6 +1406,7 @@ static struct qcom_icc_bcm bcm_mm0 = {

static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 12,
.nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
&qnm_camnoc_sf, &qnm_mdp,
@@ -1445,6 +1449,7 @@ static struct qcom_icc_bcm bcm_sh0 = {

static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 7,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu,
&qnm_nsp_gemnoc, &qnm_pcie,
@@ -1461,6 +1466,7 @@ static struct qcom_icc_bcm bcm_sn0 = {

static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
+ .enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qhm_gic, &qxm_pimem,
&xm_gic, &qns_gemnoc_gc },
@@ -1492,6 +1498,7 @@ static struct qcom_icc_bcm bcm_sn7 = {

static struct qcom_icc_bcm bcm_acv_disp = {
.name = "ACV",
+ .enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &ebi_disp },
};
@@ -1510,6 +1517,7 @@ static struct qcom_icc_bcm bcm_mm0_disp = {

static struct qcom_icc_bcm bcm_mm1_disp = {
.name = "MM1",
+ .enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mdp_disp, &qnm_rot_disp,
&qns_mem_noc_sf_disp },
@@ -1523,6 +1531,7 @@ static struct qcom_icc_bcm bcm_sh0_disp = {

static struct qcom_icc_bcm bcm_sh1_disp = {
.name = "SH1",
+ .enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &qnm_pcie_disp },
};
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index fa09d511a8eda..baf31258f5c90 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
-extern void dsp_cmx_send(void *arg);
+extern void dsp_cmx_send(struct timer_list *arg);
extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
extern int dsp_cmx_del_conf_member(struct dsp *dsp);
extern int dsp_cmx_del_conf(struct dsp_conf *conf);
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 6d2088fbaf69c..1b73af5013976 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1625,7 +1625,7 @@ static u16 dsp_count; /* last sample count */
static int dsp_count_valid; /* if we have last sample count */

void
-dsp_cmx_send(void *arg)
+dsp_cmx_send(struct timer_list *arg)
{
struct dsp_conf *conf;
struct dsp_conf_member *member;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 386084530c2f8..fae95f1666883 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1195,7 +1195,7 @@ static int __init dsp_init(void)
}

/* set sample timer */
- timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
+ timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
dsp_spl_tl.expires = jiffies + dsp_tics;
dsp_spl_jiffies = dsp_spl_tl.expires;
add_timer(&dsp_spl_tl);
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index d676cf63a9669..3dae5e3a16976 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -195,7 +195,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
}
}

- if (option->force_clkreq_0)
+ if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index cfebad51d1d80..f4ab09439da70 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -435,17 +435,10 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
option->ltr_enabled = false;
}
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
}

static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;

rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
@@ -476,17 +469,6 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);

- /*
- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
- * to drive low, and we forcibly request clock.
- */
- if (option->force_clkreq_0)
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
- else
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
-
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);

if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 91d240dd68faa..47ab72a43256b 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -327,12 +327,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
}
}

-
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
* to drive low, and we forcibly request clock.
*/
- if (option->force_clkreq_0)
+ if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 9b42b20a3e5ae..79b18f6f73a8a 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -517,17 +517,10 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
option->ltr_enabled = false;
}
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
}

static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;

/* Set mcu_cnt to 7 to ensure data can be sampled properly */
rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
@@ -546,17 +539,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)

rts5260_init_hw(pcr);

- /*
- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
- * to drive low, and we forcibly request clock.
- */
- if (option->force_clkreq_0)
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
- else
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
-
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);

return 0;
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index b1e76030cafda..94af6bf8a25a6 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -498,17 +498,10 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
option->ltr_enabled = false;
}
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
}

static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;
u32 val;

rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
@@ -554,17 +547,6 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);

- /*
- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
- * to drive low, and we forcibly request clock.
- */
- if (option->force_clkreq_0)
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
- else
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
-
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);

if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 32b7783e9d4fa..a3f4b52bb159f 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1326,8 +1326,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}

- if (pcr->aspm_mode == ASPM_MODE_REG)
+ if (pcr->aspm_mode == ASPM_MODE_REG) {
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+ }

/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 52ed30f2d9f4f..94e9a08bc90e1 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
return;
}
for (len = 0; len < remain && len < host->fifo_width;) {
- /* SCR data must be read in big endian. */
- if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
- *sgp = ioread32be(host->base +
- REG_DATA_WINDOW);
- else
- *sgp = ioread32(host->base +
- REG_DATA_WINDOW);
+ *sgp = ioread32(host->base + REG_DATA_WINDOW);
sgp++;
len += 4;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 576370f89c755..7a3c7a74af04a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5839,7 +5839,9 @@ void bond_setup(struct net_device *bond_dev)

bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;

bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 4faabc4364aa7..2d2c6f941272c 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1606,8 +1606,10 @@ static void felix_teardown(struct dsa_switch *ds)
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_port *dp;

+ rtnl_lock();
if (felix->tag_proto_ops)
felix->tag_proto_ops->teardown(ds);
+ rtnl_unlock();

dsa_switch_for_each_available_port(dp, ds)
ocelot_deinit_port(ocelot, dp->index);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index f6ededec5a4fa..69d1549e63a98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -458,9 +458,9 @@ static void hns3_dbg_fill_content(char *content, u16 len,
if (result) {
if (item_len < strlen(result[i]))
break;
- strscpy(pos, result[i], strlen(result[i]));
+ memcpy(pos, result[i], strlen(result[i]));
} else {
- strscpy(pos, items[i].name, strlen(items[i].name));
+ memcpy(pos, items[i].name, strlen(items[i].name));
}
pos += item_len;
len -= item_len;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 248f15dac86ba..61f833d61f583 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5854,6 +5854,9 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
if (!if_running)
return;

+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
netif_carrier_off(ndev);
netif_tx_disable(ndev);

@@ -5882,7 +5885,16 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
if (!if_running)
return;

- hns3_nic_reset_all_ring(priv->ae_handle);
+ if (hns3_nic_resetting(ndev))
+ return;
+
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ if (hns3_nic_reset_all_ring(priv->ae_handle))
+ return;
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 726062e512939..5cb8f1818e51c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -110,9 +110,9 @@ static void hclge_dbg_fill_content(char *content, u16 len,
if (result) {
if (item_len < strlen(result[i]))
break;
- strscpy(pos, result[i], strlen(result[i]));
+ memcpy(pos, result[i], strlen(result[i]));
} else {
- strscpy(pos, items[i].name, strlen(items[i].name));
+ memcpy(pos, items[i].name, strlen(items[i].name));
}
pos += item_len;
len -= item_len;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 50e956d6c3b25..6af2273f227c2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -72,6 +72,8 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);

static struct hnae3_ae_algo ae_algo;

@@ -7567,6 +7569,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
+#define HCLGE_LINK_STATUS_WAIT_CNT 3
+
struct hclge_desc desc;
struct hclge_config_mac_mode_cmd *req =
(struct hclge_config_mac_mode_cmd *)desc.data;
@@ -7591,9 +7595,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"mac enable fail, ret =%d.\n", ret);
+ return;
+ }
+
+ if (!enable)
+ hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
+ HCLGE_LINK_STATUS_WAIT_CNT);
}

static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
@@ -7656,10 +7666,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}

-static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt)
{
-#define HCLGE_MAC_LINK_STATUS_NUM 100
-
int link_status;
int i = 0;
int ret;
@@ -7672,13 +7681,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
return 0;

msleep(HCLGE_LINK_STATUS_MS);
- } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ } while (++i < wait_cnt);
return -EBUSY;
}

static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
bool is_phy)
{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
int link_ret;

link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

if (is_phy)
hclge_phy_link_status_wait(hdev, link_ret);

- return hclge_mac_link_status_wait(hdev, link_ret);
+ return hclge_mac_link_status_wait(hdev, link_ret,
+ HCLGE_MAC_LINK_STATUS_NUM);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bc97f24b08270..157be4e9be4b7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -96,6 +96,8 @@ static int pending_scrq(struct ibmvnic_adapter *,
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
+static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
@@ -113,6 +115,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
+static void flush_reset_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -1314,8 +1317,8 @@ static const char *adapter_state_to_string(enum vnic_state state)

static int ibmvnic_login(struct net_device *netdev)
{
+ unsigned long flags, timeout = msecs_to_jiffies(20000);
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- unsigned long timeout = msecs_to_jiffies(20000);
int retry_count = 0;
int retries = 10;
bool retry;
@@ -1336,11 +1339,9 @@ static int ibmvnic_login(struct net_device *netdev)

if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
- netdev_warn(netdev, "Login timed out, retrying...\n");
- retry = true;
- adapter->init_done_rc = 0;
- retry_count++;
- continue;
+ netdev_warn(netdev, "Login timed out\n");
+ adapter->login_pending = false;
+ goto partial_reset;
}

if (adapter->init_done_rc == ABORTED) {
@@ -1382,10 +1383,69 @@ static int ibmvnic_login(struct net_device *netdev)
"SCRQ irq initialization failed\n");
return rc;
}
+ /* Default/timeout error handling, reset and start fresh */
} else if (adapter->init_done_rc) {
netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
adapter->init_done_rc);
- return -EIO;
+
+partial_reset:
+ /* adapter login failed, so free any CRQs or sub-CRQs
+ * and register again before attempting to login again.
+ * If we don't do this then the VIOS may think that
+ * we are already logged in and reject any subsequent
+ * attempts
+ */
+ netdev_warn(netdev,
+ "Freeing and re-registering CRQs before attempting to login again\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ release_sub_crqs(adapter, true);
+ /* Much of this is similar logic as ibmvnic_probe(),
+ * we are essentially re-initializing communication
+ * with the server. We really should not run any
+ * resets/failovers here because this is already a form
+ * of reset and we do not want parallel resets occurring
+ */
+ do {
+ reinit_init_done(adapter);
+ /* Clear any failovers we got in the previous
+ * pass since we are re-initializing the CRQ
+ */
+ adapter->failover_pending = false;
+ release_crq_queue(adapter);
+ /* If we don't sleep here then we risk an
+ * unnecessary failover event from the VIOS.
+ * This is a known VIOS issue caused by a vnic
+ * device freeing and registering a CRQ too
+ * quickly.
+ */
+ msleep(1500);
+ /* Avoid any resets, since we are currently
+ * resetting.
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ flush_reset_queue(adapter);
+ spin_unlock_irqrestore(&adapter->rwi_lock,
+ flags);
+
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ netdev_err(netdev, "login recovery: init CRQ failed %d\n",
+ rc);
+ return -EIO;
+ }
+
+ rc = ibmvnic_reset_init(adapter, false);
+ if (rc)
+ netdev_err(netdev, "login recovery: Reset init failed %d\n",
+ rc);
+ /* IBMVNIC_CRQ_INIT will return EAGAIN if it
+ * fails, since ibmvnic_reset_init will free
+ * irq's in failure, we won't be able to receive
+ * new CRQs so we need to keep trying. probe()
+ * handles this similarly.
+ */
+ } while (rc == -EAGAIN && retry_count++ < retries);
}
} while (retry);

@@ -1397,12 +1457,22 @@ static int ibmvnic_login(struct net_device *netdev)

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
+ if (!adapter->login_buf)
+ return;
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
+ adapter->login_buf_sz, DMA_TO_DEVICE);
kfree(adapter->login_buf);
adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
+ if (!adapter->login_rsp_buf)
+ return;
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
+ adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
kfree(adapter->login_rsp_buf);
adapter->login_rsp_buf = NULL;
}
@@ -4626,11 +4696,14 @@ static int send_login(struct ibmvnic_adapter *adapter)
if (rc) {
adapter->login_pending = false;
netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
- goto buf_rsp_map_failed;
+ goto buf_send_failed;
}

return 0;

+buf_send_failed:
+ dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
+ DMA_FROM_DEVICE);
buf_rsp_map_failed:
kfree(login_rsp_buffer);
adapter->login_rsp_buf = NULL;
@@ -5192,6 +5265,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
int num_tx_pools;
int num_rx_pools;
u64 *size_array;
+ u32 rsp_len;
int i;

/* CHECK: Test/set of login_pending does not need to be atomic
@@ -5203,11 +5277,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
}
adapter->login_pending = false;

- dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
- DMA_TO_DEVICE);
- dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
-
/* If the number of queues requested can't be allocated by the
* server, the login response will return with code 1. We will need
* to resend the login buffer with fewer queues requested.
@@ -5243,6 +5312,23 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
return -EIO;
}
+
+ rsp_len = be32_to_cpu(login_rsp->len);
+ if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
+ rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
+ rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
+ rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
+ rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
+ /* This can happen if a login request times out and there are
+ * 2 outstanding login requests sent, the LOGIN_RSP crq
+ * could have been for the older login request. So we are
+ * parsing the newer response buffer which may be incomplete
+ */
+ dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ return -EIO;
+ }
+
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
/* variable buffer sizes are not supported, so just read the
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index fd6d6f6263f66..f544d2b0abdbd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1401,14 +1401,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fsp->flow_type & FLOW_MAC_EXT)
return -EINVAL;

+ spin_lock_bh(&adapter->fdir_fltr_lock);
if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
dev_err(&adapter->pdev->dev,
"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
IAVF_MAX_FDIR_FILTERS);
return -ENOSPC;
}

- spin_lock_bh(&adapter->fdir_fltr_lock);
if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1781,7 +1782,9 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLCNT:
if (!FDIR_FLTR_SUPPORT(adapter))
break;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
cmd->rule_cnt = adapter->fdir_active_fltr;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
cmd->data = IAVF_MAX_FDIR_FILTERS;
ret = 0;
break;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 6146203efd84a..505e82ebafe47 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -722,7 +722,9 @@ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *f
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *tmp;
+ bool ret = false;

+ spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
if (tmp->flow_type != fltr->flow_type)
continue;
@@ -732,11 +734,14 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
!memcmp(&tmp->ip_data, &fltr->ip_data,
sizeof(fltr->ip_data)) &&
!memcmp(&tmp->ext_data, &fltr->ext_data,
- sizeof(fltr->ext_data)))
- return true;
+ sizeof(fltr->ext_data))) {
+ ret = true;
+ break;
+ }
}
+ spin_unlock_bh(&adapter->fdir_fltr_lock);

- return false;
+ return ret;
}

/**
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index a9a1028cb17bb..de317179a7dcc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -166,11 +166,11 @@ prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,

static bool __prestera_fi_is_direct(struct fib_info *fi)
{
- struct fib_nh *fib_nh;
+ struct fib_nh_common *fib_nhc;

if (fib_info_num_path(fi) == 1) {
- fib_nh = fib_info_nh(fi, 0);
- if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
+ fib_nhc = fib_info_nhc(fi, 0);
+ if (fib_nhc->nhc_gw_family == AF_UNSPEC)
return true;
}

@@ -261,7 +261,7 @@ static bool
__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
struct net_device *dev)
{
- struct fib_nh *fib_nh;
+ struct fib_nh_common *fib_nhc;
struct fib_result res;
bool reachable;

@@ -269,8 +269,8 @@ __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,

if (!prestera_util_kern_get_route(&res, tb_id, addr))
if (prestera_fi_is_direct(res.fi)) {
- fib_nh = fib_info_nh(res.fi, 0);
- if (dev == fib_nh->fib_nh_dev)
+ fib_nhc = fib_info_nhc(res.fi, 0);
+ if (dev == fib_nhc->nhc_dev)
reachable = true;
}

@@ -324,7 +324,7 @@ prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
if (info->family == AF_INET) {
fen4_info = container_of(info, struct fib_entry_notifier_info,
info);
- return &fib_info_nh(fen4_info->fi, n)->nh_common;
+ return fib_info_nhc(fen4_info->fi, n);
} else if (info->family == AF_INET6) {
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index d3a3fe4ce6702..7d9bbb494d95b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -574,7 +574,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
for (i = 0; i < ldev->ports; i++) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
- if (ldev->v2p_map[i] == ports[i])
+ if (ldev->v2p_map[idx] == ports[idx])
continue;

dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d7ddfc489536e..2ac255bb918ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -198,10 +198,15 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
clock = container_of(timer, struct mlx5_clock, timer);
mdev = container_of(clock, struct mlx5_core_dev, clock);

+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ goto out;
+
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&timer->tc);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+
+out:
schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 077204929fe4a..6ab0642e9de78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1794,7 +1794,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,

mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev, true);
+ mlx5_unload_one(dev, false);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 20d7662c10fb6..5f2195e65dd62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -264,8 +264,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
host_total_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_total_vfs);
kvfree(out);
- if (host_total_vfs)
- return host_total_vfs;
+ return host_total_vfs;
}

done:
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 27a0f3af8aab4..4f4204432aaa3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -8,6 +8,7 @@
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>
+#include <linux/pci.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>
@@ -1972,9 +1973,12 @@ int mana_attach(struct net_device *ndev)
static int mana_dealloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned long timeout = jiffies + 120 * HZ;
struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_txq *txq;
+ struct sk_buff *skb;
int i, err;
+ u32 tsleep;

if (apc->port_is_up)
return -EINVAL;
@@ -1990,15 +1994,40 @@ static int mana_dealloc_queues(struct net_device *ndev)
* to false, but it doesn't matter since mana_start_xmit() drops any
* new packets due to apc->port_is_up being false.
*
- * Drain all the in-flight TX packets
+ * Drain all the in-flight TX packets.
+ * A timeout of 120 seconds for all the queues is used.
+ * This will break the while loop when h/w is not responding.
+ * This value of 120 has been decided here considering max
+ * number of queues.
*/
+
for (i = 0; i < apc->num_queues; i++) {
txq = &apc->tx_qp[i].txq;
-
- while (atomic_read(&txq->pending_sends) > 0)
- usleep_range(1000, 2000);
+ tsleep = 1000;
+ while (atomic_read(&txq->pending_sends) > 0 &&
+ time_before(jiffies, timeout)) {
+ usleep_range(tsleep, tsleep + 1000);
+ tsleep <<= 1;
+ }
+ if (atomic_read(&txq->pending_sends)) {
+ err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
+ if (err) {
+ netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
+ err, atomic_read(&txq->pending_sends),
+ txq->gdma_txq_id);
+ }
+ break;
+ }
}

+ for (i = 0; i < apc->num_queues; i++) {
+ txq = &apc->tx_qp[i].txq;
+ while ((skb = skb_dequeue(&txq->pending_skbs))) {
+ mana_unmap_skb(skb, apc);
+ dev_kfree_skb_any(skb);
+ }
+ atomic_set(&txq->pending_sends, 0);
+ }
/* We're 100% sure the queues can no longer be woken up, because
* we're sure now mana_poll_tx_cq() can't be running.
*/
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 983cabf9a0f67..6a7965ed63001 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -743,7 +743,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
- secy->netdev->stats.rx_dropped++;
+ DEV_STATS_INC(secy->netdev, rx_dropped);
return false;
}

@@ -767,7 +767,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
this_cpu_inc(rx_sa->stats->InPktsNotValid);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
return false;
}

@@ -1059,7 +1059,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
continue;
}

@@ -1169,7 +1169,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
goto drop_nosa;
}

@@ -1186,7 +1186,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
@@ -1220,7 +1220,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
goto drop;
}
}
@@ -1261,7 +1261,7 @@ deliver:
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
else
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

rcu_read_unlock();

@@ -1298,7 +1298,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
- macsec->secy.netdev->stats.rx_errors++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_errors);
continue;
}

@@ -1317,7 +1317,7 @@ nosci:
secy_stats->stats.InPktsUnknownSCI++;
u64_stats_update_end(&secy_stats->syncp);
} else {
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
}
}

@@ -3418,7 +3418,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,

if (!secy->operational) {
kfree_skb(skb);
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}

@@ -3426,7 +3426,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}

@@ -3663,9 +3663,9 @@ static void macsec_get_stats64(struct net_device *dev,

dev_fetch_sw_netstats(s, dev->tstats);

- s->rx_dropped = dev->stats.rx_dropped;
- s->tx_dropped = dev->stats.tx_dropped;
- s->rx_errors = dev->stats.rx_errors;
+ s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
+ s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
+ s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
}

static int macsec_get_iflink(const struct net_device *dev)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index d499659075614..61824a463df85 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -2087,8 +2087,6 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_POLL_CABLE_TEST,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 228f5f9ef1dde..7544df1ff50ec 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1588,7 +1588,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
if (zerocopy)
return false;

- if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
+ if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
return false;

diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 3e04af4c5daa1..c5cf55030158f 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -713,6 +713,12 @@ static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
return vninode;
}

+static void vxlan_vni_free(struct vxlan_vni_node *vninode)
+{
+ free_percpu(vninode->stats);
+ kfree(vninode);
+}
+
static int vxlan_vni_add(struct vxlan_dev *vxlan,
struct vxlan_vni_group *vg,
u32 vni, union vxlan_addr *group,
@@ -740,7 +746,7 @@ static int vxlan_vni_add(struct vxlan_dev *vxlan,
&vninode->vnode,
vxlan_vni_rht_params);
if (err) {
- kfree(vninode);
+ vxlan_vni_free(vninode);
return err;
}

@@ -763,8 +769,7 @@ static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
struct vxlan_vni_node *v;

v = container_of(rcu, struct vxlan_vni_node, rcu);
- free_percpu(v->stats);
- kfree(v);
+ vxlan_vni_free(v);
}

static int vxlan_vni_del(struct vxlan_dev *vxlan,
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 5bf7822c53f18..0ba714ca5185c 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,7 +6,7 @@
#include "allowedips.h"
#include "peer.h"

-enum { MAX_ALLOWEDIPS_BITS = 128 };
+enum { MAX_ALLOWEDIPS_DEPTH = 129 };

static struct kmem_cache *node_cache;

@@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
{
if (rcu_access_pointer(p)) {
- if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
+ if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH))
return;
stack[(*len)++] = rcu_dereference_raw(p);
}
@@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu)

static void root_free_rcu(struct rcu_head *rcu)
{
- struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = {
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = {
container_of(rcu, struct allowedips_node, rcu) };
unsigned int len = 1;

@@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu)

static void root_remove_peer_lists(struct allowedips_node *root)
{
- struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root };
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root };
unsigned int len = 1;

while (len > 0 && (node = stack[--len])) {
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 19eac00b23814..c51c794e70a0e 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void)
wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 192, 168, 0, 1);

- /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
+ /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
* if something goes wrong.
*/
- for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
- part = cpu_to_be64(~(1LLU << (i % 64)));
- memset(&ip, 0xff, 16);
- memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
+ for (i = 0; i < 64; ++i) {
+ part = cpu_to_be64(~0LLU << i);
+ memset(&ip, 0xff, 8);
+ memcpy((u8 *)&ip + 8, &part, 8);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ memcpy(&ip, &part, 8);
+ memset((u8 *)&ip + 8, 0, 8);
wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
}
-
+ memset(&ip, 0, 16);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
wg_allowedips_free(&t, &mutex);

wg_allowedips_init(&t);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 077fddc5fa1ea..4a1c9e18c5301 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -2209,7 +2209,7 @@ static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
u32 reg;
int ret;

- if (chip_id != RTL8852A && chip_id != RTL8852B)
+ if (chip_id != RTL8852B)
return 0;

ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ce2e628f94a05..b30269f5e68fb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3504,7 +3504,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
- .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 80383213b8828..c478480f54aa2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -923,6 +923,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_cleanup_tagset;

if (!new) {
+ nvme_start_freeze(&ctrl->ctrl);
nvme_start_queues(&ctrl->ctrl);
if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
/*
@@ -931,6 +932,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
* to be safe.
*/
ret = -ENODEV;
+ nvme_unfreeze(&ctrl->ctrl);
goto out_wait_freeze_timed_out;
}
blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
@@ -980,7 +982,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (ctrl->ctrl.queue_count > 1) {
- nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 8f17cbec5a0e4..f2fedd25915f9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1890,6 +1890,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_cleanup_connect_q;

if (!new) {
+ nvme_start_freeze(ctrl);
nvme_start_queues(ctrl);
if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
/*
@@ -1898,6 +1899,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
* to be safe.
*/
ret = -ENODEV;
+ nvme_unfreeze(ctrl);
goto out_wait_freeze_timed_out;
}
blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -2002,7 +2004,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
if (ctrl->queue_count <= 1)
return;
nvme_stop_admin_queue(ctrl);
- nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 5362f1a7b77c5..7a9c758e95472 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -21,6 +21,7 @@
#define IRQ_RESOURCE_NONE 0
#define IRQ_RESOURCE_GPIO 1
#define IRQ_RESOURCE_APIC 2
+#define IRQ_RESOURCE_AUTO 3

enum smi_bus_type {
SMI_I2C,
@@ -52,6 +53,18 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
int ret;

switch (inst->flags & IRQ_RESOURCE_TYPE) {
+ case IRQ_RESOURCE_AUTO:
+ ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
+ if (ret > 0) {
+ dev_dbg(&pdev->dev, "Using gpio irq\n");
+ break;
+ }
+ ret = platform_get_irq(pdev, inst->irq_idx);
+ if (ret > 0) {
+ dev_dbg(&pdev->dev, "Using platform irq\n");
+ break;
+ }
+ break;
case IRQ_RESOURCE_GPIO:
ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
break;
@@ -308,10 +321,23 @@ static const struct smi_node int3515_data = {

static const struct smi_node cs35l41_hda = {
.instances = {
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ {}
+ },
+ .bus_type = SMI_AUTO_DETECT,
+};
+
+static const struct smi_node cs35l56_hda = {
+ .instances = {
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
+ /* a 5th entry is an alias address, not a real device */
+ { "cs35l56-hda_dummy_dev" },
{}
},
.bus_type = SMI_AUTO_DETECT,
@@ -325,6 +351,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
+ { "CSC3556", (unsigned long)&cs35l56_hda },
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index e1e4f9d108879..857be0f3ae5b9 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1598,7 +1598,7 @@ NCR_700_intr(int irq, void *dev_id)
printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
#endif
resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
- } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
+ } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index d82de34f6fd73..e51e92f932fa8 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,7 +27,7 @@

#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.54"
+#define DRV_VERSION "1.6.0.55"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "

diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 26dbd347156ef..be89ce96df46c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2139,7 +2139,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
bool new_sc)

{
- int ret = SUCCESS;
+ int ret = 0;
struct fnic_pending_aborts_iter_data iter_data = {
.fnic = fnic,
.lun_dev = lr_sc->device,
@@ -2159,9 +2159,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,

/* walk again to check, if IOs are still pending in fw */
if (fnic_is_abts_pending(fnic, lr_sc))
- ret = FAILED;
+ ret = 1;

clean_pending_aborts_end:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: exit status: %d\n", __func__, ret);
return ret;
}

diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index ecff2ec83a002..c4f293d39f228 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -31,6 +31,7 @@ static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
static void qedf_schedule_recovery_handler(void *dev);
static void qedf_recovery_handler(struct work_struct *work);
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);

/*
* Driver module parameters.
@@ -3276,6 +3277,7 @@ static struct pci_driver qedf_pci_driver = {
.probe = qedf_probe,
.remove = qedf_remove,
.shutdown = qedf_shutdown,
+ .suspend = qedf_suspend,
};

static int __qedf_probe(struct pci_dev *pdev, int mode)
@@ -4005,6 +4007,22 @@ static void qedf_shutdown(struct pci_dev *pdev)
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct qedf_ctx *qedf;
+
+ if (!pdev) {
+ QEDF_ERR(NULL, "pdev is NULL.\n");
+ return -ENODEV;
+ }
+
+ qedf = pci_get_drvdata(pdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
+
+ return -EPERM;
+}
+
/*
* Recovery handler code
*/
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index f530bb0364939..9fd68d362698f 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -69,6 +69,7 @@ static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
static void qedi_recovery_handler(struct work_struct *work);
static void qedi_schedule_hw_err_handler(void *dev,
enum qed_hw_err_type err_type);
+static int qedi_suspend(struct pci_dev *pdev, pm_message_t state);

static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -2510,6 +2511,22 @@ static void qedi_shutdown(struct pci_dev *pdev)
__qedi_remove(pdev, QEDI_MODE_SHUTDOWN);
}

+static int qedi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct qedi_ctx *qedi;
+
+ if (!pdev) {
+ QEDI_ERR(NULL, "pdev is NULL.\n");
+ return -ENODEV;
+ }
+
+ qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
+
+ return -EPERM;
+}
+
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
@@ -2868,6 +2885,7 @@ static struct pci_driver qedi_pci_driver = {
.remove = qedi_remove,
.shutdown = qedi_shutdown,
.err_handler = &qedi_err_handler,
+ .suspend = qedi_suspend,
};

static int __init qedi_init(void)
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 898a0bdf8df67..711252e52d8e1 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -248,6 +248,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev,
return 0;

err_out:
+ put_device(&rc->dev);
list_del(&rc->node);
rd->component_count--;
put_device(component_dev);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 95aee1ad13834..7184e169a4a51 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
size_t length, loff_t *ppos)
{
int host, channel, id, lun;
- char *buffer, *p;
+ char *buffer, *end, *p;
int err;

if (!buf || length > PAGE_SIZE)
@@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
goto out;

err = -EINVAL;
- if (length < PAGE_SIZE)
- buffer[length] = '\0';
- else if (buffer[PAGE_SIZE-1])
- goto out;
+ if (length < PAGE_SIZE) {
+ end = buffer + length;
+ *end = '\0';
+ } else {
+ end = buffer + PAGE_SIZE - 1;
+ if (*end)
+ goto out;
+ }

/*
* Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
@@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
if (!strncmp("scsi add-single-device", buffer, 22)) {
p = buffer + 23;

- host = simple_strtoul(p, &p, 0);
- channel = simple_strtoul(p + 1, &p, 0);
- id = simple_strtoul(p + 1, &p, 0);
- lun = simple_strtoul(p + 1, &p, 0);
+ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
+ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;

err = scsi_add_single_device(host, channel, id, lun);

@@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
} else if (!strncmp("scsi remove-single-device", buffer, 25)) {
p = buffer + 26;

- host = simple_strtoul(p, &p, 0);
- channel = simple_strtoul(p + 1, &p, 0);
- id = simple_strtoul(p + 1, &p, 0);
- lun = simple_strtoul(p + 1, &p, 0);
+ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
+ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;

err = scsi_remove_single_device(host, channel, id, lun);
}
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index 8fbf3c1b1311d..cd27562ec922e 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -303,6 +303,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
"Snic Tgt: device_add, with err = %d\n",
ret);

+ put_device(&tgt->dev);
put_device(&snic->shost->shost_gendev);
spin_lock_irqsave(snic->shost->host_lock, flags);
list_del(&tgt->list);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 54a1b8514f04b..83d09c2009280 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1670,10 +1670,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
*/
static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
-#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
- if (scmnd->device->host->transportt == fc_transport_template)
- return fc_eh_timed_out(scmnd);
-#endif
return BLK_EH_RESET_TIMER;
}

diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index f8a5e79ed3b4e..ab0652d8705ac 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -359,7 +359,7 @@ static int ufs_renesas_init(struct ufs_hba *hba)
{
struct ufs_renesas_priv *priv;

- priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ufshcd_set_variant(hba, priv);
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index e20874caba363..3f5180d64931b 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -42,6 +42,7 @@ struct usb_conn_info {

struct power_supply_desc desc;
struct power_supply *charger;
+ bool initial_detection;
};

/*
@@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct work_struct *work)
dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n",
usb_role_string(info->last_role), usb_role_string(role), id, vbus);

- if (info->last_role == role) {
+ if (!info->initial_detection && info->last_role == role) {
dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role));
return;
}

+ info->initial_detection = false;
+
if (info->last_role == USB_ROLE_HOST && info->vbus)
regulator_disable(info->vbus);

@@ -258,6 +261,7 @@ static int usb_conn_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);

/* Perform initial detection */
+ info->initial_detection = true;
usb_conn_queue_dwork(info, 0);

return 0;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index c3590a0c42035..137602d9076fd 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4342,9 +4342,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
u32 count;

if (pm_runtime_suspended(dwc->dev)) {
+ dwc->pending_events = true;
+ /*
+ * Trigger runtime resume. The get() function will be balanced
+ * after processing the pending events in dwc3_process_pending
+ * events().
+ */
pm_runtime_get(dwc->dev);
disable_irq_nosync(dwc->irq_gadget);
- dwc->pending_events = true;
return IRQ_HANDLED;
}

@@ -4609,6 +4614,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
if (dwc->pending_events) {
dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ pm_runtime_put(dwc->dev);
dwc->pending_events = false;
enable_irq(dwc->irq_gadget);
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 139f471894fb5..316e9cc3987be 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -795,6 +795,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
* usb_gadget_activate() is called. For example, user mode components may
* need to be activated before the system can talk to hosts.
*
+ * This routine may sleep; it must not be called in interrupt context
+ * (such as from within a gadget driver's disconnect() callback).
+ *
* Returns zero on success, else negative errno.
*/
int usb_gadget_deactivate(struct usb_gadget *gadget)
@@ -833,6 +836,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
* This routine activates gadget which was previously deactivated with
* usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
*
+ * This routine may sleep; it must not be called in interrupt context.
+ *
* Returns zero on success, else negative errno.
*/
int usb_gadget_activate(struct usb_gadget *gadget)
@@ -1611,7 +1616,11 @@ static void gadget_unbind_driver(struct device *dev)
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
synchronize_irq(gadget->irq);
+ mutex_unlock(&udc->connect_lock);
+
udc->driver->unbind(gadget);
+
+ mutex_lock(&udc->connect_lock);
usb_gadget_udc_stop_locked(udc);
mutex_unlock(&udc->connect_lock);

diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 5e912dd29b4c9..115f05a6201a1 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data)
rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
command, 0xc0, 0, 1, data, 2);

- usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
+ if (rc == USB_STOR_XFER_GOOD)
+ usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);

return rc;
}
@@ -454,9 +455,14 @@ static int alauda_init_media(struct us_data *us)
static int alauda_check_media(struct us_data *us)
{
struct alauda_info *info = (struct alauda_info *) us->extra;
- unsigned char status[2];
+ unsigned char *status = us->iobuf;
+ int rc;

- alauda_get_media_status(us, status);
+ rc = alauda_get_media_status(us, status);
+ if (rc != USB_STOR_XFER_GOOD) {
+ status[0] = 0xF0; /* Pretend there's no media */
+ status[1] = 0;
+ }

/* Check for no media or door open */
if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10)
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 7cdf83f4c811b..7a3caf556dae9 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -60,6 +60,7 @@ struct dp_altmode {

enum dp_state state;
bool hpd;
+ bool pending_hpd;

struct mutex lock; /* device lock */
struct work_struct work;
@@ -144,8 +145,13 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
dp->state = DP_STATE_EXIT;
} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
ret = dp_altmode_configure(dp, con);
- if (!ret)
+ if (!ret) {
dp->state = DP_STATE_CONFIGURE;
+ if (dp->hpd != hpd) {
+ dp->hpd = hpd;
+ dp->pending_hpd = true;
+ }
+ }
} else {
if (dp->hpd != hpd) {
drm_connector_oob_hotplug_event(dp->connector_fwnode);
@@ -160,6 +166,16 @@ static int dp_altmode_configured(struct dp_altmode *dp)
{
sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
+ /*
+ * If the DFP_D/UFP_D sends a change in HPD when first notifying the
+ * DisplayPort driver that it is connected, then we wait until
+ * configuration is complete to signal HPD.
+ */
+ if (dp->pending_hpd) {
+ drm_connector_oob_hotplug_event(dp->connector_fwnode);
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
+ dp->pending_hpd = false;
+ }

return dp_altmode_notify(dp);
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 524099634a1d4..d5950ef9d1f35 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5322,6 +5322,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
/* Do nothing, vbus drop expected */
break;

+ case SNK_HARD_RESET_WAIT_VBUS:
+ /* Do nothing, its OK to receive vbus off events */
+ break;
+
default:
if (port->pwr_role == TYPEC_SINK && port->attached)
tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
@@ -5368,6 +5372,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
case SNK_DEBOUNCED:
/*Do nothing, still waiting for VSAFE5V for connect */
break;
+ case SNK_HARD_RESET_WAIT_VBUS:
+ /* Do nothing, its OK to receive vbus off events */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index d7aad5e8ee377..3495bc775afa3 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -436,13 +436,23 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
u64 num_bytes)
{
struct btrfs_caching_control *caching_ctl;
+ int progress;

caching_ctl = btrfs_get_caching_control(cache);
if (!caching_ctl)
return;

+ /*
+ * We've already failed to allocate from this block group, so even if
+ * there's enough space in the block group it isn't contiguous enough to
+ * allow for an allocation, so wait for at least the next wakeup tick,
+ * or for the thing to be done.
+ */
+ progress = atomic_read(&caching_ctl->progress);
+
wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
- (cache->free_space_ctl->free_space >= num_bytes));
+ (progress != atomic_read(&caching_ctl->progress) &&
+ (cache->free_space_ctl->free_space >= num_bytes)));

btrfs_put_caching_control(caching_ctl);
}
@@ -660,8 +670,10 @@ next:

if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
- if (wakeup)
+ if (wakeup) {
+ atomic_inc(&caching_ctl->progress);
wake_up(&caching_ctl->wait);
+ }
}
}
path->slots[0]++;
@@ -767,6 +779,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
init_waitqueue_head(&caching_ctl->wait);
caching_ctl->block_group = cache;
refcount_set(&caching_ctl->count, 2);
+ atomic_set(&caching_ctl->progress, 0);
btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

spin_lock(&cache->lock);
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 0a3d386823583..debd42aeae0f1 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -70,6 +70,8 @@ struct btrfs_caching_control {
wait_queue_head_t wait;
struct btrfs_work work;
struct btrfs_block_group *block_group;
+ /* Track progress of caching during allocation. */
+ atomic_t progress;
refcount_t count;
};

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f22e00dfec6c4..96369c44863a1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1455,7 +1455,8 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
goto fail;

if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
- !btrfs_is_data_reloc_root(root)) {
+ !btrfs_is_data_reloc_root(root) &&
+ is_fstree(root->root_key.objectid)) {
set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
btrfs_check_and_init_root_item(&root->root_item);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 571fcc5ae4dcf..f2ee70c03f0d5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4411,8 +4411,11 @@ have_block_group:
ret = 0;
}

- if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
+ if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
+ if (!cache_block_group_error)
+ cache_block_group_error = -EIO;
goto loop;
+ }

bg_ret = NULL;
ret = do_allocation(block_group, ffe_ctl, &bg_ret);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 58785dc7080ad..0ad69041954ff 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3015,11 +3015,12 @@ retry:
}

/*
- * the filesystem may choose to bump up nr_to_write.
+ * The filesystem may choose to bump up nr_to_write.
* We have to make sure to honor the new nr_to_write
- * at any time
+ * at any time.
*/
- nr_to_write_done = wbc->nr_to_write <= 0;
+ nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
+ wbc->nr_to_write <= 0);
}
pagevec_release(&pvec);
cond_resched();
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3c48273cd7a5a..28bcba2e05908 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1429,8 +1429,6 @@ out_unlock:
clear_bits,
page_ops);
start += cur_alloc_size;
- if (start >= end)
- return ret;
}

/*
@@ -1439,9 +1437,11 @@ out_unlock:
* space_info's bytes_may_use counter, reserved in
* btrfs_check_data_free_space().
*/
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
- clear_bits | EXTENT_CLEAR_DATA_RESV,
- page_ops);
+ if (start < end) {
+ clear_bits |= EXTENT_CLEAR_DATA_RESV;
+ extent_clear_unlock_delalloc(inode, start, end, locked_page,
+ clear_bits, page_ops);
+ }
return ret;
}

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 666a37a0ee897..d3591c7f166ad 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1902,7 +1902,39 @@ again:
err = PTR_ERR(root);
break;
}
- ASSERT(root->reloc_root == reloc_root);
+
+ if (unlikely(root->reloc_root != reloc_root)) {
+ if (root->reloc_root) {
+ btrfs_err(fs_info,
+"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
+ root->root_key.objectid,
+ root->reloc_root->root_key.objectid,
+ root->reloc_root->root_key.type,
+ root->reloc_root->root_key.offset,
+ btrfs_root_generation(
+ &root->reloc_root->root_item),
+ reloc_root->root_key.objectid,
+ reloc_root->root_key.type,
+ reloc_root->root_key.offset,
+ btrfs_root_generation(
+ &reloc_root->root_item));
+ } else {
+ btrfs_err(fs_info,
+"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
+ root->root_key.objectid,
+ reloc_root->root_key.objectid,
+ reloc_root->root_key.type,
+ reloc_root->root_key.offset,
+ btrfs_root_generation(
+ &reloc_root->root_item));
+ }
+ list_add(&reloc_root->root_list, &reloc_roots);
+ btrfs_put_root(root);
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ if (!err)
+ err = -EUCLEAN;
+ break;
+ }

/*
* set reference count to 1, so btrfs_recover_relocation
@@ -1975,7 +2007,7 @@ again:
root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
false);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- if (IS_ERR(root)) {
+ if (WARN_ON(IS_ERR(root))) {
/*
* For recovery we read the fs roots on mount,
* and if we didn't find the root then we marked
@@ -1984,17 +2016,14 @@ again:
* memory. However there's no reason we can't
* handle the error properly here just in case.
*/
- ASSERT(0);
ret = PTR_ERR(root);
goto out;
}
- if (root->reloc_root != reloc_root) {
+ if (WARN_ON(root->reloc_root != reloc_root)) {
/*
- * This is actually impossible without something
- * going really wrong (like weird race condition
- * or cosmic rays).
+ * This can happen if on-disk metadata has some
+ * corruption, e.g. bad reloc tree key offset.
*/
- ASSERT(0);
ret = -EINVAL;
goto out;
}
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 43f905ab0a18d..2b39c7f9226fe 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -442,6 +442,20 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
btrfs_item_key_to_cpu(leaf, &item_key, slot);
is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);

+ /*
+ * Bad rootid for reloc trees.
+ *
+ * Reloc trees are only for subvolume trees, other trees only need
+ * to be COWed to be relocated.
+ */
+ if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
+ !is_fstree(key->offset))) {
+ generic_err(leaf, slot,
+ "invalid reloc tree for root %lld, root id is not a subvolume tree",
+ key->offset);
+ return -EUCLEAN;
+ }
+
/* No such tree id */
if (unlikely(key->objectid == 0)) {
if (is_root_item)
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index f6e44efb58e15..d4c0895a88116 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -1101,9 +1101,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct buffer_head *ibh;
int err;

+ /*
+ * Do not dirty inodes after the log writer has been detached
+ * and its nilfs_root struct has been freed.
+ */
+ if (unlikely(nilfs_purging(nilfs)))
+ return 0;
+
err = nilfs_load_inode_block(inode, &ibh);
if (unlikely(err)) {
nilfs_warn(inode->i_sb,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 6cf64023be31e..21e8260112c8f 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2843,6 +2843,7 @@ void nilfs_detach_log_writer(struct super_block *sb)
nilfs_segctor_destroy(nilfs->ns_writer);
nilfs->ns_writer = NULL;
}
+ set_nilfs_purging(nilfs);

/* Force to free the list of dirty files */
spin_lock(&nilfs->ns_inode_lock);
@@ -2855,4 +2856,5 @@ void nilfs_detach_log_writer(struct super_block *sb)
up_write(&nilfs->ns_segctor_sem);

nilfs_dispose_list(nilfs, &garbage_list, 1);
+ clear_nilfs_purging(nilfs);
}
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 47c7dfbb7ea58..cd4ae1b8ae165 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -29,6 +29,7 @@ enum {
THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
THE_NILFS_GC_RUNNING, /* gc process is running */
THE_NILFS_SB_DIRTY, /* super block is dirty */
+ THE_NILFS_PURGING, /* disposing dirty files for cleanup */
};

/**
@@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init)
THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
THE_NILFS_FNS(SB_DIRTY, sb_dirty)
+THE_NILFS_FNS(PURGING, purging)

/*
* Mount option operations
diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
index 33b7e6c4ceffb..e881df1d10cbd 100644
--- a/fs/smb/server/smb2misc.c
+++ b/fs/smb/server/smb2misc.c
@@ -380,13 +380,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
}

if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
- if (command == SMB2_OPLOCK_BREAK_HE &&
- le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
- le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
+ if (!(command == SMB2_OPLOCK_BREAK_HE &&
+ (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 ||
+ le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) {
/* special case for SMB2.1 lease break message */
ksmbd_debug(SMB,
- "Illegal request size %d for oplock break\n",
- le16_to_cpu(pdu->StructureSize2));
+ "Illegal request size %u for command %d\n",
+ le16_to_cpu(pdu->StructureSize2), command);
return 1;
}
}
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 26cf73d664f94..f8ca44622d909 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -2340,9 +2340,16 @@ next:
break;
buf_len -= next;
eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
- if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
+ if (buf_len < sizeof(struct smb2_ea_info)) {
+ rc = -EINVAL;
break;
+ }

+ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+ le16_to_cpu(eabuf->EaValueLength)) {
+ rc = -EINVAL;
+ break;
+ }
} while (next != 0);

kfree(attr_name);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f98cfe9f188f5..008bfa68cfabc 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 054d7911bfc9f..c1637515a8a41 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -62,6 +62,7 @@ struct sk_psock_progs {

enum sk_psock_state_bits {
SK_PSOCK_TX_ENABLED,
+ SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 4e22e4f4cec85..df5cd4245f299 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -282,6 +282,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
TPM_CHIP_FLAG_SUSPENDED = BIT(8),
+ TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9),
};

#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e09ff87146c1c..5976545aa26b9 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -562,6 +562,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
return NULL;

+ if (iftype == NL80211_IFTYPE_AP_VLAN)
+ iftype = NL80211_IFTYPE_AP;
+
for (i = 0; i < sband->n_iftype_data; i++) {
const struct ieee80211_sband_iftype_data *data =
&sband->iftype_data[i];
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f3a37cacb32c3..c752b6f509791 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1192,6 +1192,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);

unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);

+static inline bool nft_use_inc(u32 *use)
+{
+ if (*use == UINT_MAX)
+ return false;
+
+ (*use)++;
+
+ return true;
+}
+
+static inline void nft_use_dec(u32 *use)
+{
+ WARN_ON_ONCE((*use)-- == 0);
+}
+
+/* For error and abort path: restore use counter to previous state. */
+static inline void nft_use_inc_restore(u32 *use)
+{
+ WARN_ON_ONCE(!nft_use_inc(use));
+}
+
+#define nft_use_dec_restore nft_use_dec
+
/**
* struct nft_table - nf_tables table
*
@@ -1275,8 +1298,8 @@ struct nft_object {
struct list_head list;
struct rhlist_head rhlhead;
struct nft_object_hash_key key;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
u16 udlen;
u8 *udata;
@@ -1378,8 +1401,8 @@ struct nft_flowtable {
char *name;
int hooknum;
int ops_len;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
/* runtime data below here */
struct list_head hook_list ____cacheline_aligned;
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 901b440238d5f..0a14c474c6f95 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -381,6 +381,7 @@ TRACE_EVENT(tcp_cong_state_set,
__field(const void *, skaddr)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -396,6 +397,7 @@ TRACE_EVENT(tcp_cong_state_set,

__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;

p32 = (__be32 *) __entry->saddr;
*p32 = inet->inet_saddr;
@@ -409,7 +411,8 @@ TRACE_EVENT(tcp_cong_state_set,
__entry->cong_state = ca_state;
),

- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index 67178e4bb282d..008990e581806 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -110,9 +110,11 @@ int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
if (issue_flags & IO_URING_F_NONBLOCK) {
/*
* Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
- * it'll always -EAGAIN
+ * it'll always -EAGAIN. Note that we test for __O_TMPFILE
+ * because O_TMPFILE includes O_DIRECTORY, which isn't a flag
+ * we need to force async for.
*/
- if (open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
+ if (open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE))
return -EAGAIN;
op.lookup_flags |= LOOKUP_CACHED;
op.open_flag |= O_NONBLOCK;
diff --git a/net/core/filter.c b/net/core/filter.c
index da71e6812ab51..419ce7c61bd6b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4064,12 +4064,6 @@ BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
if (unlikely(data_end > data_hard_end))
return -EINVAL;

- /* ALL drivers MUST init xdp->frame_sz, chicken check below */
- if (unlikely(xdp->frame_sz > PAGE_SIZE)) {
- WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz);
- return -EINVAL;
- }
-
if (unlikely(data_end < xdp->data + ETH_HLEN))
return -EINVAL;

diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 65fb6f5b21b28..296e45b6c3c0d 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1117,13 +1117,19 @@ static void sk_psock_strp_data_ready(struct sock *sk)

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
+ int ret;
+
static const struct strp_callbacks cb = {
.rcv_msg = sk_psock_strp_read,
.read_sock_done = sk_psock_strp_read_done,
.parse_msg = sk_psock_strp_parse,
};

- return strp_init(&psock->strp, sk, &cb);
+ ret = strp_init(&psock->strp, sk, &cb);
+ if (!ret)
+ sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
+
+ return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
@@ -1151,7 +1157,7 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
static void sk_psock_done_strp(struct sk_psock *psock)
{
/* Parser has been stopped */
- if (psock->progs.stream_parser)
+ if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
strp_done(&psock->strp);
}
#else
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index c84e5073c0b66..96db7409baa12 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -148,13 +148,13 @@ static void sock_map_del_link(struct sock *sk,
list_for_each_entry_safe(link, tmp, &psock->link, list) {
if (link->link_raw == link_raw) {
struct bpf_map *map = link->map;
- struct bpf_stab *stab = container_of(map, struct bpf_stab,
- map);
- if (psock->saved_data_ready && stab->progs.stream_parser)
+ struct sk_psock_progs *progs = sock_map_progs(map);
+
+ if (psock->saved_data_ready && progs->stream_parser)
strp_stop = true;
- if (psock->saved_data_ready && stab->progs.stream_verdict)
+ if (psock->saved_data_ready && progs->stream_verdict)
verdict_stop = true;
- if (psock->saved_data_ready && stab->progs.skb_verdict)
+ if (psock->saved_data_ready && progs->skb_verdict)
verdict_stop = true;
list_del(&link->list);
sk_psock_free_link(link);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index b8a24734385ef..fd2eb148d24de 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -187,7 +187,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)

/* And store cached results */
icsk->icsk_pmtu_cookie = pmtu;
- dp->dccps_mss_cache = cur_mps;
+ WRITE_ONCE(dp->dccps_mss_cache, cur_mps);

return cur_mps;
}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a06b5641287a2..abc02d25edc14 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -627,7 +627,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
return dccp_getsockopt_service(sk, len,
(__be32 __user *)optval, optlen);
case DCCP_SOCKOPT_GET_CUR_MPS:
- val = dp->dccps_mss_cache;
+ val = READ_ONCE(dp->dccps_mss_cache);
break;
case DCCP_SOCKOPT_AVAILABLE_CCIDS:
return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
@@ -736,7 +736,7 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

trace_dccp_probe(sk, len);

- if (len > dp->dccps_mss_cache)
+ if (len > READ_ONCE(dp->dccps_mss_cache))
return -EMSGSIZE;

lock_sock(sk);
@@ -769,6 +769,12 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto out_discard;
}

+ /* We need to check dccps_mss_cache after socket is locked. */
+ if (len > dp->dccps_mss_cache) {
+ rc = -EMSGSIZE;
+ goto out_discard;
+ }
+
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc != 0)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 92c02c886fe73..586b1b3e35b80 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -224,7 +224,7 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
.un.frag.__unused = 0,
.un.frag.mtu = htons(mtu),
};
- icmph->checksum = ip_compute_csum(icmph, len);
+ icmph->checksum = csum_fold(skb_checksum(skb, 0, len, 0));
skb_reset_transport_header(skb);

niph = skb_push(skb, sizeof(*niph));
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index d8ef05347fd98..9cc2879024541 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -3221,13 +3221,9 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
&rtm_dump_nexthop_cb, &filter);
if (err < 0) {
if (likely(skb->len))
- goto out;
- goto out_err;
+ err = skb->len;
}

-out:
- err = skb->len;
-out_err:
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
return err;
@@ -3367,25 +3363,19 @@ static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
dd->filter.res_bucket_nh_id != nhge->nh->id)
continue;

+ dd->ctx->bucket_index = bucket_index;
err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
RTM_NEWNEXTHOPBUCKET, portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
cb->extack);
- if (err < 0) {
- if (likely(skb->len))
- goto out;
- goto out_err;
- }
+ if (err)
+ return err;
}

dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1;
- bucket_index = 0;
+ dd->ctx->bucket_index = 0;

-out:
- err = skb->len;
-out_err:
- dd->ctx->bucket_index = bucket_index;
- return err;
+ return 0;
}

static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
@@ -3434,13 +3424,9 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb,

if (err < 0) {
if (likely(skb->len))
- goto out;
- goto out_err;
+ err = skb->len;
}

-out:
- err = skb->len;
-out_err:
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
return err;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3a553494ff164..a4d43eb45a9de 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -197,7 +197,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
static inline int ndisc_is_useropt(const struct net_device *dev,
struct nd_opt_hdr *opt)
{
- return opt->nd_opt_type == ND_OPT_RDNSS ||
+ return opt->nd_opt_type == ND_OPT_PREFIX_INFO ||
+ opt->nd_opt_type == ND_OPT_RDNSS ||
opt->nd_opt_type == ND_OPT_DNSSL ||
opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
opt->nd_opt_type == ND_OPT_PREF64 ||
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index f6f2e6417dcbe..61fefa1a82db2 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2367,7 +2367,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,

lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);

- if (flags & MPTCP_CF_FASTCLOSE) {
+ if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
/* be sure to force the tcp_disconnect() path,
* to generate the egress reset
*/
@@ -3370,7 +3370,7 @@ static void mptcp_release_cb(struct sock *sk)

if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
__mptcp_clean_una_wakeup(sk);
- if (unlikely(&msk->cb_flags)) {
+ if (unlikely(msk->cb_flags)) {
/* be sure to set the current sk state before tacking actions
* depending on sk_state, that is processing MPTCP_ERROR_REPORT
*/
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index fc00dd587a297..d77b25636125b 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -312,7 +312,6 @@ struct mptcp_sock {

u32 setsockopt_seq;
char ca_name[TCP_CA_NAME_MAX];
- struct mptcp_sock *dl_next;
};

#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 047e46dd028dd..52a747a80e88e 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1724,16 +1724,31 @@ static void subflow_state_change(struct sock *sk)
void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
- struct mptcp_sock *msk, *next, *head = NULL;
- struct request_sock *req;
- struct sock *sk;
+ struct request_sock *req, *head, *tail;
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk, *ssk;

- /* build a list of all unaccepted mptcp sockets */
+ /* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
+ * Splice the req list, so that accept() can not reach the pending ssk after
+ * the listener socket is released below.
+ */
spin_lock_bh(&queue->rskq_lock);
- for (req = queue->rskq_accept_head; req; req = req->dl_next) {
- struct mptcp_subflow_context *subflow;
- struct sock *ssk = req->sk;
+ head = queue->rskq_accept_head;
+ tail = queue->rskq_accept_tail;
+ queue->rskq_accept_head = NULL;
+ queue->rskq_accept_tail = NULL;
+ spin_unlock_bh(&queue->rskq_lock);
+
+ if (!head)
+ return;

+ /* can't acquire the msk socket lock under the subflow one,
+ * or will cause ABBA deadlock
+ */
+ release_sock(listener_ssk);
+
+ for (req = head; req; req = req->dl_next) {
+ ssk = req->sk;
if (!sk_is_mptcp(ssk))
continue;

@@ -1741,32 +1756,10 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
if (!subflow || !subflow->conn)
continue;

- /* skip if already in list */
sk = subflow->conn;
- msk = mptcp_sk(sk);
- if (msk->dl_next || msk == head)
- continue;
-
sock_hold(sk);
- msk->dl_next = head;
- head = msk;
- }
- spin_unlock_bh(&queue->rskq_lock);
- if (!head)
- return;
-
- /* can't acquire the msk socket lock under the subflow one,
- * or will cause ABBA deadlock
- */
- release_sock(listener_ssk);
-
- for (msk = head; msk; msk = next) {
- sk = (struct sock *)msk;

lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
- next = msk->dl_next;
- msk->dl_next = NULL;
-
__mptcp_unaccepted_force_close(sk);
release_sock(sk);

@@ -1790,6 +1783,13 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s

/* we are still under the listener msk socket lock */
lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+
+ /* restore the listener queue, to let the TCP code clean it up */
+ spin_lock_bh(&queue->rskq_lock);
+ WARN_ON_ONCE(queue->rskq_accept_head);
+ queue->rskq_accept_head = head;
+ queue->rskq_accept_tail = tail;
+ spin_unlock_bh(&queue->rskq_lock);
}

static int subflow_ulp_init(struct sock *sk)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index dd57a9ebe113d..f6e6273838859 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -255,8 +255,10 @@ int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain)
if (chain->bound)
return -EBUSY;

+ if (!nft_use_inc(&chain->use))
+ return -EMFILE;
+
chain->bound = true;
- chain->use++;
nft_chain_trans_bind(ctx, chain);

return 0;
@@ -439,7 +441,7 @@ static int nft_delchain(struct nft_ctx *ctx)
if (IS_ERR(trans))
return PTR_ERR(trans);

- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);
nft_deactivate_next(ctx->net, ctx->chain);

return 0;
@@ -478,7 +480,7 @@ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
/* You cannot delete the same rule twice */
if (nft_is_active_next(ctx->net, rule)) {
nft_deactivate_next(ctx->net, rule);
- ctx->chain->use--;
+ nft_use_dec(&ctx->chain->use);
return 0;
}
return -ENOENT;
@@ -645,7 +647,7 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
nft_map_deactivate(ctx, set);

nft_deactivate_next(ctx->net, set);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);

return err;
}
@@ -677,7 +679,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
return err;

nft_deactivate_next(ctx->net, obj);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);

return err;
}
@@ -712,7 +714,7 @@ static int nft_delflowtable(struct nft_ctx *ctx,
return err;

nft_deactivate_next(ctx->net, flowtable);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);

return err;
}
@@ -2358,9 +2360,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
unsigned int data_size;
int err;

- if (table->use == UINT_MAX)
- return -EOVERFLOW;
-
if (nla[NFTA_CHAIN_HOOK]) {
struct nft_stats __percpu *stats = NULL;
struct nft_chain_hook hook;
@@ -2457,6 +2456,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (err < 0)
goto err_destroy_chain;

+ if (!nft_use_inc(&table->use)) {
+ err = -EMFILE;
+ goto err_use;
+ }
+
trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
@@ -2473,10 +2477,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
goto err_unregister_hook;
}

- table->use++;
-
return 0;
+
err_unregister_hook:
+ nft_use_dec_restore(&table->use);
+err_use:
nf_tables_unregister_hook(net, table, chain);
err_destroy_chain:
nf_tables_chain_destroy(ctx);
@@ -3663,9 +3668,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
return -EINVAL;
handle = nf_tables_alloc_handle(table);

- if (chain->use == UINT_MAX)
- return -EOVERFLOW;
-
if (nla[NFTA_RULE_POSITION]) {
pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
old_rule = __nft_rule_lookup(chain, pos_handle);
@@ -3759,6 +3761,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
}
}

+ if (!nft_use_inc(&chain->use)) {
+ err = -EMFILE;
+ goto err_release_rule;
+ }
+
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
err = nft_delrule(&ctx, old_rule);
if (err < 0)
@@ -3790,7 +3797,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
}
}
kvfree(expr_info);
- chain->use++;

if (flow)
nft_trans_flow_rule(trans) = flow;
@@ -3801,6 +3807,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
return 0;

err_destroy_flow_rule:
+ nft_use_dec_restore(&chain->use);
if (flow)
nft_flow_rule_destroy(flow);
err_release_rule:
@@ -4818,9 +4825,15 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
alloc_size = sizeof(*set) + size + udlen;
if (alloc_size < size || alloc_size > INT_MAX)
return -ENOMEM;
+
+ if (!nft_use_inc(&table->use))
+ return -EMFILE;
+
set = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT);
- if (!set)
- return -ENOMEM;
+ if (!set) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }

name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL_ACCOUNT);
if (!name) {
@@ -4878,7 +4891,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
goto err_set_expr_alloc;

list_add_tail_rcu(&set->list, &table->sets);
- table->use++;
+
return 0;

err_set_expr_alloc:
@@ -4890,6 +4903,9 @@ err_set_init:
kfree(set->name);
err_set_name:
kvfree(set);
+err_alloc:
+ nft_use_dec_restore(&table->use);
+
return err;
}

@@ -5024,9 +5040,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *i;
struct nft_set_iter iter;

- if (set->use == UINT_MAX)
- return -EOVERFLOW;
-
if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
return -EBUSY;

@@ -5054,10 +5067,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
return iter.err;
}
bind:
+ if (!nft_use_inc(&set->use))
+ return -EMFILE;
+
binding->chain = ctx->chain;
list_add_tail_rcu(&binding->list, &set->bindings);
nft_set_trans_bind(ctx, set);
- set->use++;

return 0;
}
@@ -5131,7 +5146,7 @@ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
nft_clear(ctx->net, set);
}

- set->use++;
+ nft_use_inc_restore(&set->use);
}
EXPORT_SYMBOL_GPL(nf_tables_activate_set);

@@ -5147,7 +5162,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
else
list_del_rcu(&binding->list);

- set->use--;
+ nft_use_dec(&set->use);
break;
case NFT_TRANS_PREPARE:
if (nft_set_is_anonymous(set)) {
@@ -5156,7 +5171,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,

nft_deactivate_next(ctx->net, set);
}
- set->use--;
+ nft_use_dec(&set->use);
return;
case NFT_TRANS_ABORT:
case NFT_TRANS_RELEASE:
@@ -5164,7 +5179,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
nft_map_deactivate(ctx, set);

- set->use--;
+ nft_use_dec(&set->use);
fallthrough;
default:
nf_tables_unbind_set(ctx, set, binding,
@@ -5933,7 +5948,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));

if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
- (*nft_set_ext_obj(ext))->use--;
+ nft_use_dec(&(*nft_set_ext_obj(ext))->use);
kfree(elem);
}
EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
@@ -6435,8 +6450,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
set->objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
+ obj = NULL;
goto err_parse_key_end;
}
+
+ if (!nft_use_inc(&obj->use)) {
+ err = -EMFILE;
+ obj = NULL;
+ goto err_parse_key_end;
+ }
+
err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
if (err < 0)
goto err_parse_key_end;
@@ -6505,10 +6528,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (flags)
*nft_set_ext_flags(ext) = flags;

- if (obj) {
+ if (obj)
*nft_set_ext_obj(ext) = obj;
- obj->use++;
- }
+
if (ulen > 0) {
if (nft_set_ext_check(&tmpl, NFT_SET_EXT_USERDATA, ulen) < 0) {
err = -EINVAL;
@@ -6573,12 +6595,13 @@ err_element_clash:
kfree(trans);
err_elem_free:
nf_tables_set_elem_destroy(ctx, set, elem.priv);
- if (obj)
- obj->use--;
err_parse_data:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
nft_data_release(&elem.data.val, desc.type);
err_parse_key_end:
+ if (obj)
+ nft_use_dec_restore(&obj->use);
+
nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
err_parse_key:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
@@ -6659,7 +6682,7 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
case NFT_JUMP:
case NFT_GOTO:
chain = data->verdict.chain;
- chain->use++;
+ nft_use_inc_restore(&chain->use);
break;
}
}
@@ -6674,7 +6697,7 @@ static void nft_setelem_data_activate(const struct net *net,
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_hold(nft_set_ext_data(ext), set->dtype);
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
- (*nft_set_ext_obj(ext))->use++;
+ nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
}

static void nft_setelem_data_deactivate(const struct net *net,
@@ -6686,7 +6709,7 @@ static void nft_setelem_data_deactivate(const struct net *net,
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_release(nft_set_ext_data(ext), set->dtype);
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
- (*nft_set_ext_obj(ext))->use--;
+ nft_use_dec(&(*nft_set_ext_obj(ext))->use);
}

static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
@@ -7225,9 +7248,14 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,

nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);

+ if (!nft_use_inc(&table->use))
+ return -EMFILE;
+
type = nft_obj_type_get(net, objtype);
- if (IS_ERR(type))
- return PTR_ERR(type);
+ if (IS_ERR(type)) {
+ err = PTR_ERR(type);
+ goto err_type;
+ }

obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
if (IS_ERR(obj)) {
@@ -7261,7 +7289,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
goto err_obj_ht;

list_add_tail_rcu(&obj->list, &table->objects);
- table->use++;
+
return 0;
err_obj_ht:
/* queued in transaction log */
@@ -7277,6 +7305,9 @@ err_strdup:
kfree(obj);
err_init:
module_put(type->owner);
+err_type:
+ nft_use_dec_restore(&table->use);
+
return err;
}

@@ -7667,7 +7698,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
case NFT_TRANS_PREPARE:
case NFT_TRANS_ABORT:
case NFT_TRANS_RELEASE:
- flowtable->use--;
+ nft_use_dec(&flowtable->use);
fallthrough;
default:
return;
@@ -8015,9 +8046,14 @@ static int nf_tables_newflowtable(struct sk_buff *skb,

nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);

+ if (!nft_use_inc(&table->use))
+ return -EMFILE;
+
flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL_ACCOUNT);
- if (!flowtable)
- return -ENOMEM;
+ if (!flowtable) {
+ err = -ENOMEM;
+ goto flowtable_alloc;
+ }

flowtable->table = table;
flowtable->handle = nf_tables_alloc_handle(table);
@@ -8072,7 +8108,6 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
goto err5;

list_add_tail_rcu(&flowtable->list, &table->flowtables);
- table->use++;

return 0;
err5:
@@ -8089,6 +8124,9 @@ err2:
kfree(flowtable->name);
err1:
kfree(flowtable);
+flowtable_alloc:
+ nft_use_dec_restore(&table->use);
+
return err;
}

@@ -9374,7 +9412,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
|
|
*/
|
|
if (nft_set_is_anonymous(nft_trans_set(trans)) &&
|
|
!list_empty(&nft_trans_set(trans)->bindings))
|
|
- trans->ctx.table->use--;
|
|
+ nft_use_dec(&trans->ctx.table->use);
|
|
}
|
|
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
|
|
NFT_MSG_NEWSET, GFP_KERNEL);
|
|
@@ -9593,7 +9631,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
nft_trans_destroy(trans);
|
|
break;
|
|
}
|
|
- trans->ctx.table->use--;
|
|
+ nft_use_dec_restore(&trans->ctx.table->use);
|
|
nft_chain_del(trans->ctx.chain);
|
|
nf_tables_unregister_hook(trans->ctx.net,
|
|
trans->ctx.table,
|
|
@@ -9601,7 +9639,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
}
|
|
break;
|
|
case NFT_MSG_DELCHAIN:
|
|
- trans->ctx.table->use++;
|
|
+ nft_use_inc_restore(&trans->ctx.table->use);
|
|
nft_clear(trans->ctx.net, trans->ctx.chain);
|
|
nft_trans_destroy(trans);
|
|
break;
|
|
@@ -9610,7 +9648,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
nft_trans_destroy(trans);
|
|
break;
|
|
}
|
|
- trans->ctx.chain->use--;
|
|
+ nft_use_dec_restore(&trans->ctx.chain->use);
|
|
list_del_rcu(&nft_trans_rule(trans)->list);
|
|
nft_rule_expr_deactivate(&trans->ctx,
|
|
nft_trans_rule(trans),
|
|
@@ -9619,7 +9657,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
|
|
break;
|
|
case NFT_MSG_DELRULE:
|
|
- trans->ctx.chain->use++;
|
|
+ nft_use_inc_restore(&trans->ctx.chain->use);
|
|
nft_clear(trans->ctx.net, nft_trans_rule(trans));
|
|
nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
|
|
if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
|
|
@@ -9632,7 +9670,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
nft_trans_destroy(trans);
|
|
break;
|
|
}
|
|
- trans->ctx.table->use--;
|
|
+ nft_use_dec_restore(&trans->ctx.table->use);
|
|
if (nft_trans_set_bound(trans)) {
|
|
nft_trans_destroy(trans);
|
|
break;
|
|
@@ -9640,7 +9678,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
list_del_rcu(&nft_trans_set(trans)->list);
|
|
break;
|
|
case NFT_MSG_DELSET:
|
|
- trans->ctx.table->use++;
|
|
+ nft_use_inc_restore(&trans->ctx.table->use);
|
|
nft_clear(trans->ctx.net, nft_trans_set(trans));
|
|
if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
|
|
nft_map_activate(&trans->ctx, nft_trans_set(trans));
|
|
@@ -9683,12 +9721,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
|
|
nft_trans_destroy(trans);
|
|
} else {
|
|
- trans->ctx.table->use--;
|
|
+ nft_use_dec_restore(&trans->ctx.table->use);
|
|
nft_obj_del(nft_trans_obj(trans));
|
|
}
|
|
break;
|
|
case NFT_MSG_DELOBJ:
|
|
- trans->ctx.table->use++;
|
|
+ nft_use_inc_restore(&trans->ctx.table->use);
|
|
nft_clear(trans->ctx.net, nft_trans_obj(trans));
|
|
nft_trans_destroy(trans);
|
|
break;
|
|
@@ -9697,7 +9735,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
nft_unregister_flowtable_net_hooks(net,
|
|
&nft_trans_flowtable_hooks(trans));
|
|
} else {
|
|
- trans->ctx.table->use--;
|
|
+ nft_use_dec_restore(&trans->ctx.table->use);
|
|
list_del_rcu(&nft_trans_flowtable(trans)->list);
|
|
nft_unregister_flowtable_net_hooks(net,
|
|
&nft_trans_flowtable(trans)->hook_list);
|
|
@@ -9708,7 +9746,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
|
|
list_splice(&nft_trans_flowtable_hooks(trans),
|
|
&nft_trans_flowtable(trans)->hook_list);
|
|
} else {
|
|
- trans->ctx.table->use++;
|
|
+ nft_use_inc_restore(&trans->ctx.table->use);
|
|
nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
|
|
}
|
|
nft_trans_destroy(trans);
|
|
@@ -10161,8 +10199,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
|
|
if (desc->flags & NFT_DATA_DESC_SETELEM &&
|
|
chain->flags & NFT_CHAIN_BINDING)
|
|
return -EINVAL;
|
|
+ if (!nft_use_inc(&chain->use))
|
|
+ return -EMFILE;
|
|
|
|
- chain->use++;
|
|
data->verdict.chain = chain;
|
|
break;
|
|
}
|
|
@@ -10180,7 +10219,7 @@ static void nft_verdict_uninit(const struct nft_data *data)
|
|
case NFT_JUMP:
|
|
case NFT_GOTO:
|
|
chain = data->verdict.chain;
|
|
- chain->use--;
|
|
+ nft_use_dec(&chain->use);
|
|
break;
|
|
}
|
|
}
|
|
@@ -10349,11 +10388,11 @@ int __nft_release_basechain(struct nft_ctx *ctx)
|
|
nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
|
|
list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
|
|
list_del(&rule->list);
|
|
- ctx->chain->use--;
|
|
+ nft_use_dec(&ctx->chain->use);
|
|
nf_tables_rule_release(ctx, rule);
|
|
}
|
|
nft_chain_del(ctx->chain);
|
|
- ctx->table->use--;
|
|
+ nft_use_dec(&ctx->table->use);
|
|
nf_tables_chain_destroy(ctx);
|
|
|
|
return 0;
|
|
@@ -10406,18 +10445,18 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
|
|
ctx.chain = chain;
|
|
list_for_each_entry_safe(rule, nr, &chain->rules, list) {
|
|
list_del(&rule->list);
|
|
- chain->use--;
|
|
+ nft_use_dec(&chain->use);
|
|
nf_tables_rule_release(&ctx, rule);
|
|
}
|
|
}
|
|
list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
|
|
list_del(&flowtable->list);
|
|
- table->use--;
|
|
+ nft_use_dec(&table->use);
|
|
nf_tables_flowtable_destroy(flowtable);
|
|
}
|
|
list_for_each_entry_safe(set, ns, &table->sets, list) {
|
|
list_del(&set->list);
|
|
- table->use--;
|
|
+ nft_use_dec(&table->use);
|
|
if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
|
|
nft_map_deactivate(&ctx, set);
|
|
|
|
@@ -10425,13 +10464,13 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
|
|
}
|
|
list_for_each_entry_safe(obj, ne, &table->objects, list) {
|
|
nft_obj_del(obj);
|
|
- table->use--;
|
|
+ nft_use_dec(&table->use);
|
|
nft_obj_destroy(&ctx, obj);
|
|
}
|
|
list_for_each_entry_safe(chain, nc, &table->chains, list) {
|
|
ctx.chain = chain;
|
|
nft_chain_del(chain);
|
|
- table->use--;
|
|
+ nft_use_dec(&table->use);
|
|
nf_tables_chain_destroy(&ctx);
|
|
}
|
|
nf_tables_table_destroy(&ctx);
|
|
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index a25c88bc8b750..8a43f6f9c90b6 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -404,8 +404,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
if (IS_ERR(flowtable))
return PTR_ERR(flowtable);

+ if (!nft_use_inc(&flowtable->use))
+ return -EMFILE;
+
priv->flowtable = flowtable;
- flowtable->use++;

return nf_ct_netns_get(ctx->net, ctx->family);
}
@@ -424,7 +426,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
{
struct nft_flow_offload *priv = nft_expr_priv(expr);

- priv->flowtable->use++;
+ nft_use_inc_restore(&priv->flowtable->use);
}

static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 391c18e4b3ebd..5f59dbab3e933 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -168,7 +168,7 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
nft_immediate_chain_deactivate(ctx, chain, phase);
nft_chain_del(chain);
chain->bound = false;
- chain->table->use--;
+ nft_use_dec(&chain->table->use);
break;
}
break;
@@ -207,7 +207,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
* let the transaction records release this chain and its rules.
*/
if (chain->bound) {
- chain->use--;
+ nft_use_dec(&chain->use);
break;
}

@@ -215,9 +215,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
chain_ctx = *ctx;
chain_ctx.chain = chain;

- chain->use--;
+ nft_use_dec(&chain->use);
list_for_each_entry_safe(rule, n, &chain->rules, list) {
- chain->use--;
+ nft_use_dec(&chain->use);
list_del(&rule->list);
nf_tables_rule_destroy(&chain_ctx, rule);
}
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 7f8e480b6be5b..0017bd3418722 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx,
if (IS_ERR(obj))
return -ENOENT;

+ if (!nft_use_inc(&obj->use))
+ return -EMFILE;
+
nft_objref_priv(expr) = obj;
- obj->use++;

return 0;
}
@@ -71,7 +73,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx,
if (phase == NFT_TRANS_COMMIT)
return;

- obj->use--;
+ nft_use_dec(&obj->use);
}

static void nft_objref_activate(const struct nft_ctx *ctx,
@@ -79,7 +81,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx,
{
struct nft_object *obj = nft_objref_priv(expr);

- obj->use++;
+ nft_use_inc_restore(&obj->use);
}

static struct nft_expr_type nft_objref_type;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1681068400733..451bd8bfafd23 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -404,18 +404,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
union tpacket_uhdr h;

+ /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
+
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
- h.h1->tp_status = status;
+ WRITE_ONCE(h.h1->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
break;
case TPACKET_V2:
- h.h2->tp_status = status;
+ WRITE_ONCE(h.h2->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
break;
case TPACKET_V3:
- h.h3->tp_status = status;
+ WRITE_ONCE(h.h3->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h3->tp_status));
break;
default:
@@ -432,17 +434,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame)

smp_rmb();

+ /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
+
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
- return h.h1->tp_status;
+ return READ_ONCE(h.h1->tp_status);
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
- return h.h2->tp_status;
+ return READ_ONCE(h.h2->tp_status);
case TPACKET_V3:
flush_dcache_page(pgv_to_page(&h.h3->tp_status));
- return h.h3->tp_status;
+ return READ_ONCE(h.h3->tp_status);
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aa9842158df0a..d0e045116d4e9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -773,12 +773,10 @@ static void dist_free(struct disttable *d)
* signed 16 bit values.
*/

-static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
- const struct nlattr *attr)
+static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
{
size_t n = nla_len(attr)/sizeof(__s16);
const __s16 *data = nla_data(attr);
- spinlock_t *root_lock;
struct disttable *d;
int i;

@@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
for (i = 0; i < n; i++)
d->table[i] = data[i];

- root_lock = qdisc_root_sleeping_lock(sch);
-
- spin_lock_bh(root_lock);
- swap(*tbl, d);
- spin_unlock_bh(root_lock);
-
- dist_free(d);
+ *tbl = d;
return 0;
}

@@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
{
struct netem_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_NETEM_MAX + 1];
+ struct disttable *delay_dist = NULL;
+ struct disttable *slot_dist = NULL;
struct tc_netem_qopt *qopt;
struct clgstate old_clg;
int old_loss_model = CLG_RANDOM;
@@ -966,6 +960,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
if (ret < 0)
return ret;

+ if (tb[TCA_NETEM_DELAY_DIST]) {
+ ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
+ if (ret)
+ goto table_free;
+ }
+
+ if (tb[TCA_NETEM_SLOT_DIST]) {
+ ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
+ if (ret)
+ goto table_free;
+ }
+
sch_tree_lock(sch);
/* backup q->clg and q->loss_model */
old_clg = q->clg;
@@ -975,26 +981,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
if (ret) {
q->loss_model = old_loss_model;
+ q->clg = old_clg;
goto unlock;
}
} else {
q->loss_model = CLG_RANDOM;
}

- if (tb[TCA_NETEM_DELAY_DIST]) {
- ret = get_dist_table(sch, &q->delay_dist,
- tb[TCA_NETEM_DELAY_DIST]);
- if (ret)
- goto get_table_failure;
- }
-
- if (tb[TCA_NETEM_SLOT_DIST]) {
- ret = get_dist_table(sch, &q->slot_dist,
- tb[TCA_NETEM_SLOT_DIST]);
- if (ret)
- goto get_table_failure;
- }
-
+ if (delay_dist)
+ swap(q->delay_dist, delay_dist);
+ if (slot_dist)
+ swap(q->slot_dist, slot_dist);
sch->limit = qopt->limit;

q->latency = PSCHED_TICKS2NS(qopt->latency);
@@ -1044,17 +1041,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,

unlock:
sch_tree_unlock(sch);
- return ret;

-get_table_failure:
- /* recover clg and loss_model, in case of
- * q->clg and q->loss_model were modified
- * in get_loss_clg()
- */
- q->clg = old_clg;
- q->loss_model = old_loss_model;
-
- goto unlock;
+table_free:
+ dist_free(delay_dist);
+ dist_free(slot_dist);
+ return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5ae0a54a823b5..5712a5297bd01 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -438,13 +438,60 @@ out:
return rc;
}

+/* copy only relevant settings and flags of SOL_SOCKET level from smc to
+ * clc socket (since smc is not called for these options from net/core)
+ */
+
+#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
+ (1UL << SOCK_KEEPOPEN) | \
+ (1UL << SOCK_LINGER) | \
+ (1UL << SOCK_BROADCAST) | \
+ (1UL << SOCK_TIMESTAMP) | \
+ (1UL << SOCK_DBG) | \
+ (1UL << SOCK_RCVTSTAMP) | \
+ (1UL << SOCK_RCVTSTAMPNS) | \
+ (1UL << SOCK_LOCALROUTE) | \
+ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
+ (1UL << SOCK_RXQ_OVFL) | \
+ (1UL << SOCK_WIFI_STATUS) | \
+ (1UL << SOCK_NOFCS) | \
+ (1UL << SOCK_FILTER_LOCKED) | \
+ (1UL << SOCK_TSTAMP_NEW))
+
+/* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */
+static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
+ unsigned long mask)
+{
+ struct net *nnet = sock_net(nsk);
+
+ nsk->sk_userlocks = osk->sk_userlocks;
+ if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+ nsk->sk_sndbuf = osk->sk_sndbuf;
+ } else {
+ if (mask == SK_FLAGS_SMC_TO_CLC)
+ WRITE_ONCE(nsk->sk_sndbuf,
+ READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
+ else
+ WRITE_ONCE(nsk->sk_sndbuf,
+ 2 * READ_ONCE(nnet->smc.sysctl_wmem));
+ }
+ if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
+ nsk->sk_rcvbuf = osk->sk_rcvbuf;
+ } else {
+ if (mask == SK_FLAGS_SMC_TO_CLC)
+ WRITE_ONCE(nsk->sk_rcvbuf,
+ READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
+ else
+ WRITE_ONCE(nsk->sk_rcvbuf,
+ 2 * READ_ONCE(nnet->smc.sysctl_rmem));
+ }
+}
+
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
unsigned long mask)
{
/* options we don't get control via setsockopt for */
nsk->sk_type = osk->sk_type;
- nsk->sk_sndbuf = osk->sk_sndbuf;
- nsk->sk_rcvbuf = osk->sk_rcvbuf;
nsk->sk_sndtimeo = osk->sk_sndtimeo;
nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
nsk->sk_mark = READ_ONCE(osk->sk_mark);
@@ -455,26 +502,10 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,

nsk->sk_flags &= ~mask;
nsk->sk_flags |= osk->sk_flags & mask;
+
+ smc_adjust_sock_bufsizes(nsk, osk, mask);
}

-#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
- (1UL << SOCK_KEEPOPEN) | \
- (1UL << SOCK_LINGER) | \
- (1UL << SOCK_BROADCAST) | \
- (1UL << SOCK_TIMESTAMP) | \
- (1UL << SOCK_DBG) | \
- (1UL << SOCK_RCVTSTAMP) | \
- (1UL << SOCK_RCVTSTAMPNS) | \
- (1UL << SOCK_LOCALROUTE) | \
- (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
- (1UL << SOCK_RXQ_OVFL) | \
- (1UL << SOCK_WIFI_STATUS) | \
- (1UL << SOCK_NOFCS) | \
- (1UL << SOCK_FILTER_LOCKED) | \
- (1UL << SOCK_TSTAMP_NEW))
-/* copy only relevant settings and flags of SOL_SOCKET level from smc to
- * clc socket (since smc is not called for these options from net/core)
- */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
@@ -2466,8 +2497,6 @@ static void smc_tcp_listen_work(struct work_struct *work)
sock_hold(lsk); /* sock_put in smc_listen_work */
INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
smc_copy_sock_settings_to_smc(new_smc);
- new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
- new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
sock_hold(&new_smc->sk); /* sock_put in passive closing */
if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
sock_put(&new_smc->sk);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index bf69c9d6d06c0..1849827884735 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -52,6 +52,8 @@ static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

+static struct page *dummy_page;
+
static void tls_device_free_ctx(struct tls_context *ctx)
{
if (ctx->tx_conf == TLS_HW) {
@@ -313,36 +315,33 @@ static int tls_push_record(struct sock *sk,
return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

-static int tls_device_record_close(struct sock *sk,
- struct tls_context *ctx,
- struct tls_record_info *record,
- struct page_frag *pfrag,
- unsigned char record_type)
+static void tls_device_record_close(struct sock *sk,
+ struct tls_context *ctx,
+ struct tls_record_info *record,
+ struct page_frag *pfrag,
+ unsigned char record_type)
{
struct tls_prot_info *prot = &ctx->prot_info;
- int ret;
+ struct page_frag dummy_tag_frag;

/* append tag
* device will fill in the tag, we just need to append a placeholder
* use socket memory to improve coalescing (re-using a single buffer
* increases frag count)
- * if we can't allocate memory now, steal some back from data
+ * if we can't allocate memory now use the dummy page
*/
- if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
- sk->sk_allocation))) {
- ret = 0;
- tls_append_frag(record, pfrag, prot->tag_size);
- } else {
- ret = prot->tag_size;
- if (record->len <= prot->overhead_size)
- return -ENOMEM;
+ if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
+ !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
+ dummy_tag_frag.page = dummy_page;
+ dummy_tag_frag.offset = 0;
+ pfrag = &dummy_tag_frag;
}
+ tls_append_frag(record, pfrag, prot->tag_size);

/* fill prepend */
tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
record->len - prot->overhead_size,
record_type);
- return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
@@ -535,18 +534,8 @@ last_record:

if (done || record->len >= max_open_record_len ||
(record->num_frags >= MAX_SKB_FRAGS - 1)) {
- rc = tls_device_record_close(sk, tls_ctx, record,
- pfrag, record_type);
- if (rc) {
- if (rc > 0) {
- size += rc;
- } else {
- size = orig_size;
- destroy_record(record);
- ctx->open_record = NULL;
- break;
- }
- }
+ tls_device_record_close(sk, tls_ctx, record,
+ pfrag, record_type);

rc = tls_push_record(sk,
tls_ctx,
@@ -1466,14 +1455,26 @@ int __init tls_device_init(void)
{
int err;

- destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
- if (!destruct_wq)
+ dummy_page = alloc_page(GFP_KERNEL);
+ if (!dummy_page)
return -ENOMEM;

+ destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+ if (!destruct_wq) {
+ err = -ENOMEM;
+ goto err_free_dummy;
+ }
+
err = register_netdevice_notifier(&tls_dev_notifier);
if (err)
- destroy_workqueue(destruct_wq);
+ goto err_destroy_wq;

+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(destruct_wq);
+err_free_dummy:
+ put_page(dummy_page);
return err;
}

@@ -1482,4 +1483,5 @@ void __exit tls_device_cleanup(void)
unregister_netdevice_notifier(&tls_dev_notifier);
destroy_workqueue(destruct_wq);
clean_acked_data_flush();
+ put_page(dummy_page);
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 087c0c442e231..c2363d44a1ffc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5378,8 +5378,11 @@ nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr *attrs)
if (!wiphy->mbssid_max_interfaces)
return ERR_PTR(-EINVAL);

- nla_for_each_nested(nl_elems, attrs, rem_elems)
+ nla_for_each_nested(nl_elems, attrs, rem_elems) {
+ if (num_elems >= 255)
+ return ERR_PTR(-EINVAL);
num_elems++;
+ }

elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL);
if (!elems)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 22bf10ffbf2d1..f7592638e61d3 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -994,6 +994,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
err = xp_alloc_tx_descs(xs->pool, xs);
if (err) {
xp_put_pool(xs->pool);
+ xs->pool = NULL;
sockfd_put(sock);
goto out_unlock;
}
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 9a1895747b153..84c730da36dd3 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -71,7 +71,9 @@
#include "varasm.h"
#include "stor-layout.h"
#include "internal-fn.h"
+#include "gimple.h"
#include "gimple-expr.h"
+#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "context.h"
#include "tree-ssa-alias.h"
@@ -85,10 +87,8 @@
#include "tree-eh.h"
#include "stmt.h"
#include "gimplify.h"
-#include "gimple.h"
#include "tree-phinodes.h"
#include "tree-cfg.h"
-#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "ssa-iterators.h"

diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index a61c7bcbc72da..63f468bf8245c 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -177,7 +177,7 @@ void regression1_test(void)
nr_threads = 2;
pthread_barrier_init(&worker_barrier, NULL, nr_threads);

- threads = malloc(nr_threads * sizeof(pthread_t *));
+ threads = malloc(nr_threads * sizeof(*threads));

for (i = 0; i < nr_threads; i++) {
arg = i;
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index 0f5e88c8f4ffe..df8d90b51867a 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -1981,6 +1981,11 @@ basic()

run_cmd "$IP link set dev lo up"

+ # Dump should not loop endlessly when maximum nexthop ID is configured.
+ run_cmd "$IP nexthop add id $((2**32-1)) blackhole"
+ run_cmd "timeout 5 $IP nexthop"
+ log_test $? 0 "Maximum nexthop ID dump"
+
#
# groups
#
@@ -2201,6 +2206,11 @@ basic_res()
run_cmd "$IP nexthop bucket list fdb"
log_test $? 255 "Dump all nexthop buckets with invalid 'fdb' keyword"

+ # Dump should not loop endlessly when maximum nexthop ID is configured.
+ run_cmd "$IP nexthop add id $((2**32-1)) group 1/2 type resilient buckets 4"
+ run_cmd "timeout 5 $IP nexthop bucket"
+ log_test $? 0 "Maximum nexthop ID dump"
+
#
# resilient nexthop buckets get requests
#
diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh
index dbb9fcf759e0f..aa2eafb7b2437 100755
--- a/tools/testing/selftests/net/forwarding/ethtool.sh
+++ b/tools/testing/selftests/net/forwarding/ethtool.sh
@@ -286,6 +286,8 @@ different_speeds_autoneg_on()
ethtool -s $h1 autoneg on
}

+skip_on_veth
+
trap cleanup EXIT

setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
index 072faa77f53bd..17f89c3b7c020 100755
--- a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
+++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
@@ -108,6 +108,8 @@ no_cable()
ip link set dev $swp3 down
}

+skip_on_veth
+
setup_prepare

tests_run
diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
index eb9ec4a68f84b..7594bbb490292 100755
--- a/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
+++ b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
@@ -99,6 +99,8 @@ test_stats_rx()
test_stats g2a rx
}

+skip_on_veth
+
trap cleanup EXIT

setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
index 9f5b3e2e5e954..49fa94b53a1ca 100755
--- a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
+++ b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
@@ -14,6 +14,8 @@ ALL_TESTS="
NUM_NETIFS=4
source lib.sh

+require_command $TROUTE6
+
h1_create()
{
simple_if_init $h1 2001:1:1::2/64
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index f4721f1b2886b..06027772cf79a 100755
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -30,6 +30,7 @@ REQUIRE_MZ=${REQUIRE_MZ:=yes}
REQUIRE_MTOOLS=${REQUIRE_MTOOLS:=no}
STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no}
TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=}
+TROUTE6=${TROUTE6:=traceroute6}

relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -137,6 +138,17 @@ check_locked_port_support()
fi
}

+skip_on_veth()
+{
+ local kind=$(ip -j -d link show dev ${NETIFS[p1]} |
+ jq -r '.[].linkinfo.info_kind')
+
+ if [[ $kind == veth ]]; then
+ echo "SKIP: Test cannot be run with veth pairs"
+ exit $ksft_skip
+ fi
+}
+
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
exit $ksft_skip
@@ -199,6 +211,11 @@ create_netif_veth()
for ((i = 1; i <= NUM_NETIFS; ++i)); do
local j=$((i+1))

+ if [ -z ${NETIFS[p$i]} ]; then
+ echo "SKIP: Cannot create interface. Name not specified"
+ exit $ksft_skip
+ fi
+
ip link show dev ${NETIFS[p$i]} &> /dev/null
if [[ $? -ne 0 ]]; then
ip link add ${NETIFS[p$i]} type veth \
diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings
new file mode 100644
index 0000000000000..e7b9417537fbc
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
index 683711f41aa9b..b1daad19b01ec 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@ -52,8 +52,8 @@ match_dst_mac_test()
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched on a wrong filter"

- tc_check_packets "dev $h2 ingress" 102 1
- check_err $? "Did not match on correct filter"
+ tc_check_packets "dev $h2 ingress" 102 0
+ check_fail $? "Did not match on correct filter"

tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
@@ -78,8 +78,8 @@ match_src_mac_test()
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched on a wrong filter"

- tc_check_packets "dev $h2 ingress" 102 1
- check_err $? "Did not match on correct filter"
+ tc_check_packets "dev $h2 ingress" 102 0
+ check_fail $? "Did not match on correct filter"

tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index a3108c3cff471..7b20878a1af59 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -650,6 +650,7 @@ pm_nl_del_endpoint()
local addr=$3

if [ $ip_mptcp -eq 1 ]; then
+ [ $id -ne 0 ] && addr=''
ip -n $ns mptcp endpoint delete id $id $addr
else
ip netns exec $ns ./pm_nl_ctl del $id $addr
@@ -740,10 +741,11 @@ pm_nl_check_endpoint()
fi

if [ $ip_mptcp -eq 1 ]; then
+ # get line and trim trailing whitespace
line=$(ip -n $ns mptcp endpoint show $id)
+ line="${line% }"
# the dump order is: address id flags port dev
- expected_line="$addr"
- [ -n "$addr" ] && expected_line="$expected_line $addr"
+ [ -n "$addr" ] && expected_line="$addr"
expected_line="$expected_line $id"
[ -n "$_flags" ] && expected_line="$expected_line ${_flags//","/" "}"
[ -n "$dev" ] && expected_line="$expected_line $dev"
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index 3a173e184566c..cb5a9fc629fed 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
CLANG_FLAGS += -no-integrated-as
endif

+top_srcdir = ../../../..
+
CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -L$(OUTPUT) -Wl,-rpath=./ \
- $(CLANG_FLAGS)
+ $(CLANG_FLAGS) -I$(top_srcdir)/tools/include
LDLIBS += -lpthread -ldl

# Own dependencies because we only want to build against 1st prerequisite, but
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index b736a5169aad0..e20191fb40d49 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -29,6 +29,8 @@
#include <dlfcn.h>
#include <stddef.h>

+#include <linux/compiler.h>
+
#include "../kselftest.h"
#include "rseq.h"