mirror of https://github.com/armbian/build.git (synced 2025-08-18 09:06:58 +02:00)
diff --git a/Makefile b/Makefile
index d6bc9f597e8b8..bc4adb561a7cf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 79
+SUBLEVEL = 80
EXTRAVERSION =
NAME = Curry Ramen

diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 95e731676cea4..961daac653261 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -339,6 +339,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
GPIO_LOOKUP_IDX("G", 0, NULL, 1,
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ { }
},
};

diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index bfa3580429d10..61f0186447dad 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -607,6 +607,7 @@ spi0: spi@ff1d0000 {
clock-names = "spiclk", "apb_pclk";
dmas = <&dmac 12>, <&dmac 13>;
dma-names = "tx", "rx";
+ num-cs = <2>;
pinctrl-names = "default";
pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>;
#address-cells = <1>;
@@ -622,6 +623,7 @@ spi1: spi@ff1d8000 {
clock-names = "spiclk", "apb_pclk";
dmas = <&dmac 14>, <&dmac 15>;
dma-names = "tx", "rx";
+ num-cs = <2>;
pinctrl-names = "default";
pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>;
#address-cells = <1>;
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index d720b6f7e5f9c..da18413712c04 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -343,6 +343,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
extern unsigned int sme_get_vl(void);
extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
+extern void sme_suspend_exit(void);

/*
* Return how many bytes of memory are required to store the full SME
@@ -372,6 +373,7 @@ static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
+static inline void sme_suspend_exit(void) { }

static inline size_t za_state_size(struct task_struct const *task)
{
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 8c226d79abdfc..59b5a16bab5d6 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1347,6 +1347,20 @@ void __init sme_setup(void)
get_sme_default_vl());
}

+void sme_suspend_exit(void)
+{
+ u64 smcr = 0;
+
+ if (!system_supports_sme())
+ return;
+
+ if (system_supports_fa64())
+ smcr |= SMCR_ELx_FA64;
+
+ write_sysreg_s(smcr, SYS_SMCR_EL1);
+ write_sysreg_s(0, SYS_SMPRI_EL1);
+}
+
#endif /* CONFIG_ARM64_SME */

static void sve_init_regs(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 8b02d310838f9..064d996cc55b2 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -11,6 +11,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
+#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
@@ -77,6 +78,8 @@ void notrace __cpu_suspend_exit(void)
*/
spectre_v4_enable_mitigation(NULL);

+ sme_suspend_exit();
+
/* Restore additional feature-specific configuration */
ptrauth_suspend_exit();
}
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 3c344e4cd4cad..092327665a6ef 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -462,6 +462,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
}

irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+ if (!irq)
+ continue;
+
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = pendmask & (1U << bit_nr);
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -1427,6 +1430,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,

for (i = 0; i < irq_count; i++) {
irq = vgic_get_irq(kvm, NULL, intids[i]);
+ if (!irq)
+ continue;

update_affinity(irq, vcpu2);

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index e737dc8cd660c..fa3171f563274 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -9,6 +9,7 @@ config LOONGARCH
select ARCH_BINFMT_ELF_STATE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
@@ -80,6 +81,7 @@ config LOONGARCH
select GPIOLIB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -461,23 +463,6 @@ config PHYSICAL_START
specified in the "crashkernel=YM@XM" command line boot parameter
passed to the panic-ed kernel).

-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- default y
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
endmenu

config ARCH_SELECT_MEMORY_MODEL
@@ -495,10 +480,6 @@ config ARCH_SPARSEMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/mm/numa.rst> for more.

-config ARCH_ENABLE_THP_MIGRATION
- def_bool y
- depends on TRANSPARENT_HUGEPAGE
-
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index e0404df2c952f..18a2b37f4aea3 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -297,6 +297,7 @@ void play_dead(void)
addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
} while (addr == 0);

+ local_irq_disable();
init_fn = (void *)TO_CACHE(addr);
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 246c6a6b02614..5b778995d4483 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2007,7 +2007,13 @@ unsigned long vi_handlers[64];

void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
- memblock_reserve(addr, size);
+ /*
+ * reserve exception space on CPUs other than CPU0
+ * is too late, since memblock is unavailable when APs
+ * up
+ */
+ if (smp_processor_id() == 0)
+ memblock_reserve(addr, size);
}

void __init *set_except_vector(int n, void *addr)
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 1f6c776d80813..c67883487ecd3 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -171,7 +171,6 @@ static int __init processor_probe(struct parisc_device *dev)
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;

- set_cpu_possible(cpuid, true);
store_cpu_topology(cpuid);

#ifdef CONFIG_SMP
@@ -466,13 +465,6 @@ static struct parisc_driver cpu_driver __refdata = {
*/
void __init processor_init(void)
{
- unsigned int cpu;
-
reset_cpu_topology();
-
- /* reset possible mask. We will mark those which are possible. */
- for_each_possible_cpu(cpu)
- set_cpu_possible(cpu, false);
-
register_parisc_driver(&cpu_driver);
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 2c99f9552b2f5..394c69fda399e 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -241,7 +241,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
- zpci_memcpy_toio(to, from, count);
+ zpci_memcpy_toio(to, from, count * 8);
}

static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 2f123d4fb85b5..d3706de91a934 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -222,6 +222,8 @@ extern void srso_alias_untrain_ret(void);
extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

+extern void (*x86_return_thunk)(void);
+
#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6b8c93989aa31..69f85e2746119 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -536,6 +536,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
}

#ifdef CONFIG_RETHUNK
+
/*
* Rewrite the compiler generated return thunk tail-calls.
*
@@ -551,14 +552,18 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
int i = 0;

- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
- return -1;
+ if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+ if (x86_return_thunk == __x86_return_thunk)
+ return -1;

- bytes[i++] = RET_INSN_OPCODE;
+ i = JMP32_INSN_SIZE;
+ __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
+ } else {
+ bytes[i++] = RET_INSN_OPCODE;
+ }

for (; i < insn->length;)
bytes[i++] = INT3_INSN_OPCODE;
-
return i;
}

diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index e07234ec7e237..ec51ce713dea4 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -361,7 +361,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)

ip = trampoline + size;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
- __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
+ __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
else
memcpy(ip, retq, sizeof(retq));

diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 3fbb491688275..b32134b093ec8 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -80,7 +80,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,

case RET:
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
- code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk);
+ code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
else
code = &retinsn;
break;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index aa39d678fe81d..dae5c952735c7 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -961,7 +961,7 @@ static int __init cmp_memblk(const void *a, const void *b)
const struct numa_memblk *ma = *(const struct numa_memblk **)a;
const struct numa_memblk *mb = *(const struct numa_memblk **)b;

- return ma->start - mb->start;
+ return (ma->start > mb->start) - (ma->start < mb->start);
}

static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
@@ -971,14 +971,12 @@ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
* @start: address to begin fill
* @end: address to end fill
*
- * Find and extend numa_meminfo memblks to cover the @start-@end
- * physical address range, such that the first memblk includes
- * @start, the last memblk includes @end, and any gaps in between
- * are filled.
+ * Find and extend numa_meminfo memblks to cover the physical
+ * address range @start-@end
*
* RETURNS:
* 0 : Success
- * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
+ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
*/

int __init numa_fill_memblks(u64 start, u64 end)
@@ -990,17 +988,14 @@ int __init numa_fill_memblks(u64 start, u64 end)

/*
* Create a list of pointers to numa_meminfo memblks that
- * overlap start, end. Exclude (start == bi->end) since
- * end addresses in both a CFMWS range and a memblk range
- * are exclusive.
- *
- * This list of pointers is used to make in-place changes
- * that fill out the numa_meminfo memblks.
+ * overlap start, end. The list is used to make in-place
+ * changes that fill out the numa_meminfo memblks.
*/
for (int i = 0; i < mi->nr_blks; i++) {
struct numa_memblk *bi = &mi->blk[i];

- if (start < bi->end && end >= bi->start) {
+ if (memblock_addrs_overlap(start, end - start, bi->start,
+ bi->end - bi->start)) {
blk[count] = &mi->blk[i];
count++;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index b69aee6245e4a..7913440c0fd46 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -432,7 +432,7 @@ static void emit_return(u8 **pprog, u8 *ip)
u8 *prog = *pprog;

if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
- emit_jump(&prog, &__x86_return_thunk, ip);
+ emit_jump(&prog, x86_return_thunk, ip);
} else {
EMIT1(0xC3); /* ret */
if (IS_ENABLED(CONFIG_SLS))
diff --git a/block/blk-map.c b/block/blk-map.c
index 66da9e2b19abf..b337ae347bfa3 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -203,12 +203,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
/*
* success
*/
- if ((iov_iter_rw(iter) == WRITE &&
- (!map_data || !map_data->null_mapped)) ||
- (map_data && map_data->from_user)) {
+ if (iov_iter_rw(iter) == WRITE &&
+ (!map_data || !map_data->null_mapped)) {
ret = bio_copy_from_iter(bio, iter);
if (ret)
goto cleanup;
+ } else if (map_data && map_data->from_user) {
+ struct iov_iter iter2 = *iter;
+
+ /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
+ iter2.data_source = ITER_SOURCE;
+ ret = bio_copy_from_iter(bio, &iter2);
+ if (ret)
+ goto cleanup;
} else {
if (bmd->is_our_pages)
zero_fill_bio(bio);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 805645efb3ccf..1790a2ecb9fac 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -49,6 +49,7 @@ enum {
enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
+ board_ahci_43bit_dma,
board_ahci_ign_iferr,
board_ahci_low_power,
board_ahci_no_debounce_delay,
@@ -129,6 +130,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_43bit_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_ign_iferr] = {
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
@@ -597,14 +605,19 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */

- /* Asmedia */
- { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
- { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
- { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
- { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
- { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
- { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
+ /* ASMedia */
+ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
+ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
+ { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
+ { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
+ { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
+ { PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
+ { PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
+ { PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */
+ { PCI_VDEVICE(ASMEDIA, 0x1165), board_ahci }, /* ASM1165 */
+ { PCI_VDEVICE(ASMEDIA, 0x1166), board_ahci }, /* ASM1166 */

/*
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
@@ -658,6 +671,11 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
+ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+ hpriv->saved_port_map = 0x3f;
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
@@ -944,11 +962,20 @@ static int ahci_pci_device_resume(struct device *dev)

#endif /* CONFIG_PM */

-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+static int ahci_configure_dma_masks(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- const int dma_bits = using_dac ? 64 : 32;
+ int dma_bits;
int rc;

+ if (hpriv->cap & HOST_CAP_64) {
+ dma_bits = 64;
+ if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
+ dma_bits = 43;
+ } else {
+ dma_bits = 32;
+ }
+
/*
* If the device fixup already set the dma_mask to some non-standard
* value, don't extend it here. This happens on STA2X11, for example.
@@ -1921,7 +1948,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_gtf_filter_workaround(host);

/* initialize adapter */
- rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+ rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
return rc;

diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index ff8e6ae1c6362..f9c5906a8afa8 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -247,6 +247,7 @@ enum {
AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
suspend/resume */
AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
+ AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */

/* ap->flags bits */

diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index cb24ecf36fafe..50e07ea60e45c 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
u32 axicc;
bool is_cci_enabled;
int flags;
- struct reset_control *rst;
};

static unsigned int ceva_ahci_read_id(struct ata_device *dev,
@@ -189,6 +188,60 @@ static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};

+static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+ int rc, i;
+
+ rc = ahci_platform_enable_regulators(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_regulator;
+
+ /* Assert the controller reset */
+ rc = ahci_platform_assert_rsts(hpriv);
+ if (rc)
+ goto disable_clks;
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_rsts;
+ }
+
+ /* De-assert the controller reset */
+ ahci_platform_deassert_rsts(hpriv);
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_power_on(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ return 0;
+
+disable_rsts:
+ ahci_platform_deassert_rsts(hpriv);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+disable_clks:
+ ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+ ahci_platform_disable_regulators(hpriv);
+
+ return rc;
+}
+
static int ceva_ahci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
return -ENOMEM;

cevapriv->ahci_pdev = pdev;
-
- cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
- NULL);
- if (IS_ERR(cevapriv->rst))
- dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
- "failed to get reset\n");
-
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);

- if (!cevapriv->rst) {
- rc = ahci_platform_enable_resources(hpriv);
- if (rc)
- return rc;
- } else {
- int i;
+ hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ NULL);
+ if (IS_ERR(hpriv->rsts))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
+ "failed to get reset\n");

- rc = ahci_platform_enable_clks(hpriv);
- if (rc)
- return rc;
- /* Assert the controller reset */
- reset_control_assert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_init(hpriv->phys[i]);
- if (rc)
- return rc;
- }
-
- /* De-assert the controller reset */
- reset_control_deassert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_power_on(hpriv->phys[i]);
- if (rc) {
- phy_exit(hpriv->phys[i]);
- return rc;
- }
- }
- }
+ rc = ceva_ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;

if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
(u8 *)&cevapriv->pp2c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
(u8 *)&cevapriv->pp2c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

/* Read OOB timing value for COMWAKE from device-tree*/
if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
(u8 *)&cevapriv->pp3c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
(u8 *)&cevapriv->pp3c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

/* Read phy BURST timing value from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-burst-params",
(u8 *)&cevapriv->pp4c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

if (of_property_read_u8_array(np, "ceva,p1-burst-params",
(u8 *)&cevapriv->pp4c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

/* Read phy RETRY interval timing value from device-tree */
if (of_property_read_u16_array(np, "ceva,p0-retry-params",
(u16 *)&cevapriv->pp5c[0], 2) < 0) {
dev_warn(dev, "ceva,p0-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

if (of_property_read_u16_array(np, "ceva,p1-retry-params",
(u16 *)&cevapriv->pp5c[1], 2) < 0) {
dev_warn(dev, "ceva,p1-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}

/*
@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
struct ahci_host_priv *hpriv = host->private_data;
int rc;

- rc = ahci_platform_enable_resources(hpriv);
+ rc = ceva_ahci_platform_enable_resources(hpriv);
if (rc)
return rc;

diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fa2fc1953fc26..f14e56a5cff6b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2005,6 +2005,10 @@ void ata_dev_power_set_active(struct ata_device *dev)
struct ata_taskfile tf;
unsigned int err_mask;

+ /* If the device is already sleeping, do nothing. */
+ if (dev->flags & ATA_DFLAG_SLEEPING)
+ return;
+
/*
* Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
* if supported by the device.
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 128722cf6c3ca..827802e418dd3 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
struct gendisk *gd;
mempool_t *mp;
struct blk_mq_tag_set *set;
+ sector_t ssize;
ulong flags;
int late = 0;
int err;
@@ -395,7 +396,7 @@ aoeblk_gdalloc(void *vp)
gd->minors = AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
- set_capacity(gd, d->ssize);
+ ssize = d->ssize;
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);

@@ -404,6 +405,8 @@ aoeblk_gdalloc(void *vp)

spin_unlock_irqrestore(&d->lock, flags);

+ set_capacity(gd, ssize);
+
err = device_add_disk(NULL, gd, aoe_attr_groups);
if (err)
goto out_disk_cleanup;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3124837aa406f..505026f0025c7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1206,14 +1206,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;

+ /* Ensure no requests in virtqueues before deleting vqs. */
+ blk_mq_freeze_queue(vblk->disk->queue);
+
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);

/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);

- blk_mq_quiesce_queue(vblk->disk->queue);
-
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);

@@ -1231,7 +1232,7 @@ static int virtblk_restore(struct virtio_device *vdev)

virtio_device_ready(vdev);

- blk_mq_unquiesce_queue(vblk->disk->queue);
+ blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 168195672e2e1..d2df97cfcb294 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}

static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
- struct virtio_crypto_ctrl_header *header, void *para,
+ struct virtio_crypto_ctrl_header *header,
+ struct virtio_crypto_akcipher_session_para *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher

ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
- memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);

diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 003a44132418a..5584af15300a8 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -376,9 +376,9 @@ static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
allowed++;
}

- if (!allowed) {
- cxl_set_mem_enable(cxlds, 0);
- info->mem_enabled = 0;
+ if (!allowed && info->mem_enabled) {
+ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+ return -ENXIO;
}

/*
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 4cf8da77bdd91..cac4532fe23a9 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -56,6 +56,8 @@

#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)

+#define BUS_WIDTH_WORD_SIZE GENMASK(3, 0)
+#define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)
#define BUS_WIDTH_8BIT 0x00
#define BUS_WIDTH_16BIT 0x01
#define BUS_WIDTH_32BIT 0x02
@@ -739,7 +741,8 @@ static int admac_device_config(struct dma_chan *chan,
struct admac_data *ad = adchan->host;
bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
int wordsize = 0;
- u32 bus_width = 0;
+ u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
+ ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);

switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 69385f32e2756..f383f219ed008 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -805,7 +805,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
int i;
int cpu;
int ret;
- char irq_name[20];
+ char irq_name[32];

fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9c121a4b33ad8..f97d80343aea4 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -25,7 +25,7 @@ struct sh_dmae_chan {
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
void __iomem *base;
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[32]; /* unique name per DMAC of channel */
int pm_error;
dma_addr_t slave_addr;
};
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 7ec6e5d728b03..9212ac9f978f2 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2413,6 +2413,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@@ -2429,6 +2434,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 6ac5ff20a2fe2..8aaa7fcb2630d 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
*/
card->bm_generation = generation;

- if (root_device == NULL) {
+ if (card->gap_count == 0) {
+ /*
+ * If self IDs have inconsistent gap counts, do a
+ * bus reset ASAP. The config rom read might never
+ * complete, so don't wait for it. However, still
+ * send a PHY configuration packet prior to the
+ * bus reset. The PHY configuration packet might
+ * fail, but 1394-2008 8.4.5.2 explicitly permits
+ * it in this case, so it should be safe to try.
+ */
+ new_root_id = local_id;
+ /*
+ * We must always send a bus reset if the gap count
+ * is inconsistent, so bypass the 5-reset limit.
+ */
+ card->bm_retries = 0;
+ } else if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 7c48c380d722c..1995f0a2e0fc0 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
efi_memory_desc_t *md;

for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;

if (!(md->attribute & EFI_MEMORY_SP))
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 2fd770b499a35..ff9791ce2e156 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -116,15 +116,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
- /*
- * Special purpose memory is 'soft reserved', which means it
- * is set aside initially, but can be hotplugged back in or
- * be assigned to the dax driver after boot.
- */
- if (efi_soft_reserve_enabled() &&
- (md->attribute & EFI_MEMORY_SP))
- return false;
-
/*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
@@ -169,6 +160,16 @@ static __init void reserve_regions(void)
size = npages << PAGE_SHIFT;

if (is_memory(md)) {
+ /*
+ * Special purpose memory is 'soft reserved', which
+ * means it is set aside initially. Don't add a memblock
+ * for it now so that it can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ continue;
+
early_init_dt_add_memory_arch(paddr, size);

if (!is_usable_memory(md))
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ef5045a53ce09..b6e1dcb98a64c 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -25,7 +25,7 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
cflags-$(CONFIG_RISCV) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
- -fpic
+ -fpic -mno-relax
cflags-$(CONFIG_LOONGARCH) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fpie

diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index d0daacd2c903f..6b142aa35389e 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
efi_memory_desc_t *md;

for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;

if (!(md->attribute & EFI_MEMORY_SP))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c46c6fbd235e8..e636c7850f777 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -999,6 +999,8 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ /* indicate amdgpu suspension status */
+ bool suspend_complete;

enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b9983ca99eb7d..f24c3a20e901d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2414,6 +2414,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);

+ adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@@ -2428,6 +2429,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);

+ adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 84ca601f7d5f3..195b298923543 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3064,6 +3064,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)

gfx_v9_0_cp_gfx_enable(adev, true);

+ /* Now only limit the quirk on the APU gfx9 series and already
+ * confirmed that the APU gfx10/gfx11 needn't such update.
+ */
+ if (adev->flags & AMD_IS_APU &&
+ adev->in_s3 && !adev->suspend_complete) {
+ DRM_INFO(" Will skip the CSB packet resubmit\n");
+ return 0;
+ }
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 811dd3ea63620..489c89465c78b 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1285,10 +1285,32 @@ static int soc15_common_suspend(void *handle)
return soc15_common_hw_fini(adev);
}

+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ /* Will reset for the following suspend abort cases.
+ * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+ * 2) S3 suspend abort and TOS already launched.
+ */
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+ !adev->suspend_complete &&
+ sol_reg)
+ return true;
+
+ return false;
+}
+
static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+ if (soc15_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+ soc15_asic_reset(adev);
+ }
return soc15_common_hw_init(adev);
}

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a826c92933199..da16048bf1004 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2255,6 +2255,7 @@ static int dm_sw_fini(void *handle)

if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index da0145bc104a8..8f2737075dc2f 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -980,7 +980,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;

- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
lockdep_assert_none_held_once();

points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@@ -1049,7 +1050,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fallthough and try a 0 timeout wait!
*/

- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 19188683c8fca..8c2bf1c16f2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}

+static void
+shadow_fw_release(void *fw)
+{
+ release_firmware(fw);
+}
+
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
- .fini = (void(*)(void *))release_firmware,
+ .fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 86affe987a1cb..393b97b4a991f 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -383,7 +383,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
- struct page **pages = tt->pages;
+ struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;

diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 59344ad62822d..c0aa6bfa66b24 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");

#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
+#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index fc70920c4ddab..0c203c614197c 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -804,6 +804,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+ /* flag the last byte as processed */
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_PROCESSED, &value);
+
i2c_imx_slave_finish_op(i2c_imx);
return IRQ_HANDLED;
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 4ed8814efde6f..6ed0568747eaa 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1710,7 +1710,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
/* SRQ resize is not supported */
- break;
+ return -EINVAL;
case IB_SRQ_LIMIT:
/* Change the SRQ threshold */
if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@@ -1725,13 +1725,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
- break;
+ return 0;
default:
ibdev_err(&rdev->ibdev,
"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
- return 0;
}

int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 51ae58c02b15c..802b0e5801a7d 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2089,7 +2089,7 @@ int init_credit_return(struct hfi1_devdata *dd)
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
- goto done;
+ goto free_cr_base;
}
}
set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2097,6 +2097,10 @@ int init_credit_return(struct hfi1_devdata *dd)
ret = 0;
done:
return ret;
+
+free_cr_base:
+ free_credit_return(dd);
+ goto done;
}

void free_credit_return(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 26c62162759ba..969c5c3ab859e 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;

- if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index ad54260cb58c9..ebe98fa2b1cd2 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -345,6 +345,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 311a1138e838d..918a2d783141f 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -379,6 +379,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
@@ -562,6 +563,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
+ if (rf == dev_id) {
+ tasklet_kill(&rf->dpc_tasklet);
+ } else {
+ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+ tasklet_kill(&iwceq->dpc_tasklet);
+ }
}

/**
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 01faec6ea5285..42c671f209233 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -762,7 +762,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,

if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
- init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
return -EINVAL;

if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
@@ -2119,9 +2121,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
info.cq_base_pa = iwcq->kmem.pa;
}

- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
- (u32)IRDMA_MAX_CQ_READ_THRESH);
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);

if (irdma_sc_cq_init(cq, &info)) {
ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index d745ce9dc88aa..61755b5f3e20d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, true, 0, alloc_and_init);
- if (rc)
+ if (rc) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info,
+ qp->usq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ }
return rc;
+ }
}

memset(&in_params, 0, sizeof(in_params));
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 25e799dba999e..cffa93f114a73 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");

+static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
+{
+ return kstrtou64(buffer, 16, (u64 *)kp->arg);
+}
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
- 0444);
+module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
+ &srpt_service_guid, 0444);
MODULE_PARM_DESC(srpt_service_guid,
"Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");

@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
/**
* srpt_qp_event - QP event callback function
* @event: Description of the event that occurred.
- * @ch: SRPT RDMA channel.
+ * @ptr: SRPT RDMA channel.
*/
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+static void srpt_qp_event(struct ib_event *event, void *ptr)
{
+ struct srpt_rdma_ch *ch = ptr;
+
pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
event->event, ch, ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
@@ -1807,8 +1813,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
ch->cq_size = ch->rq_size + sq_size;

qp_init->qp_context = (void *)ch;
- qp_init->event_handler
- = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->event_handler = srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8011d70d0799..02f3bc4e4895e 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -294,6 +294,7 @@ static const struct xpad_device {
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -489,6 +490,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index cd45a65e17f2c..dfc6c581873b7 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -634,6 +634,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
.driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
+ {
+ /* Fujitsu Lifebook U728 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
{
/* Gigabyte M912 */
.matches = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 3f0732db7bf5b..6de64b3f900fb 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
}
}

- if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
+ /* Some devices with gpio_int_idx 0 list a third unused GPIO */
+ if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) {
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_first_gpios;
} else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index b83b39e93e1a9..4d03fb3a82460 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3161,6 +3161,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);

+out:
if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();

@@ -3196,7 +3197,6 @@ static void its_cpu_init_lpis(void)

/* Make sure the GIC has seen the above */
dsb(sy);
-out:
gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
smp_processor_id(),
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 2f4784860df5d..be5e19a86ac3b 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -144,7 +144,13 @@ static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ if (unlikely(irqd_irq_disabled(d))) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+ } else {
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ }
}

#ifdef CONFIG_SMP
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0e6068ee783e7..3e215aa85b99a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -61,6 +61,8 @@ struct convert_context {
 		struct skcipher_request *req;
 		struct aead_request *req_aead;
 	} r;
+	bool aead_recheck;
+	bool aead_failed;
 
 };
 
@@ -81,6 +83,8 @@ struct dm_crypt_io {
 	blk_status_t error;
 	sector_t sector;
 
+	struct bvec_iter saved_bi_iter;
+
 	struct rb_node rb_node;
 } CRYPTO_MINALIGN_ATTR;
 
@@ -1365,10 +1369,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	if (r == -EBADMSG) {
 		sector_t s = le64_to_cpu(*sector);
 
-		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
-			    ctx->bio_in->bi_bdev, s);
-		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
-				 ctx->bio_in, s, 0);
+		ctx->aead_failed = true;
+		if (ctx->aead_recheck) {
+			DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+				    ctx->bio_in->bi_bdev, s);
+			dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+					 ctx->bio_in, s, 0);
+		}
 	}
 
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
@@ -1724,6 +1731,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
+	io->ctx.aead_recheck = false;
+	io->ctx.aead_failed = false;
 	io->ctx.r.req = NULL;
 	io->integrity_metadata = NULL;
 	io->integrity_metadata_from_pool = false;
@@ -1735,6 +1744,8 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 	atomic_inc(&io->io_pending);
 }
 
+static void kcryptd_queue_read(struct dm_crypt_io *io);
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1748,6 +1759,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
 
+	if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
+	    cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
+		io->ctx.aead_recheck = true;
+		io->ctx.aead_failed = false;
+		io->error = 0;
+		kcryptd_queue_read(io);
+		return;
+	}
+
 	if (io->ctx.r.req)
 		crypt_free_req(cc, io->ctx.r.req, base_bio);
 
@@ -1783,15 +1803,19 @@ static void crypt_endio(struct bio *clone)
 	struct dm_crypt_io *io = clone->bi_private;
 	struct crypt_config *cc = io->cc;
 	unsigned int rw = bio_data_dir(clone);
-	blk_status_t error;
+	blk_status_t error = clone->bi_status;
+
+	if (io->ctx.aead_recheck && !error) {
+		kcryptd_queue_crypt(io);
+		return;
+	}
 
 	/*
 	 * free the processed pages
 	 */
-	if (rw == WRITE)
+	if (rw == WRITE || io->ctx.aead_recheck)
 		crypt_free_buffer_pages(cc, clone);
 
-	error = clone->bi_status;
 	bio_put(clone);
 
 	if (rw == READ && !error) {
@@ -1812,6 +1836,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 
+	if (io->ctx.aead_recheck) {
+		if (!(gfp & __GFP_DIRECT_RECLAIM))
+			return 1;
+		crypt_inc_pending(io);
+		clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+		if (unlikely(!clone)) {
+			crypt_dec_pending(io);
+			return 1;
+		}
+		clone->bi_iter.bi_sector = cc->start + io->sector;
+		crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
+		io->saved_bi_iter = clone->bi_iter;
+		dm_submit_bio_remap(io->base_bio, clone);
+		return 0;
+	}
+
 	/*
 	 * We need the original biovec array in order to decrypt the whole bio
 	 * data *afterwards* -- thanks to immutable biovecs we don't need to
@@ -2038,6 +2078,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	io->ctx.bio_out = clone;
 	io->ctx.iter_out = clone->bi_iter;
 
+	if (crypt_integrity_aead(cc)) {
+		bio_copy_data(clone, io->base_bio);
+		io->ctx.bio_in = clone;
+		io->ctx.iter_in = clone->bi_iter;
+	}
+
 	sector += bio_sectors(clone);
 
 	crypt_inc_pending(io);
@@ -2074,6 +2120,14 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 {
+	if (io->ctx.aead_recheck) {
+		if (!io->error) {
+			io->ctx.bio_in->bi_iter = io->saved_bi_iter;
+			bio_copy_data(io->base_bio, io->ctx.bio_in);
+		}
+		crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
+		bio_put(io->ctx.bio_in);
+	}
 	crypt_dec_pending(io);
 }
 
@@ -2103,11 +2157,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 
 	crypt_inc_pending(io);
 
-	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
-			   io->sector);
+	if (io->ctx.aead_recheck) {
+		io->ctx.cc_sector = io->sector + cc->iv_offset;
+		r = crypt_convert(cc, &io->ctx,
+				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+	} else {
+		crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+				   io->sector);
 
-	r = crypt_convert(cc, &io->ctx,
-			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+		r = crypt_convert(cc, &io->ctx,
+				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+	}
 	/*
 	 * Crypto API backlogged the request, because its queue was full
 	 * and we're in softirq context, so continue from a workqueue
@@ -2150,10 +2210,13 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (error == -EBADMSG) {
 		sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
 
-		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
-			    ctx->bio_in->bi_bdev, s);
-		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
-				 ctx->bio_in, s, 0);
+		ctx->aead_failed = true;
+		if (ctx->aead_recheck) {
+			DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+				    ctx->bio_in->bi_bdev, s);
+			dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+					 ctx->bio_in, s, 0);
+		}
 		io->error = BLK_STS_PROTECTION;
 	} else if (error < 0)
 		io->error = BLK_STS_IOERR;
@@ -3079,7 +3142,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 		sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
 		if (!strcasecmp(sval, "aead")) {
 			set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
-		} else if (strcasecmp(sval, "none")) {
+		} else if (strcasecmp(sval, "none")) {
 			ti->error = "Unknown integrity profile";
 			return -EINVAL;
 		}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 77fcff82c82ac..3da4359f51645 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -279,6 +279,8 @@ struct dm_integrity_c {
 
 	atomic64_t number_of_mismatches;
 
+	mempool_t recheck_pool;
+
 	struct notifier_block reboot_notifier;
 };
 
@@ -1699,6 +1701,77 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
 		get_random_bytes(result, ic->tag_size);
 }
 
+static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
+{
+	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+	struct dm_integrity_c *ic = dio->ic;
+	struct bvec_iter iter;
+	struct bio_vec bv;
+	sector_t sector, logical_sector, area, offset;
+	struct page *page;
+	void *buffer;
+
+	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
+	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
+							     &dio->metadata_offset);
+	sector = get_data_sector(ic, area, offset);
+	logical_sector = dio->range.logical_sector;
+
+	page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
+	buffer = page_to_virt(page);
+
+	__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+		unsigned pos = 0;
+
+		do {
+			char *mem;
+			int r;
+			struct dm_io_request io_req;
+			struct dm_io_region io_loc;
+			io_req.bi_opf = REQ_OP_READ;
+			io_req.mem.type = DM_IO_KMEM;
+			io_req.mem.ptr.addr = buffer;
+			io_req.notify.fn = NULL;
+			io_req.client = ic->io;
+			io_loc.bdev = ic->dev->bdev;
+			io_loc.sector = sector;
+			io_loc.count = ic->sectors_per_block;
+
+			r = dm_io(&io_req, 1, &io_loc, NULL);
+			if (unlikely(r)) {
+				dio->bi_status = errno_to_blk_status(r);
+				goto free_ret;
+			}
+
+			integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+			r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
+						&dio->metadata_offset, ic->tag_size, TAG_CMP);
+			if (r) {
+				if (r > 0) {
+					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+						    bio->bi_bdev, logical_sector);
+					atomic64_inc(&ic->number_of_mismatches);
+					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+							 bio, logical_sector, 0);
+					r = -EILSEQ;
+				}
+				dio->bi_status = errno_to_blk_status(r);
+				goto free_ret;
+			}
+
+			mem = bvec_kmap_local(&bv);
+			memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+			kunmap_local(mem);
+
+			pos += ic->sectors_per_block << SECTOR_SHIFT;
+			sector += ic->sectors_per_block;
+			logical_sector += ic->sectors_per_block;
+		} while (pos < bv.bv_len);
+	}
+free_ret:
+	mempool_free(page, &ic->recheck_pool);
+}
+
 static void integrity_metadata(struct work_struct *w)
 {
 	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
@@ -1784,15 +1857,8 @@ static void integrity_metadata(struct work_struct *w)
 					checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
 			if (unlikely(r)) {
 				if (r > 0) {
-					sector_t s;
-
-					s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
-					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
-						    bio->bi_bdev, s);
-					r = -EILSEQ;
-					atomic64_inc(&ic->number_of_mismatches);
-					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
-							 bio, s, 0);
+					integrity_recheck(dio, checksums);
+					goto skip_io;
 				}
 				if (likely(checksums != checksums_onstack))
 					kfree(checksums);
@@ -4208,6 +4274,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
 		goto bad;
 	}
 
+	r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
+	if (r) {
+		ti->error = "Cannot allocate mempool";
+		goto bad;
+	}
+
 	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
 					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
 	if (!ic->metadata_wq) {
@@ -4572,6 +4644,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 	kvfree(ic->bbs);
 	if (ic->bufio)
 		dm_bufio_client_destroy(ic->bufio);
+	mempool_exit(&ic->recheck_pool);
 	mempool_exit(&ic->journal_io_mempool);
 	if (ic->io)
 		dm_io_client_destroy(ic->io);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 4669923f4cfb4..b48e1b59e6da4 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -474,6 +474,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
 	return 0;
 }
 
+static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
+			       u8 *data, size_t len)
+{
+	memcpy(data, io->recheck_buffer, len);
+	io->recheck_buffer += len;
+
+	return 0;
+}
+
+static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
+				   struct bvec_iter start, sector_t cur_block)
+{
+	struct page *page;
+	void *buffer;
+	int r;
+	struct dm_io_request io_req;
+	struct dm_io_region io_loc;
+
+	page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
+	buffer = page_to_virt(page);
+
+	io_req.bi_opf = REQ_OP_READ;
+	io_req.mem.type = DM_IO_KMEM;
+	io_req.mem.ptr.addr = buffer;
+	io_req.notify.fn = NULL;
+	io_req.client = v->io;
+	io_loc.bdev = v->data_dev->bdev;
+	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
+	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
+	r = dm_io(&io_req, 1, &io_loc, NULL);
+	if (unlikely(r))
+		goto free_ret;
+
+	r = verity_hash(v, verity_io_hash_req(v, io), buffer,
+			1 << v->data_dev_block_bits,
+			verity_io_real_digest(v, io), true);
+	if (unlikely(r))
+		goto free_ret;
+
+	if (memcmp(verity_io_real_digest(v, io),
+		   verity_io_want_digest(v, io), v->digest_size)) {
+		r = -EIO;
+		goto free_ret;
+	}
+
+	io->recheck_buffer = buffer;
+	r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
+	if (unlikely(r))
+		goto free_ret;
+
+	r = 0;
+free_ret:
+	mempool_free(page, &v->recheck_pool);
+
+	return r;
+}
+
 static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
 			  u8 *data, size_t len)
 {
@@ -500,9 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
 {
 	bool is_zero;
 	struct dm_verity *v = io->v;
-#if defined(CONFIG_DM_VERITY_FEC)
 	struct bvec_iter start;
-#endif
 	struct bvec_iter iter_copy;
 	struct bvec_iter *iter;
 	struct crypto_wait wait;
@@ -553,10 +608,7 @@ static int verity_verify_io(struct dm_verity_io *io)
 		if (unlikely(r < 0))
 			return r;
 
-#if defined(CONFIG_DM_VERITY_FEC)
-		if (verity_fec_is_enabled(v))
-			start = *iter;
-#endif
+		start = *iter;
 		r = verity_for_io_block(v, io, iter, &wait);
 		if (unlikely(r < 0))
 			return r;
@@ -578,6 +630,10 @@ static int verity_verify_io(struct dm_verity_io *io)
 			 * tasklet since it may sleep, so fallback to work-queue.
 			 */
 			return -EAGAIN;
+		} else if (verity_recheck(v, io, start, cur_block) == 0) {
+			if (v->validated_blocks)
+				set_bit(cur_block, v->validated_blocks);
+			continue;
 #if defined(CONFIG_DM_VERITY_FEC)
 		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
 					     cur_block, NULL, &start) == 0) {
@@ -928,6 +984,10 @@ static void verity_dtr(struct dm_target *ti)
 	if (v->verify_wq)
 		destroy_workqueue(v->verify_wq);
 
+	mempool_exit(&v->recheck_pool);
+	if (v->io)
+		dm_io_client_destroy(v->io);
+
 	if (v->bufio)
 		dm_bufio_client_destroy(v->bufio);
 
@@ -1364,6 +1424,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	v->hash_blocks = hash_position;
 
+	r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
+	if (unlikely(r)) {
+		ti->error = "Cannot allocate mempool";
+		goto bad;
+	}
+
+	v->io = dm_io_client_create();
+	if (IS_ERR(v->io)) {
+		r = PTR_ERR(v->io);
+		v->io = NULL;
+		ti->error = "Cannot allocate dm io";
+		goto bad;
+	}
+
 	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
 		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
 		dm_bufio_alloc_callback, NULL,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index f3f6070084196..4620a98c99561 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -11,6 +11,7 @@
 #ifndef DM_VERITY_H
 #define DM_VERITY_H
 
+#include <linux/dm-io.h>
 #include <linux/dm-bufio.h>
 #include <linux/device-mapper.h>
 #include <linux/interrupt.h>
@@ -68,6 +69,9 @@ struct dm_verity {
 	unsigned long *validated_blocks; /* bitset blocks validated */
 
 	char *signature_key_desc; /* signature keyring reference */
+
+	struct dm_io_client *io;
+	mempool_t recheck_pool;
 };
 
 struct dm_verity_io {
@@ -84,6 +88,8 @@ struct dm_verity_io {
 
 	struct work_struct work;
 
+	char *recheck_buffer;
+
 	/*
	 * Three variably-size fields follow this struct:
	 *
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c7efe15229514..846bdee4daa0e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -564,8 +564,12 @@ static void submit_flushes(struct work_struct *ws)
 			rcu_read_lock();
 		}
 	rcu_read_unlock();
-	if (atomic_dec_and_test(&mddev->flush_pending))
+	if (atomic_dec_and_test(&mddev->flush_pending)) {
+		/* The pair is percpu_ref_get() from md_flush_request() */
+		percpu_ref_put(&mddev->active_io);
+
 		queue_work(md_wq, &mddev->flush_work);
+	}
 }
 
 static void md_submit_flush_data(struct work_struct *ws)
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
index c61be3404c6f2..504b836a7abf8 100644
--- a/drivers/misc/open-dice.c
+++ b/drivers/misc/open-dice.c
@@ -142,7 +142,6 @@ static int __init open_dice_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	*drvdata = (struct open_dice_drvdata){
-		.lock = __MUTEX_INITIALIZER(drvdata->lock),
 		.rmem = rmem,
 		.misc = (struct miscdevice){
 			.parent = dev,
@@ -152,6 +151,7 @@ static int __init open_dice_probe(struct platform_device *pdev)
 			.mode = 0600,
 		},
 	};
+	mutex_init(&drvdata->lock);
 
 	/* Index overflow check not needed, misc_register() will fail. */
 	snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 3784347b6fd88..55639c133dd02 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -437,6 +437,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
 			return;
 	}
 
+	/* AF modifies given action iff PF/VF has requested for it */
+	if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+		return;
+
 	/* copy VF default entry action to the VF mcam entry */
 	rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
 						 target_func);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 3423c95cc84ae..7031f41287e09 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -744,6 +744,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sparx5);
 	sparx5->pdev = pdev;
 	sparx5->dev = &pdev->dev;
+	spin_lock_init(&sparx5->tx_lock);
 
 	/* Do switch core reset if available */
 	reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 7a83222caa737..cb3173d2b0e8d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -278,6 +278,7 @@ struct sparx5 {
 	int xtr_irq;
 	/* Frame DMA */
 	int fdma_irq;
+	spinlock_t tx_lock; /* lock for frame transmission */
 	struct sparx5_rx rx;
 	struct sparx5_tx tx;
 	/* PTP */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 6db6ac6a3bbc2..ac7e1cffbcecf 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	skb_tx_timestamp(skb);
+	spin_lock(&sparx5->tx_lock);
 	if (sparx5->fdma_irq > 0)
 		ret = sparx5_fdma_xmit(sparx5, ifh, skb);
 	else
 		ret = sparx5_inject(sparx5, ifh, skb, dev);
+	spin_unlock(&sparx5->tx_lock);
 
 	if (ret == -EBUSY)
 		goto busy;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 66178ce6d000e..91b2aa81914ba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5823,11 +5823,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	if (unlikely(!dev)) {
-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-		return IRQ_NONE;
-	}
-
 	/* Check if adapter is up */
 	if (test_bit(STMMAC_DOWN, &priv->state))
 		return IRQ_HANDLED;
@@ -5843,11 +5838,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	if (unlikely(!dev)) {
-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-		return IRQ_NONE;
-	}
-
 	/* Check if adapter is up */
 	if (test_bit(STMMAC_DOWN, &priv->state))
 		return IRQ_HANDLED;
@@ -5869,11 +5859,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
 
-	if (unlikely(!data)) {
-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-		return IRQ_NONE;
-	}
-
 	/* Check if adapter is up */
 	if (test_bit(STMMAC_DOWN, &priv->state))
 		return IRQ_HANDLED;
@@ -5900,11 +5885,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
 
-	if (unlikely(!data)) {
-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-		return IRQ_NONE;
-	}
-
 	/* Check if adapter is up */
 	if (test_bit(STMMAC_DOWN, &priv->state))
 		return IRQ_HANDLED;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index bace989591f75..937dd9cf4fbaf 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1906,20 +1906,20 @@ static int __init gtp_init(void)
 	if (err < 0)
 		goto error_out;
 
-	err = genl_register_family(&gtp_genl_family);
+	err = register_pernet_subsys(&gtp_net_ops);
 	if (err < 0)
 		goto unreg_rtnl_link;
 
-	err = register_pernet_subsys(&gtp_net_ops);
+	err = genl_register_family(&gtp_genl_family);
 	if (err < 0)
-		goto unreg_genl_family;
+		goto unreg_pernet_subsys;
 
 	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
 		sizeof(struct pdp_ctx));
 	return 0;
 
-unreg_genl_family:
-	genl_unregister_family(&gtp_genl_family);
+unreg_pernet_subsys:
+	unregister_pernet_subsys(&gtp_net_ops);
 unreg_rtnl_link:
 	rtnl_link_unregister(&gtp_link_ops);
 error_out:
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 3d99fd6664d7a..70e52d27064ec 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -414,9 +414,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
 				ERR_PTR(ret));
 			return ret;
 		}
+
+		return genphy_soft_reset(phydev);
 	}
 
-	return genphy_soft_reset(phydev);
+	return 0;
 }
 
 static int rtl821x_resume(struct phy_device *phydev)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 177a365b8ec55..3dbf926fd99fd 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
 
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
 /*
  * These items are short-term. They will eventually be moved into
  * a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
 	/* remove from transport list */
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_del(&lport->port_list);
-	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
-		complete(&nvme_fc_unload_proceed);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
 
 	ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -3869,10 +3862,6 @@ static int __init nvme_fc_init_module(void)
 {
 	int ret;
 
-	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
-	if (!nvme_fc_wq)
-		return -ENOMEM;
-
 	/*
 	 * NOTE:
 	 * It is expected that in the future the kernel will combine
@@ -3890,7 +3879,7 @@ static int __init nvme_fc_init_module(void)
 	ret = class_register(&fc_class);
 	if (ret) {
 		pr_err("couldn't register class fc\n");
-		goto out_destroy_wq;
+		return ret;
 	}
 
 	/*
@@ -3914,8 +3903,6 @@ static int __init nvme_fc_init_module(void)
 	device_destroy(&fc_class, MKDEV(0, 0));
 out_destroy_class:
 	class_unregister(&fc_class);
-out_destroy_wq:
-	destroy_workqueue(nvme_fc_wq);
 
 	return ret;
 }
@@ -3935,45 +3922,23 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
 	spin_unlock(&rport->lock);
 }
 
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
 {
 	struct nvme_fc_lport *lport;
 	struct nvme_fc_rport *rport;
-
-	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
-		list_for_each_entry(rport, &lport->endp_list, endp_list) {
-			nvme_fc_delete_controllers(rport);
-		}
-	}
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
 	unsigned long flags;
-	bool need_cleanup = false;
 
 	spin_lock_irqsave(&nvme_fc_lock, flags);
-	nvme_fc_waiting_to_unload = true;
-	if (!list_empty(&nvme_fc_lport_list)) {
-		need_cleanup = true;
-		nvme_fc_cleanup_for_unload();
-	}
+	list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+		list_for_each_entry(rport, &lport->endp_list, endp_list)
+			nvme_fc_delete_controllers(rport);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
-	if (need_cleanup) {
-		pr_info("%s: waiting for ctlr deletes\n", __func__);
-		wait_for_completion(&nvme_fc_unload_proceed);
-		pr_info("%s: ctrl deletes complete\n", __func__);
-	}
+	flush_workqueue(nvme_delete_wq);
 
 	nvmf_unregister_transport(&nvme_fc_transport);
 
-	ida_destroy(&nvme_fc_local_port_cnt);
-	ida_destroy(&nvme_fc_ctrl_cnt);
-
 	device_destroy(&fc_class, MKDEV(0, 0));
 	class_unregister(&fc_class);
-	destroy_workqueue(nvme_fc_wq);
 }
 
 module_init(nvme_fc_init_module);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1ab6601fdd5cf..8a02ed63b1566 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
 	struct nvmet_fc_port_entry *pe;
 	struct kref ref;
 	u32 max_sg_cnt;
+
+	struct work_struct put_work;
 };
 
 struct nvmet_fc_port_entry {
@@ -165,7 +167,7 @@ struct nvmet_fc_tgt_assoc {
 	struct nvmet_fc_hostport *hostport;
 	struct nvmet_fc_ls_iod *rcv_disconn;
 	struct list_head a_list;
-	struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
+	struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
 	struct kref ref;
 	struct work_struct del_work;
 	struct rcu_head rcu;
@@ -248,6 +250,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+{
+	struct nvmet_fc_tgtport *tgtport =
+		container_of(work, struct nvmet_fc_tgtport, put_work);
+
+	nvmet_fc_tgtport_put(tgtport);
+}
 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 					struct nvmet_fc_fcp_iod *fod);
@@ -359,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
 
 	if (!lsop->req_queued) {
 		spin_unlock_irqrestore(&tgtport->lock, flags);
-		return;
+		goto out_putwork;
 	}
 
 	list_del(&lsop->lsreq_list);
@@ -372,7 +381,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
 				  (lsreq->rqstlen + lsreq->rsplen),
 				  DMA_BIDIRECTIONAL);
 
-	nvmet_fc_tgtport_put(tgtport);
+out_putwork:
+	queue_work(nvmet_wq, &tgtport->put_work);
 }
 
 static int
@@ -801,14 +811,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	if (!queue)
 		return NULL;
 
-	if (!nvmet_fc_tgt_a_get(assoc))
-		goto out_free_queue;
-
 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
 				assoc->tgtport->fc_target_port.port_num,
 				assoc->a_id, qid);
 	if (!queue->work_q)
-		goto out_a_put;
+		goto out_free_queue;
 
 	queue->qid = qid;
 	queue->sqsize = sqsize;
@@ -830,15 +837,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 		goto out_fail_iodlist;
 
 	WARN_ON(assoc->queues[qid]);
-	rcu_assign_pointer(assoc->queues[qid], queue);
+	assoc->queues[qid] = queue;
 
 	return queue;
 
 out_fail_iodlist:
 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
 	destroy_workqueue(queue->work_q);
-out_a_put:
-	nvmet_fc_tgt_a_put(assoc);
 out_free_queue:
 	kfree(queue);
 	return NULL;
@@ -851,12 +856,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
 	struct nvmet_fc_tgt_queue *queue =
 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
 
-	rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
-
 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
 
-	nvmet_fc_tgt_a_put(queue->assoc);
-
 	destroy_workqueue(queue->work_q);
 
 	kfree_rcu(queue, rcu);
@@ -968,7 +969,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
 	rcu_read_lock();
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (association_id == assoc->association_id) {
-			queue = rcu_dereference(assoc->queues[qid]);
+			queue = assoc->queues[qid];
 			if (queue &&
 			    (!atomic_read(&queue->connected) ||
 			     !nvmet_fc_tgt_q_get(queue)))
@@ -1077,8 +1078,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 		/* new allocation not needed */
 		kfree(newhost);
 		newhost = match;
-		/* no new allocation - release reference */
-		nvmet_fc_tgtport_put(tgtport);
 	} else {
 		newhost->tgtport = tgtport;
 		newhost->hosthandle = hosthandle;
@@ -1093,13 +1092,28 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 }
 
 static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+	nvmet_fc_delete_target_assoc(assoc);
+	nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
 {
 	struct nvmet_fc_tgt_assoc *assoc =
 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
 
-	nvmet_fc_delete_target_assoc(assoc);
-	nvmet_fc_tgt_a_put(assoc);
+	nvmet_fc_delete_assoc(assoc);
+	nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+	nvmet_fc_tgtport_get(assoc->tgtport);
+	queue_work(nvmet_wq, &assoc->del_work);
 }
 
 static struct nvmet_fc_tgt_assoc *
@@ -1111,6 +1125,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 	int idx;
 	bool needrandom = true;
 
+	if (!tgtport->pe)
+		return NULL;
+
 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
 	if (!assoc)
 		return NULL;
@@ -1130,7 +1147,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 	assoc->a_id = idx;
 	INIT_LIST_HEAD(&assoc->a_list);
 	kref_init(&assoc->ref);
-	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
 	atomic_set(&assoc->terminating, 0);
 
 	while (needrandom) {
@@ -1171,13 +1188,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
 	struct nvmet_fc_ls_iod *oldls;
 	unsigned long flags;
+	int i;
+
+	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+		if (assoc->queues[i])
+			nvmet_fc_delete_target_queue(assoc->queues[i]);
+	}
 
 	/* Send Disconnect now that all i/o has completed */
 	nvmet_fc_xmt_disconnect_assoc(assoc);
 
 	nvmet_fc_free_hostport(assoc->hostport);
 	spin_lock_irqsave(&tgtport->lock, flags);
-	list_del_rcu(&assoc->a_list);
 	oldls = assoc->rcv_disconn;
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 	/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1207,7 +1229,7 @@ static void
 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 {
 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
-	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
 	int i, terminating;
 
 	terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1216,29 +1238,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 	if (terminating)
 		return;
 
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_del_rcu(&assoc->a_list);
+	spin_unlock_irqrestore(&tgtport->lock, flags);
 
-	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
-		rcu_read_lock();
-		queue = rcu_dereference(assoc->queues[i]);
-		if (!queue) {
-			rcu_read_unlock();
-			continue;
-		}
+	synchronize_rcu();
 
-		if (!nvmet_fc_tgt_q_get(queue)) {
-			rcu_read_unlock();
-			continue;
-		}
-		rcu_read_unlock();
-		nvmet_fc_delete_target_queue(queue);
-		nvmet_fc_tgt_q_put(queue);
+	/* ensure all in-flight I/Os have been processed */
+	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+		if (assoc->queues[i])
+			flush_workqueue(assoc->queues[i]->work_q);
 	}
 
 	dev_info(tgtport->dev,
 		"{%d:%d} Association deleted\n",
 		tgtport->fc_target_port.port_num, assoc->a_id);
-
-	nvmet_fc_tgt_a_put(assoc);
 }
 
 static struct nvmet_fc_tgt_assoc *
@@ -1414,6 +1428,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
 	newrec->max_sg_cnt = template->max_sgl_segments;
+	INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1491,9 +1506,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		if (!queue_work(nvmet_wq, &assoc->del_work))
-			/* already deleting - release local reference */
-			nvmet_fc_tgt_a_put(assoc);
+		nvmet_fc_schedule_delete_assoc(assoc);
+		nvmet_fc_tgt_a_put(assoc);
 	}
 	rcu_read_unlock();
 }
@@ -1546,9 +1560,8 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		if (!queue_work(nvmet_wq, &assoc->del_work))
-			/* already deleting - release local reference */
-			nvmet_fc_tgt_a_put(assoc);
+		nvmet_fc_schedule_delete_assoc(assoc);
+		nvmet_fc_tgt_a_put(assoc);
 	}
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 
@@ -1580,7 +1593,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
-		queue = rcu_dereference(assoc->queues[0]);
+		queue = assoc->queues[0];
 		if (queue && queue->nvme_sq.ctrl == ctrl) {
 			if (nvmet_fc_tgt_a_get(assoc))
 				found_ctrl = true;
@@ -1592,9 +1605,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 	nvmet_fc_tgtport_put(tgtport);
 
 	if (found_ctrl) {
-		if (!queue_work(nvmet_wq, &assoc->del_work))
-			/* already deleting - release local reference */
-			nvmet_fc_tgt_a_put(assoc);
+		nvmet_fc_schedule_delete_assoc(assoc);
+		nvmet_fc_tgt_a_put(assoc);
 		return;
 	}
 
@@ -1624,6 +1636,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
 	/* terminate any outstanding associations */
 	__nvmet_fc_free_assocs(tgtport);
 
+	flush_workqueue(nvmet_wq);
+
 	/*
	 * should terminate LS's as well. However, LS's will be generated
	 * at the tail end of association termination, so they likely don't
@@ -1869,9 +1883,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
 			FCNVME_LS_DISCONNECT_ASSOC);
 
-	/* release get taken in nvmet_fc_find_target_assoc */
-	nvmet_fc_tgt_a_put(assoc);
-
 	/*
	 * The rules for LS response says the response cannot
	 * go back until ABTS's have been sent for all outstanding
@@ -1886,8 +1897,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 	assoc->rcv_disconn = iod;
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 
-	nvmet_fc_delete_target_assoc(assoc);
-
 	if (oldls) {
 		dev_info(tgtport->dev,
 			"{%d:%d} Multiple Disconnect Association LS's "
@@ -1903,6 +1912,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
 	}
 
+	nvmet_fc_schedule_delete_assoc(assoc);
+	nvmet_fc_tgt_a_put(assoc);
+
 	return false;
 }
 
@@ -2539,8 +2551,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 
 	fod->req.cmd = &fod->cmdiubuf.sqe;
 	fod->req.cqe = &fod->rspiubuf.cqe;
-	if (tgtport->pe)
-		fod->req.port = tgtport->pe->port;
+	if (!tgtport->pe)
+		goto transport_error;
+	fod->req.port = tgtport->pe->port;
 
 	/* clear any response payload */
 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2901,6 +2914,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
 
 	nvmet_fc_portentry_unbind(pe);
 
+	/* terminate any outstanding associations */
+	__nvmet_fc_free_assocs(pe->tgtport);
+
 	kfree(pe);
 }
 
@@ -2932,6 +2948,9 @@ static int __init nvmet_fc_init_module(void)
 
 static void __exit nvmet_fc_exit_module(void)
 {
+	/* ensure any shutdown operation, e.g. delete ctrls have finished */
+	flush_workqueue(nvmet_wq);
+
 	/* sanity check - all lports should be removed */
 	if (!list_empty(&nvmet_fc_target_list))
 		pr_warn("%s: targetport list not empty\n", __func__);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c780af36c1d4a..f5b8442b653db 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
 	if (!rport->targetport) {
 		tls_req->status = -ECONNREFUSED;
 		spin_lock(&rport->lock);
-		list_add_tail(&rport->ls_list, &tls_req->ls_list);
+		list_add_tail(&tls_req->ls_list, &rport->ls_list);
 		spin_unlock(&rport->lock);
 		queue_work(nvmet_wq, &rport->ls_work);
 		return ret;
@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
 	if (remoteport) {
 		rport = remoteport->private;
 		spin_lock(&rport->lock);
-		list_add_tail(&rport->ls_list, &tls_req->ls_list);
+		list_add_tail(&tls_req->ls_list, &rport->ls_list);
 		spin_unlock(&rport->lock);
 		queue_work(nvmet_wq, &rport->ls_work);
 	}
@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
 	if (!tport->remoteport) {
 		tls_req->status = -ECONNREFUSED;
 		spin_lock(&tport->lock);
-		list_add_tail(&tport->ls_list, &tls_req->ls_list);
+		list_add_tail(&tls_req->ls_list, &tport->ls_list);
 		spin_unlock(&tport->lock);
 		queue_work(nvmet_wq, &tport->ls_work);
 		return ret;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ce42afe8f64ef..3480768274699 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1884,6 +1884,7 @@ static void __exit nvmet_tcp_exit(void)
 	flush_workqueue(nvmet_wq);
 
 	destroy_workqueue(nvmet_tcp_wq);
+	ida_destroy(&nvmet_tcp_queue_ida);
 }
 
 module_init(nvmet_tcp_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 59c164b5c64aa..4086a7818981a 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -6,6 +6,7 @@
  * Author: Kishon Vijay Abraham I <kishon@ti.com>
  */
 
+#include <linux/align.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
@@ -600,7 +601,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
 	}
 
 	aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
-	msg_addr &= ~aligned_offset;
+	msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
 	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
 				  epc->mem->window.page_size);
 	if (ret)
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index e9cf318e6670f..34877a1f43a15 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -60,7 +60,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
 
 	return (irq_hw_number_t)desc->msi_index |
 		pci_dev_id(dev) << 11 |
-		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+		((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
 }
 
 static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 8e2b07ed2ce94..c10c99a31a90a 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
 
 	sparse_keymap_report_event(input_dev, event, val, autorelease);
-
-	/* Some devices need this to report further events */
-	acpi_evaluate_object(handle, "VBDL", NULL, NULL);
 }
 
 /*
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 6edd2e294750e..c2fb19af10705 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10511,6 +10511,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
 		return 0;
 	default:
 		/* Unknown function */
+		pr_debug("unknown function 0x%x\n", funcmode);
 		return -EOPNOTSUPP;
 	}
 	return 0;
@@ -10696,8 +10697,8 @@ static void dytc_profile_refresh(void)
 		return;
 
 	perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
-	convert_dytc_to_profile(funcmode, perfmode, &profile);
-	if (profile != dytc_current_profile) {
+	err = convert_dytc_to_profile(funcmode, perfmode, &profile);
+	if (!err && profile != dytc_current_profile) {
 		dytc_current_profile = profile;
 		platform_profile_notify();
 	}
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 9a92d515abb9b..11d72a3533552 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -50,7 +50,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
 };
 
 static const struct ts_dmi_data chuwi_hi8_air_data = {
-	.acpi_name	= "MSSL1680:00",
+	.acpi_name	= "MSSL1680",
 	.properties	= chuwi_hi8_air_props,
 };
 
@@ -913,6 +913,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
 	.properties	= teclast_tbook11_props,
 };
 
+static const struct property_entry teclast_x16_plus_props[] = {
+	PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+	PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+	PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+	PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+	PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+	PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
+	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+	PROPERTY_ENTRY_BOOL("silead,home-button"),
+	{ }
+};
+
+static const struct ts_dmi_data teclast_x16_plus_data = {
+	.embedded_fw = {
+		.name	= "silead/gsl3692-teclast-x16-plus.fw",
+		.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+		.length	= 43560,
+		.sha256	= { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+			    0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+			    0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+			    0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+	},
+	.acpi_name	= "MSSL1680:00",
+	.properties	= teclast_x16_plus_props,
+};
+
 static const struct property_entry teclast_x3_plus_props[] = {
 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1567,6 +1593,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
 		},
 	},
+	{
+		/* Teclast X16 Plus */
+		.driver_data = (void *)&teclast_x16_plus_data,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+			DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
+		},
+	},
 	{
 		/* Teclast X3 Plus */
 		.driver_data = (void *)&teclast_x3_plus_data,
@@ -1741,7 +1776,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
 	int error;
 
 	if (has_acpi_companion(dev) &&
-	    !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+	    strstarts(client->name, ts_data->acpi_name)) {
 		error = device_create_managed_software_node(dev, ts_data->properties, NULL);
 		if (error)
 			dev_err(dev, "failed to add properties: %d\n", error);
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index b9eeaff1c6615..925e486f73a6d 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -158,6 +158,9 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
 	pwm_get_state(drvdata->pwm, &pstate);
 
 	voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+	if (voltage < min(max_uV_duty, min_uV_duty) ||
+	    voltage > max(max_uV_duty, min_uV_duty))
+		return -ENOTRECOVERABLE;
 
 	/*
	 * The dutycycle for min_uV might be greater than the one for max_uV.
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c533d1dadc6bb..a5dba3829769c 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
 		return -EINVAL;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
-	if (cdev->private->state == DEV_STATE_VERIFY) {
+	if (cdev->private->state == DEV_STATE_VERIFY ||
+	    cdev->private->flags.doverify) {
 		/* Remember to fake irb when finished. */
 		if (!cdev->private->flags.fake_irb) {
 			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
 	}
 	if (cdev->private->state != DEV_STATE_ONLINE ||
 	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
-	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
-	    cdev->private->flags.doverify)
+	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
 		return -EBUSY;
 	ret = cio_set_options (sch, flags);
 	if (ret)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 03e71e3d5e5b3..3b990cf2c1954 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1285,7 +1285,7 @@ source "drivers/scsi/arm/Kconfig"
 
 config JAZZ_ESP
 	bool "MIPS JAZZ FAS216 SCSI support"
-	depends on MACH_JAZZ && SCSI
+	depends on MACH_JAZZ && SCSI=y
 	select SCSI_SPI_ATTRS
 	help
 	  This is the driver for the onboard SCSI host adapter of MIPS Magnum
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7aac9fc719675..0bb7e164b525f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1919,7 +1919,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 *
 * Returns the number of SGEs added to the SGL.
 **/
-static int
+static uint32_t
 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct sli4_sge *sgl, int datasegcnt,
 		struct lpfc_io_buf *lpfc_cmd)
@@ -1927,8 +1927,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	struct scatterlist *sgde = NULL; /* s/g data entry */
 	struct sli4_sge_diseed *diseed = NULL;
 	dma_addr_t physaddr;
-	int i = 0, num_sge = 0, status;
-	uint32_t reftag;
+	int i = 0, status;
+	uint32_t reftag, num_sge = 0;
 	uint8_t txop, rxop;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t rc;
@@ -2100,7 +2100,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 *
 * Returns the number of SGEs added to the SGL.
 **/
-static int
+static uint32_t
 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct sli4_sge *sgl, int datacnt, int protcnt,
 		struct lpfc_io_buf *lpfc_cmd)
@@ -2124,8 +2124,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	uint32_t rc;
 #endif
 	uint32_t checking = 1;
-	uint32_t dma_offset = 0;
-	int num_sge = 0, j = 2;
+	uint32_t dma_offset = 0, num_sge = 0;
+	int j = 2;
 	struct sli4_hybrid_sgl *sgl_xtra = NULL;
 
 	sgpe = scsi_prot_sglist(sc);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3cda5d26b66ca..e70ab8db30142 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 	return result + 4;
 }
 
+enum scsi_vpd_parameters {
+	SCSI_VPD_HEADER_SIZE = 4,
+	SCSI_VPD_LIST_SIZE = 36,
+};
+
 static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
 {
-	unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+	unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
 	int result;
 
 	if (sdev->no_vpd_size)
 		return SCSI_DEFAULT_VPD_LEN;
 
+	/*
+	 * Fetch the supported pages VPD and validate that the requested page
+	 * number is present.
+	 */
+	if (page != 0) {
+		result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
+		if (result < SCSI_VPD_HEADER_SIZE)
+			return 0;
+
+		result -= SCSI_VPD_HEADER_SIZE;
+		if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+			return 0;
+	}
 	/*
	 * Fetch the VPD page header to find out how big the page
	 * is. This is done to prevent problems on legacy devices
	 * which can not handle allocation lengths as large as
	 * potentially requested by the caller.
	 */
-	result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+	result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
 	if (result < 0)
 		return 0;
 
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 47d487729635c..e44f6bb25a8ea 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -6449,8 +6449,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
 {
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-	blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+	if (!ctrl_info->disable_managed_interrupts)
+		return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
 			      ctrl_info->pci_dev, 0);
+	else
+		return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
 }
 
 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index 474b272f9b02d..832adb570b501 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -499,6 +499,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
 			goto err_put_node;
 		}
 
+		/* recursive call to add all subdomains */
+		ret = scpsys_add_subdomain(scpsys, child);
+		if (ret)
+			goto err_put_node;
+
 		ret = pm_genpd_add_subdomain(parent_pd, child_pd);
 		if (ret) {
 			dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
@@ -508,11 +513,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
 			dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
 				child_pd->name);
 		}
-
-		/* recursive call to add all subdomains */
-		ret = scpsys_add_subdomain(scpsys, child);
-		if (ret)
-			goto err_put_node;
 	}
 
 	return 0;
@@ -526,9 +526,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
 {
 	int ret;
 
-	if (scpsys_domain_is_on(pd))
-		scpsys_power_off(&pd->genpd);
-
 	/*
	 * We're in the error cleanup already, so we only complain,
	 * but won't emit another error on top of the original one.
@@ -538,6 +535,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
 		dev_err(pd->scpsys->dev,
 			"failed to remove domain '%s' : %d - state may be inconsistent\n",
 			pd->genpd.name, ret);
+	if (scpsys_domain_is_on(pd))
+		scpsys_power_off(&pd->genpd);
 
 	clk_bulk_put(pd->num_clks, pd->clks);
 	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
index 39ca84a67daad..621e411fc9991 100644
--- a/drivers/soc/renesas/r8a77980-sysc.c
+++ b/drivers/soc/renesas/r8a77980-sysc.c
@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
 	  PD_CPU_NOCR },
 	{ "ca53-cpu3",	0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
 	  PD_CPU_NOCR },
-	{ "cr7",	0x240, 0, R8A77980_PD_CR7,	R8A77980_PD_ALWAYS_ON },
+	{ "cr7",	0x240, 0, R8A77980_PD_CR7,	R8A77980_PD_ALWAYS_ON,
+	  PD_CPU_NOCR },
 	{ "a3ir",	0x180, 0, R8A77980_PD_A3IR,	R8A77980_PD_ALWAYS_ON },
 	{ "a2ir0",	0x400, 0, R8A77980_PD_A2IR0,	R8A77980_PD_A3IR },
 	{ "a2ir1",	0x400, 1, R8A77980_PD_A2IR1,	R8A77980_PD_A3IR },
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index d3a23b1c2a4c5..61bf00dfe9c33 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
 static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
 {
 	struct hisi_sfc_v3xx_host *host = data;
+	u32 reg;
+
+	reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
+	if (!reg)
+		return IRQ_NONE;
 
 	hisi_sfc_v3xx_disable_int(host);
 
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 51ceaa4857249..ec3a4939ee984 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -137,14 +137,14 @@ struct sh_msiof_spi_priv {
 
 /* SIFCTR */
 #define SIFCTR_TFWM_MASK	GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define SIFCTR_TFWM_64		(0 << 29)	/* Transfer Request when 64 empty stages */
-#define SIFCTR_TFWM_32		(1 << 29)	/* Transfer Request when 32 empty stages */
-#define SIFCTR_TFWM_24		(2 << 29)	/* Transfer Request when 24 empty stages */
-#define SIFCTR_TFWM_16		(3 << 29)	/* Transfer Request when 16 empty stages */
-#define SIFCTR_TFWM_12		(4 << 29)	/* Transfer Request when 12 empty stages */
-#define SIFCTR_TFWM_8		(5 << 29)	/* Transfer Request when 8 empty stages */
-#define SIFCTR_TFWM_4		(6 << 29)	/* Transfer Request when 4 empty stages */
-#define SIFCTR_TFWM_1		(7 << 29)	/* Transfer Request when 1 empty stage */
+#define SIFCTR_TFWM_64		(0UL << 29)	/* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32		(1UL << 29)	/* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24		(2UL << 29)	/* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16		(3UL << 29)	/* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12		(4UL << 29)	/* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8		(5UL << 29)	/* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4		(6UL << 29)	/* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1		(7UL << 29)	/* Transfer Request when 1 empty stage */
 #define SIFCTR_TFUA_MASK	GENMASK(26, 20) /* Transmit FIFO Usable Area */
 #define SIFCTR_TFUA_SHIFT	20
 #define SIFCTR_TFUA(i)		((i) << SIFCTR_TFUA_SHIFT)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 301fe376a1206..13558cbd9b82e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -147,7 +147,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_node_acl *nacl = se_sess->se_node_acl;
 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-	unsigned long flags;

 	rcu_read_lock();
 	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
@@ -178,10 +177,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

-	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
-	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
-	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
-
 	return 0;
 }
 EXPORT_SYMBOL(transport_lookup_tmr_lun);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 69a4c9581e80e..7aec34c090661 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -910,12 +910,15 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,

 	return 0;
 fail:
-	if (bio)
-		bio_put(bio);
+	if (bio) {
+		bio_uninit(bio);
+		kfree(bio);
+	}
 	while (req->bio) {
 		bio = req->bio;
 		req->bio = bio->bi_next;
-		bio_put(bio);
+		bio_uninit(bio);
+		kfree(bio);
 	}
 	req->biotail = NULL;
 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0686882bcbda3..fb93d74c5d0b2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3627,6 +3627,10 @@ int transport_generic_handle_tmr(
 	unsigned long flags;
 	bool aborted = false;

+	spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
+	list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
+	spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
+
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->transport_state & CMD_T_ABORTED) {
 		aborted = true;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c74eaf2552c32..2f0f05259778a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1345,11 +1345,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
 	}
 }

+static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+{
+	struct uart_port *port = &uap->port;
+	u32 cr;
+
+	/* Enable transmitter */
+	cr = pl011_read(uap, REG_CR);
+	cr |= UART011_CR_TXE;
+
+	/* Disable receiver if half-duplex */
+	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+		cr &= ~UART011_CR_RXE;
+
+	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+		cr &= ~UART011_CR_RTS;
+	else
+		cr |= UART011_CR_RTS;
+
+	pl011_write(cr, uap, REG_CR);
+
+	if (port->rs485.delay_rts_before_send)
+		mdelay(port->rs485.delay_rts_before_send);
+
+	uap->rs485_tx_started = true;
+}
+
 static void pl011_start_tx(struct uart_port *port)
 {
 	struct uart_amba_port *uap =
 	    container_of(port, struct uart_amba_port, port);

+	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+	    !uap->rs485_tx_started)
+		pl011_rs485_tx_start(uap);
+
 	if (!pl011_dma_tx_start(uap))
 		pl011_start_tx_pio(uap);
 }
@@ -1431,42 +1461,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
 	return true;
 }

-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
-{
-	struct uart_port *port = &uap->port;
-	u32 cr;
-
-	/* Enable transmitter */
-	cr = pl011_read(uap, REG_CR);
-	cr |= UART011_CR_TXE;
-
-	/* Disable receiver if half-duplex */
-	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
-		cr &= ~UART011_CR_RXE;
-
-	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
-		cr &= ~UART011_CR_RTS;
-	else
-		cr |= UART011_CR_RTS;
-
-	pl011_write(cr, uap, REG_CR);
-
-	if (port->rs485.delay_rts_before_send)
-		mdelay(port->rs485.delay_rts_before_send);
-
-	uap->rs485_tx_started = true;
-}
-
 /* Returns true if tx interrupts have to be (kept) enabled */
 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
 {
 	struct circ_buf *xmit = &uap->port.state->xmit;
 	int count = uap->fifosize >> 1;

-	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
-	    !uap->rs485_tx_started)
-		pl011_rs485_tx_start(uap);
-
 	if (uap->port.x_char) {
 		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
 			return true;
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 9fd4e9ed93b8b..f3c25467e571f 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6159,7 +6159,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_hold(hba, false);
 		if (!ufshcd_is_clkgating_allowed(hba))
 			ufshcd_setup_clocks(hba, true);
-		ufshcd_release(hba);
 		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index ccdd525bd7c80..2b8f98f0707e7 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -826,7 +826,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
 		return;
 	}

-	if (request->complete) {
+	/*
+	 * zlp request is appended by driver, needn't call usb_gadget_giveback_request() to notify
+	 * gadget composite driver.
+	 */
+	if (request->complete && request->buf != priv_dev->zlp_buf) {
 		spin_unlock(&priv_dev->lock);
 		usb_gadget_giveback_request(&priv_ep->endpoint,
 					    request);
@@ -2537,11 +2541,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)

 	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
 		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+		list_del_init(&priv_req->list);

 		kfree(priv_req->request.buf);
 		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
 					     &priv_req->request);
-		list_del_init(&priv_req->list);
 		--priv_ep->wa2_counter;
 	}

diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 7b20d2d5c262e..7242591b346bc 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -394,7 +394,6 @@ static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
 	return ret;
 }

-
 /**
  * cdns_wakeup_irq - interrupt handler for wakeup events
  * @irq: irq number for cdns3/cdnsp core device
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index d00ff98dffabf..33ba30f79b337 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
  */
 static void cdns_otg_disable_irq(struct cdns *cdns)
 {
-	writel(0, &cdns->otg_irq_regs->ien);
+	if (cdns->version)
+		writel(0, &cdns->otg_irq_regs->ien);
 }

 /**
@@ -418,15 +419,20 @@ int cdns_drd_init(struct cdns *cdns)

 		cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;

-		if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+		state = readl(&cdns->otg_cdnsp_regs->did);
+
+		if (OTG_CDNSP_CHECK_DID(state)) {
 			cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
 					      &cdns->otg_cdnsp_regs->ien;
 			cdns->version  = CDNSP_CONTROLLER_V2;
-		} else {
+		} else if (OTG_CDNS3_CHECK_DID(state)) {
 			cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
 					      &cdns->otg_v1_regs->ien;
 			writel(1, &cdns->otg_v1_regs->simulate);
 			cdns->version  = CDNS3_CONTROLLER_V1;
+		} else {
+			dev_err(cdns->dev, "not supporte DID=0x%08x\n", state);
+			return -EINVAL;
 		}

 		dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
@@ -479,7 +485,6 @@ int cdns_drd_exit(struct cdns *cdns)
 	return 0;
 }

-
 /* Indicate the cdns3 core was power lost before */
 bool cdns_power_is_lost(struct cdns *cdns)
 {
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index cbdf94f73ed91..d72370c321d39 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
 	__le32 susp_timing_ctrl;
 };

-#define OTG_CDNSP_DID	0x0004034E
+/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
+#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
+
+/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
+#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)

 /*
  * Common registers interface for both CDNS3 and CDNSP version of DRD.
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 6164fc4c96a49..ceca4d839dfd4 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -18,6 +18,11 @@
 #include "../host/xhci.h"
 #include "../host/xhci-plat.h"

+/*
+ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 exist only
+ * in Cadence USB3 dual-role controller, so it can't be used
+ * with Cadence CDNSP dual-role controller.
+ */
 #define XECP_PORT_CAP_REG	0x8000
 #define XECP_AUX_CTRL_REG1	0x8120

@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
 	.resume_quirk = xhci_cdns3_resume_quirk,
 };

+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+
 static int __cdns_host_init(struct cdns *cdns)
 {
 	struct platform_device *xhci;
@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
 		goto err1;
 	}

-	cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
-			sizeof(struct xhci_plat_priv), GFP_KERNEL);
+	if (cdns->version < CDNSP_CONTROLLER_V2)
+		cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+				sizeof(struct xhci_plat_priv), GFP_KERNEL);
+	else
+		cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
+				sizeof(struct xhci_plat_priv), GFP_KERNEL);
+
 	if (!cdns->xhci_plat_data) {
 		ret = -ENOMEM;
 		goto err1;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 576c21bf77cda..b134110cc2ed5 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2548,6 +2548,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
 	int ret;

 	spin_lock_irqsave(&dwc->lock, flags);
+	if (!dwc->pullups_connected) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
 	dwc->connected = false;

 	/*
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index bbb6ff6b11aa1..5e78fcc63e4d3 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1340,7 +1340,15 @@ static int ncm_unwrap_ntb(struct gether *port,
 		 "Parsed NTB with %d frames\n", dgram_counter);

 	to_process -= block_len;
-	if (to_process != 0) {
+
+	/*
+	 * Windows NCM driver avoids USB ZLPs by adding a 1-byte
+	 * zero pad as needed.
+	 */
+	if (to_process == 1 &&
+	    (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+		to_process--;
+	} else if (to_process > 0) {
 		ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
 		goto parse_ntb;
 	}
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 32e6d19f7011a..a327f8bc57043 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -19,7 +19,9 @@ static struct class *role_class;
 struct usb_role_switch {
 	struct device dev;
 	struct mutex lock; /* device lock*/
+	struct module *module; /* the module this device depends on */
 	enum usb_role role;
+	bool registered;

 	/* From descriptor */
 	struct device *usb2_port;
@@ -46,6 +48,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
 	if (IS_ERR_OR_NULL(sw))
 		return 0;

+	if (!sw->registered)
+		return -EOPNOTSUPP;
+
 	mutex_lock(&sw->lock);

 	ret = sw->set(sw, role);
@@ -71,7 +76,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
 {
 	enum usb_role role;

-	if (IS_ERR_OR_NULL(sw))
+	if (IS_ERR_OR_NULL(sw) || !sw->registered)
 		return USB_ROLE_NONE;

 	mutex_lock(&sw->lock);
@@ -133,7 +138,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
 						  usb_role_switch_match);

 	if (!IS_ERR_OR_NULL(sw))
-		WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+		WARN_ON(!try_module_get(sw->module));

 	return sw;
 }
@@ -155,7 +160,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
 		sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
 						  NULL, usb_role_switch_match);
 	if (!IS_ERR_OR_NULL(sw))
-		WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+		WARN_ON(!try_module_get(sw->module));

 	return sw;
 }
@@ -170,7 +175,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
 void usb_role_switch_put(struct usb_role_switch *sw)
 {
 	if (!IS_ERR_OR_NULL(sw)) {
-		module_put(sw->dev.parent->driver->owner);
+		module_put(sw->module);
 		put_device(&sw->dev);
 	}
 }
@@ -187,15 +192,18 @@ struct usb_role_switch *
 usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
 {
 	struct device *dev;
+	struct usb_role_switch *sw = NULL;

 	if (!fwnode)
 		return NULL;

 	dev = class_find_device_by_fwnode(role_class, fwnode);
-	if (dev)
-		WARN_ON(!try_module_get(dev->parent->driver->owner));
+	if (dev) {
+		sw = to_role_switch(dev);
+		WARN_ON(!try_module_get(sw->module));
+	}

-	return dev ? to_role_switch(dev) : NULL;
+	return sw;
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);

@@ -337,6 +345,7 @@ usb_role_switch_register(struct device *parent,
 	sw->set = desc->set;
 	sw->get = desc->get;

+	sw->module = parent->driver->owner;
 	sw->dev.parent = parent;
 	sw->dev.fwnode = desc->fwnode;
 	sw->dev.class = role_class;
@@ -351,6 +360,8 @@ usb_role_switch_register(struct device *parent,
 		return ERR_PTR(ret);
 	}

+	sw->registered = true;
+
 	/* TODO: Symlinks for the host port and the device controller. */

 	return sw;
@@ -365,8 +376,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
  */
 void usb_role_switch_unregister(struct usb_role_switch *sw)
 {
-	if (!IS_ERR_OR_NULL(sw))
+	if (!IS_ERR_OR_NULL(sw)) {
+		sw->registered = false;
 		device_unregister(&sw->dev);
+	}
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_unregister);

diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 26171c5d3c61c..48130d636a020 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -25,6 +25,8 @@ struct ucsi_acpi {
 	unsigned long flags;
 	guid_t guid;
 	u64 cmd;
+	bool dell_quirk_probed;
+	bool dell_quirk_active;
 };

 static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
@@ -126,12 +128,73 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
 	.async_write = ucsi_acpi_async_write
 };

-static const struct dmi_system_id zenbook_dmi_id[] = {
+/*
+ * Some Dell laptops expect that an ACK command with the
+ * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
+ * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
+ * If this is not done events are not delivered to OSPM and
+ * subsequent commands will timeout.
+ */
+static int
+ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
+		     const void *val, size_t val_len)
+{
+	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+	u64 cmd = *(u64 *)val, ack = 0;
+	int ret;
+
+	if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
+	    cmd & UCSI_ACK_CONNECTOR_CHANGE)
+		ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
+
+	ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+	if (ret != 0)
+		return ret;
+	if (ack == 0)
+		return ret;
+
+	if (!ua->dell_quirk_probed) {
+		ua->dell_quirk_probed = true;
+
+		cmd = UCSI_GET_CAPABILITY;
+		ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
+					   sizeof(cmd));
+		if (ret == 0)
+			return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
+						    &ack, sizeof(ack));
+		if (ret != -ETIMEDOUT)
+			return ret;
+
+		ua->dell_quirk_active = true;
+		dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
+		dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
+	}
+
+	if (!ua->dell_quirk_active)
+		return ret;
+
+	return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
+}
+
+static const struct ucsi_operations ucsi_dell_ops = {
+	.read = ucsi_acpi_read,
+	.sync_write = ucsi_dell_sync_write,
+	.async_write = ucsi_acpi_async_write
+};
+
+static const struct dmi_system_id ucsi_acpi_quirks[] = {
 	{
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
 		},
+		.driver_data = (void *)&ucsi_zenbook_ops,
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		},
+		.driver_data = (void *)&ucsi_dell_ops,
 	},
 	{ }
 };
@@ -160,6 +223,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
 {
 	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
 	const struct ucsi_operations *ops = &ucsi_acpi_ops;
+	const struct dmi_system_id *id;
 	struct ucsi_acpi *ua;
 	struct resource *res;
 	acpi_status status;
@@ -189,8 +253,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
 	init_completion(&ua->complete);
 	ua->dev = &pdev->dev;

-	if (dmi_check_system(zenbook_dmi_id))
-		ops = &ucsi_zenbook_ops;
+	id = dmi_first_match(ucsi_acpi_quirks);
+	if (id)
+		ops = id->driver_data;

 	ua->ucsi = ucsi_create(&pdev->dev, ops);
 	if (IS_ERR(ua->ucsi))
diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
index 0f19d502f351b..dfab5b742191a 100644
--- a/drivers/vfio/iova_bitmap.c
+++ b/drivers/vfio/iova_bitmap.c
@@ -99,7 +99,7 @@ struct iova_bitmap {
 	struct iova_bitmap_map mapped;

 	/* userspace address of the bitmap */
-	u64 __user *bitmap;
+	u8 __user *bitmap;

 	/* u64 index that @mapped points to */
 	unsigned long mapped_base_index;
@@ -161,7 +161,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
 {
 	struct iova_bitmap_map *mapped = &bitmap->mapped;
 	unsigned long npages;
-	u64 __user *addr;
+	u8 __user *addr;
 	long ret;

 	/*
@@ -174,18 +174,19 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
 			       bitmap->mapped_base_index) *
 			       sizeof(*bitmap->bitmap), PAGE_SIZE);

-	/*
-	 * We always cap at max number of 'struct page' a base page can fit.
-	 * This is, for example, on x86 means 2M of bitmap data max.
-	 */
-	npages = min(npages,  PAGE_SIZE / sizeof(struct page *));
-
 	/*
 	 * Bitmap address to be pinned is calculated via pointer arithmetic
 	 * with bitmap u64 word index.
 	 */
 	addr = bitmap->bitmap + bitmap->mapped_base_index;

+	/*
+	 * We always cap at max number of 'struct page' a base page can fit.
+	 * This is, for example, on x86 means 2M of bitmap data max.
+	 */
+	npages = min(npages + !!offset_in_page(addr),
+		     PAGE_SIZE / sizeof(struct page *));
+
 	ret = pin_user_pages_fast((unsigned long)addr, npages,
 				  FOLL_WRITE, mapped->pages);
 	if (ret <= 0)
@@ -246,7 +247,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,

 	mapped = &bitmap->mapped;
 	mapped->pgshift = __ffs(page_size);
-	bitmap->bitmap = data;
+	bitmap->bitmap = (u8 __user *)data;
 	bitmap->mapped_total_index =
 		iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
 	bitmap->iova = iova;
@@ -301,7 +302,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)

 	remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
 	remaining = min_t(unsigned long, remaining,
-			  bytes / sizeof(*bitmap->bitmap));
+			  DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));

 	return remaining;
 }
@@ -405,6 +406,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
 			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
 	unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
 			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+	unsigned long last_page_idx = mapped->npages - 1;

 	do {
 		unsigned int page_idx = cur_bit / BITS_PER_PAGE;
@@ -413,6 +415,9 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
 					 last_bit - cur_bit + 1);
 		void *kaddr;

+		if (unlikely(page_idx > last_page_idx))
+			break;
+
 		kaddr = kmap_local_page(mapped->pages[page_idx]);
 		bitmap_set(kaddr, offset, nbits);
 		kunmap_local(kaddr);
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index b7818b652698f..a7b63c475f954 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo *var,

 	DBG("savagefb_check_var");

+	if (!var->pixclock)
+		return -EINVAL;
+
 	var->transp.offset = 0;
 	var->transp.length = 0;
 	switch (var->bits_per_pixel) {
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 1c197c3f95381..fe8996461b9ef 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1475,6 +1475,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)

 	vtotal = var->upper_margin + var->lower_margin + var->vsync_len;

+	if (!var->pixclock)
+		return -EINVAL;
 	pixclock = var->pixclock;

 	if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 1c9144e3e83ac..a146d70efa650 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -341,7 +341,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
 {
 	struct afs_server_list *new, *old, *discard;
 	struct afs_vldb_entry *vldb;
-	char idbuf[16];
+	char idbuf[24];
 	int ret, idsz;

 	_enter("");
@@ -349,7 +349,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
 	/* We look up an ID by passing it as a decimal string in the
 	 * operation's name parameter.
 	 */
-	idsz = sprintf(idbuf, "%llu", volume->vid);
+	idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);

 	vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
 	if (IS_ERR(vldb)) {
diff --git a/fs/aio.c b/fs/aio.c
index e85ba0b77f596..849c3e3ed558b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -595,6 +595,13 @@ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
 	struct kioctx *ctx = req->ki_ctx;
 	unsigned long flags;

+	/*
+	 * kiocb didn't come from aio or is neither a read nor a write, hence
+	 * ignore it.
+	 */
+	if (!(iocb->ki_flags & IOCB_AIO_RW))
+		return;
+
 	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
 		return;

@@ -1476,7 +1483,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
 	req->ki_complete = aio_complete_rw;
 	req->private = NULL;
 	req->ki_pos = iocb->aio_offset;
-	req->ki_flags = req->ki_filp->f_iocb_flags;
+	req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
 	if (iocb->aio_flags & IOCB_FLAG_RESFD)
 		req->ki_flags |= IOCB_EVENTFD;
 	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
index 7077f72e6f474..f449f7340aad0 100644
--- a/fs/cachefiles/cache.c
+++ b/fs/cachefiles/cache.c
@@ -168,6 +168,8 @@ int cachefiles_add_cache(struct cachefiles_cache *cache)
 	dput(root);
 error_open_root:
 	cachefiles_end_secure(cache, saved_cred);
+	put_cred(cache->cache_cred);
+	cache->cache_cred = NULL;
error_getsec:
 	fscache_relinquish_cache(cache_cookie);
 	cache->cache = NULL;
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index aa4efcabb5e37..5f4df9588620f 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -805,6 +805,7 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
 	cachefiles_put_directory(cache->graveyard);
 	cachefiles_put_directory(cache->store);
 	mntput(cache->mnt);
+	put_cred(cache->cache_cred);

 	kfree(cache->rootdirname);
 	kfree(cache->secctx);
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 26fa170090b8f..c4a3187bdb8fc 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -21,6 +21,8 @@ struct z_erofs_decompress_req {
 };

 struct z_erofs_decompressor {
+	int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
+		      void *data, int size);
 	int (*decompress)(struct z_erofs_decompress_req *rq,
 			  struct page **pagepool);
 	char *name;
@@ -93,6 +95,8 @@ int z_erofs_decompress(struct z_erofs_decompress_req *rq,
 		       struct page **pagepool);

 /* prototypes for specific algorithms */
+int z_erofs_load_lzma_config(struct super_block *sb,
+			     struct erofs_super_block *dsb, void *data, int size);
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
 			    struct page **pagepool);
 #endif
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 0cfad74374ca9..1eefa4411e066 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -24,11 +24,11 @@ struct z_erofs_lz4_decompress_ctx {
 	unsigned int oend;
 };

-int z_erofs_load_lz4_config(struct super_block *sb,
-			    struct erofs_super_block *dsb,
-			    struct z_erofs_lz4_cfgs *lz4, int size)
+static int z_erofs_load_lz4_config(struct super_block *sb,
+			    struct erofs_super_block *dsb, void *data, int size)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	struct z_erofs_lz4_cfgs *lz4 = data;
 	u16 distance;

 	if (lz4) {
@@ -374,17 +374,71 @@ static struct z_erofs_decompressor decompressors[] = {
 		.name = "interlaced"
 	},
 	[Z_EROFS_COMPRESSION_LZ4] = {
+		.config = z_erofs_load_lz4_config,
 		.decompress = z_erofs_lz4_decompress,
 		.name = "lz4"
 	},
 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
 	[Z_EROFS_COMPRESSION_LZMA] = {
+		.config = z_erofs_load_lzma_config,
 		.decompress = z_erofs_lzma_decompress,
 		.name = "lzma"
 	},
 #endif
 };

+int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
+{
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
+	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+	unsigned int algs, alg;
+	erofs_off_t offset;
+	int size, ret = 0;
+
+	if (!erofs_sb_has_compr_cfgs(sbi)) {
+		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
+		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
+	}
+
+	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
+	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
+		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
+			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
+		return -EOPNOTSUPP;
+	}
+
+	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
+	alg = 0;
+	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
+		void *data;
+
+		if (!(algs & 1))
+			continue;
+
+		data = erofs_read_metadata(sb, &buf, &offset, &size);
+		if (IS_ERR(data)) {
+			ret = PTR_ERR(data);
+			break;
+		}
+
+		if (alg >= ARRAY_SIZE(decompressors) ||
+		    !decompressors[alg].config) {
+			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
+				  alg);
+			ret = -EOPNOTSUPP;
+		} else {
+			ret = decompressors[alg].config(sb,
+					dsb, data, size);
+		}
+
+		kfree(data);
+		if (ret)
+			break;
+	}
+	erofs_put_metabuf(&buf);
+	return ret;
+}
+
 int z_erofs_decompress(struct z_erofs_decompress_req *rq,
 		       struct page **pagepool)
 {
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 49addc345aebe..970464c4b6769 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -72,10 +72,10 @@ int z_erofs_lzma_init(void)
 }

 int z_erofs_load_lzma_config(struct super_block *sb,
-			     struct erofs_super_block *dsb,
-			     struct z_erofs_lzma_cfgs *lzma, int size)
+			     struct erofs_super_block *dsb, void *data, int size)
 {
 	static DEFINE_MUTEX(lzma_resize_mutex);
+	struct z_erofs_lzma_cfgs *lzma = data;
 	unsigned int dict_size, i;
 	struct z_erofs_lzma *strm, *head = NULL;
 	int err;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index d8d09fc3ed655..79a7a5815ea63 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -471,6 +471,8 @@ struct erofs_map_dev {

 /* data.c */
 extern const struct file_operations erofs_file_fops;
+void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
+			  erofs_off_t *offset, int *lengthp);
 void erofs_unmap_metabuf(struct erofs_buf *buf);
 void erofs_put_metabuf(struct erofs_buf *buf);
 void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
@@ -565,9 +567,7 @@ void z_erofs_exit_zip_subsystem(void);
 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 				       struct erofs_workgroup *egrp);
 int erofs_try_to_free_cached_page(struct page *page);
-int z_erofs_load_lz4_config(struct super_block *sb,
-			    struct erofs_super_block *dsb,
-			    struct z_erofs_lz4_cfgs *lz4, int len);
+int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -575,36 +575,14 @@ static inline int erofs_init_shrinker(void) { return 0; }
 static inline void erofs_exit_shrinker(void) {}
 static inline int z_erofs_init_zip_subsystem(void) { return 0; }
 static inline void z_erofs_exit_zip_subsystem(void) {}
-static inline int z_erofs_load_lz4_config(struct super_block *sb,
-				  struct erofs_super_block *dsb,
-				  struct z_erofs_lz4_cfgs *lz4, int len)
-{
-	if (lz4 || dsb->u1.lz4_max_distance) {
-		erofs_err(sb, "lz4 algorithm isn't enabled");
-		return -EINVAL;
-	}
-	return 0;
-}
 #endif	/* !CONFIG_EROFS_FS_ZIP */

 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
 int z_erofs_lzma_init(void);
 void z_erofs_lzma_exit(void);
-int z_erofs_load_lzma_config(struct super_block *sb,
-			     struct erofs_super_block *dsb,
-			     struct z_erofs_lzma_cfgs *lzma, int size);
 #else
 static inline int z_erofs_lzma_init(void) { return 0; }
 static inline int z_erofs_lzma_exit(void) { return 0; }
-static inline int z_erofs_load_lzma_config(struct super_block *sb,
-				    struct erofs_super_block *dsb,
-				    struct z_erofs_lzma_cfgs *lzma, int size) {
-	if (lzma) {
-		erofs_err(sb, "lzma algorithm isn't enabled");
-		return -EINVAL;
-	}
-	return 0;
-}
 #endif	/* !CONFIG_EROFS_FS_ZIP */

 /* flags for erofs_fscache_register_cookie() */
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
index 0dc34721080c7..e8ccaa761bd63 100644
--- a/fs/erofs/namei.c
+++ b/fs/erofs/namei.c
@@ -137,24 +137,24 @@ static void *find_target_block_classic(struct erofs_buf *target,
 			/* string comparison without already matched prefix */
 			diff = erofs_dirnamecmp(name, &dname, &matched);

-			if (!diff) {
-				*_ndirents = 0;
-				goto out;
-			} else if (diff > 0) {
-				head = mid + 1;
-				startprfx = matched;
-
-				if (!IS_ERR(candidate))
-					erofs_put_metabuf(target);
-				*target = buf;
-				candidate = de;
-				*_ndirents = ndirents;
-			} else {
+			if (diff < 0) {
 				erofs_put_metabuf(&buf);
-
 				back = mid - 1;
 				endprfx = matched;
+				continue;
+			}
+
+			if (!IS_ERR(candidate))
+				erofs_put_metabuf(target);
+			*target = buf;
+			if (!diff) {
+				*_ndirents = 0;
+				return de;
 			}
+			head = mid + 1;
+			startprfx = matched;
+			candidate = de;
+			*_ndirents = ndirents;
 			continue;
 		}
out:	/* free if the candidate is valid */
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index bd8bf8fc2f5df..f2647126cb2fb 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -126,8 +126,8 @@ static bool check_layout_compatibility(struct super_block *sb,

 #ifdef CONFIG_EROFS_FS_ZIP
 /* read variable-sized metadata, offset will be aligned by 4-byte */
-static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
-				 erofs_off_t *offset, int *lengthp)
+void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
+			  erofs_off_t *offset, int *lengthp)
 {
 	u8 *buffer, *ptr;
 	int len, i, cnt;
@@ -159,64 +159,15 @@ static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
 	}
 	return buffer;
 }
-
-static int erofs_load_compr_cfgs(struct super_block *sb,
-				 struct erofs_super_block *dsb)
-{
-	struct erofs_sb_info *sbi = EROFS_SB(sb);
-	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
-	unsigned int algs, alg;
-	erofs_off_t offset;
-	int size, ret = 0;
-
-	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
-	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
-		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
-			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
-		return -EINVAL;
-	}
-
-	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
-	alg = 0;
-	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
-		void *data;
-
-		if (!(algs & 1))
-			continue;
-
-		data = erofs_read_metadata(sb, &buf, &offset, &size);
-		if (IS_ERR(data)) {
-			ret = PTR_ERR(data);
-			break;
-		}
-
-		switch (alg) {
-		case Z_EROFS_COMPRESSION_LZ4:
-			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
-			break;
-		case Z_EROFS_COMPRESSION_LZMA:
-			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
-			break;
-		default:
-			DBG_BUGON(1);
-			ret = -EFAULT;
-		}
-		kfree(data);
-		if (ret)
-			break;
-	}
-	erofs_put_metabuf(&buf);
-	return ret;
-}
 #else
-static int erofs_load_compr_cfgs(struct super_block *sb,
-				 struct erofs_super_block *dsb)
+static int z_erofs_parse_cfgs(struct super_block *sb,
+			      struct erofs_super_block *dsb)
 {
-	if (dsb->u1.available_compr_algs) {
-		erofs_err(sb, "try to load compressed fs when compression is disabled");
-		return -EINVAL;
-	}
-	return 0;
+	if (!dsb->u1.available_compr_algs)
+		return 0;
+
+	erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
+	return -EOPNOTSUPP;
 }
 #endif

@@ -398,10 +349,7 @@ static int erofs_read_superblock(struct super_block *sb)
 	}

 	/* parse on-disk compression configurations */
-	if (erofs_sb_has_compr_cfgs(sbi))
-		ret = erofs_load_compr_cfgs(sb, dsb);
-	else
-		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
+	ret = z_erofs_parse_cfgs(sb, dsb);
 	if (ret < 0)
 		goto out;

diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 0337b70b2dac4..abcded1acd194 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -610,7 +610,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 		.map = map,
 	};
 	int err = 0;
-	unsigned int lclusterbits, endoff;
+	unsigned int lclusterbits, endoff, afmt;
 	unsigned long initial_lcn;
 	unsigned long long ofs, end;

@@ -700,17 +700,20 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 			err = -EFSCORRUPTED;
 			goto unmap_out;
 		}
-		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
-			map->m_algorithmformat =
-				Z_EROFS_COMPRESSION_INTERLACED;
-		else
-			map->m_algorithmformat =
-				Z_EROFS_COMPRESSION_SHIFTED;
-	} else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
-		map->m_algorithmformat = vi->z_algorithmtype[1];
+		afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
+			Z_EROFS_COMPRESSION_INTERLACED :
+			Z_EROFS_COMPRESSION_SHIFTED;
 	} else {
-		map->m_algorithmformat = vi->z_algorithmtype[0];
+		afmt = m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 ?
+			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+			erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
+				  afmt, vi->nid);
+			err = -EFSCORRUPTED;
+			goto unmap_out;
+		}
 	}
+	map->m_algorithmformat = afmt;

 	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
 	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index aa5aadd70bbc2..67af684e44e6e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2229,7 +2229,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,


 /*
- * ext4_ext_determine_hole - determine hole around given block
+ * ext4_ext_find_hole - find hole around given block according to the given path
  * @inode:	inode we lookup in
  * @path:	path in extent tree to @lblk
  * @lblk:	pointer to logical block around which we want to determine hole
@@ -2241,9 +2241,9 @@ static int ext4_fill_es_cache_info(struct inode *inode,
  * The function returns the length of a hole starting at @lblk. We update @lblk
  * to the beginning of the hole if we managed to find it.
  */
-static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
-					   struct ext4_ext_path *path,
-					   ext4_lblk_t *lblk)
+static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
+				      struct ext4_ext_path *path,
+				      ext4_lblk_t *lblk)
 {
 	int depth = ext_depth(inode);
 	struct ext4_extent *ex;
@@ -2270,30 +2270,6 @@ static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
 	return len;
 }

-/*
- * ext4_ext_put_gap_in_cache:
- * calculate boundaries of the gap that the requested block fits into
- * and cache this gap
- */
-static void
-ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
-			  ext4_lblk_t hole_len)
-{
-	struct extent_status es;
-
-	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
-				  hole_start + hole_len - 1, &es);
-	if (es.es_len) {
-		/* There's delayed extent containing lblock? */
-		if (es.es_lblk <= hole_start)
-			return;
-		hole_len = min(es.es_lblk - hole_start, hole_len);
-	}
-	ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
-	ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
-			      EXTENT_STATUS_HOLE);
-}
-
 /*
  * ext4_ext_rm_idx:
  * removes index from the index block.
@@ -4064,6 +4040,69 @@ static int get_implied_cluster_alloc(struct super_block *sb,
 	return 0;
 }

+/*
+ * Determine hole length around the given logical block, first try to
+ * locate and expand the hole from the given @path, and then adjust it
+ * if it's partially or completely converted to delayed extents, insert
+ * it into the extent cache tree if it's indeed a hole, finally return
+ * the length of the determined extent.
+ */
+static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
+						  struct ext4_ext_path *path,
+						  ext4_lblk_t lblk)
+{
+	ext4_lblk_t hole_start, len;
+	struct extent_status es;
+
+	hole_start = lblk;
+	len = ext4_ext_find_hole(inode, path, &hole_start);
+again:
+	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+				  hole_start + len - 1, &es);
+	if (!es.es_len)
+		goto insert_hole;
+
+	/*
+	 * There's a delalloc extent in the hole, handle it if the delalloc
+	 * extent is in front of, behind and straddle the queried range.
+	 */
+	if (lblk >= es.es_lblk + es.es_len) {
+		/*
+		 * The delalloc extent is in front of the queried range,
+		 * find again from the queried start block.
+		 */
+		len -= lblk - hole_start;
+		hole_start = lblk;
+		goto again;
+	} else if (in_range(lblk, es.es_lblk, es.es_len)) {
+		/*
+		 * The delalloc extent containing lblk, it must have been
+		 * added after ext4_map_blocks() checked the extent status
+		 * tree, adjust the length to the delalloc extent's after
+		 * lblk.
+		 */
+		len = es.es_lblk + es.es_len - lblk;
+		return len;
+	} else {
+		/*
+		 * The delalloc extent is partially or completely behind
+		 * the queried range, update hole length until the
+		 * beginning of the delalloc extent.
+		 */
+		len = min(es.es_lblk - hole_start, len);
+	}
+
+insert_hole:
+	/* Put just found gap into cache to speed up subsequent requests */
+	ext_debug(inode, " -> %u:%u\n", hole_start, len);
+	ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
+
+	/* Update hole_len to reflect hole size after lblk */
+	if (hole_start != lblk)
+		len -= lblk - hole_start;
+
+	return len;
+}

 /*
  * Block allocation/map/preallocation routine for extents based files
@@ -4181,22 +4220,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		 * we couldn't try to create block if create flag is zero
 		 */
 		if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
-			ext4_lblk_t hole_start, hole_len;
+			ext4_lblk_t len;

-			hole_start = map->m_lblk;
-			hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
-			/*
-			 * put just found gap into cache to speed up
-			 * subsequent requests
-			 */
-			ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
+			len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);

-			/* Update hole_len to reflect hole size after map->m_lblk */
-			if (hole_start != map->m_lblk)
-				hole_len -= map->m_lblk - hole_start;
 			map->m_pblk = 0;
-			map->m_len = min_t(unsigned int, map->m_len, hole_len);
-
+			map->m_len = min_t(unsigned int, map->m_len, len);
 			goto out;
 		}

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 1a310ee7d9e55..6a3e27771df73 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -831,7 +831,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	int new_order;

-	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
+	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
 		return;

 	new_order = mb_avg_fragment_size_order(sb,
@@ -2176,6 +2176,9 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
 		return err;

 	ext4_lock_group(ac->ac_sb, group);
+	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+		goto out;
+
 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

 	if (max > 0) {
@@ -2183,6 +2186,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
 		ext4_mb_use_best_found(ac, e4b);
 	}

+out:
 	ext4_unlock_group(ac->ac_sb, group);
 	ext4_mb_unload_buddy(e4b);

@@ -2211,12 +2215,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 	if (err)
 		return err;

-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
-		ext4_mb_unload_buddy(e4b);
-		return 0;
-	}
-
 	ext4_lock_group(ac->ac_sb, group);
+	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+		goto out;
+
 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
 			     ac->ac_g_ex.fe_len, &ex);
 	ex.fe_logical = 0xDEADFA11; /* debug value */
@@ -2249,6 +2251,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 		ac->ac_b_ex = ex;
 		ext4_mb_use_best_found(ac, e4b);
 	}
+out:
 	ext4_unlock_group(ac->ac_sb, group);
 	ext4_mb_unload_buddy(e4b);

diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 2215179c925b3..2618bf5a37892 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -1658,8 +1658,10 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
 		le_b = NULL;
 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
 				      0, NULL, &mi_b);
-		if (!attr_b)
-			return -ENOENT;
+		if (!attr_b) {
+			err = -ENOENT;
+			goto out;
+		}

 		attr = attr_b;
 		le = le_b;
@@ -1740,13 +1742,15 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
ok:
 	run_truncate_around(run, vcn);
out:
-	if (new_valid > data_size)
-		new_valid = data_size;
+	if (attr_b) {
+		if (new_valid > data_size)
+			new_valid = data_size;

-	valid_size = le64_to_cpu(attr_b->nres.valid_size);
-	if (new_valid != valid_size) {
-		attr_b->nres.valid_size = cpu_to_le64(valid_size);
-		mi_b->dirty = true;
+		valid_size = le64_to_cpu(attr_b->nres.valid_size);
+		if (new_valid != valid_size) {
+			attr_b->nres.valid_size = cpu_to_le64(valid_size);
+			mi_b->dirty = true;
+		}
 	}

 	return err;
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index 0c6a68e71e7d4..723e49ec83ce7 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -127,12 +127,13 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
 {
 	size_t off;
 	u16 sz;
+	const unsigned le_min_size = le_size(0);

 	if (!le) {
 		le = ni->attr_list.le;
 	} else {
 		sz = le16_to_cpu(le->size);
-		if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
+		if (sz < le_min_size) {
 			/* Impossible 'cause we should not return such le. */
 			return NULL;
 		}
@@ -141,7 +142,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,

 	/* Check boundary. */
 	off = PtrOffset(ni->attr_list.le, le);
-	if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
+	if (off + le_min_size > ni->attr_list.size) {
 		/* The regular end of list. */
 		return NULL;
 	}
@@ -149,8 +150,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
 	sz = le16_to_cpu(le->size);

 	/* Check le for errors. */
-	if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
-	    off + sz > ni->attr_list.size ||
+	if (sz < le_min_size || off + sz > ni->attr_list.size ||
 	    sz < le->name_off + le->name_len * sizeof(short)) {
 		return NULL;
 	}
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index d4d9f4ffb6d9a..72cdfa8727d3c 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -309,11 +309,31 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
 		return 0;
 	}

-	/* NTFS: symlinks are "dir + reparse" or "file + reparse" */
-	if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
-		dt_type = DT_LNK;
-	else
-		dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+	/*
+	 * NTFS: symlinks are "dir + reparse" or "file + reparse"
+	 * Unfortunately reparse attribute is used for many purposes (several dozens).
+	 * It is not possible here to know is this name symlink or not.
+	 * To get exactly the type of name we should to open inode (read mft).
+	 * getattr for opened file (fstat) correctly returns symlink.
+	 */
+	dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+
+	/*
+	 * It is not reliable to detect the type of name using duplicated information
+	 * stored in parent directory.
+	 * The only correct way to get the type of name - read MFT record and find ATTR_STD.
+	 * The code below is not good idea.
+	 * It does additional locks/reads just to get the type of name.
+	 * Should we use additional mount option to enable branch below?
+	 */
+	if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) &&
+	    ino != ni->mi.rno) {
+		struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
+		if (!IS_ERR_OR_NULL(inode)) {
+			dt_type = fs_umode_to_dtype(inode->i_mode);
+			iput(inode);
+		}
+	}

 	return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
 }
@@ -495,11 +515,9 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
 	struct INDEX_HDR *hdr;
 	const struct ATTR_FILE_NAME *fname;
 	u32 e_size, off, end;
-	u64 vbo = 0;
 	size_t drs = 0, fles = 0, bit = 0;
-	loff_t i_size = ni->vfs_inode.i_size;
 	struct indx_node *node = NULL;
-	u8 index_bits = ni->dir.index_bits;
+	size_t max_indx = ni->vfs_inode.i_size >> ni->dir.index_bits;

 	if (is_empty)
 		*is_empty = true;
@@ -543,7 +561,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
 			fles += 1;
 		}

-		if (vbo >= i_size)
+		if (bit >= max_indx)
 			goto out;

 		err = indx_used_bit(&ni->dir, ni, &bit);
@@ -553,8 +571,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
 		if (bit == MINUS_ONE_T)
 			goto out;

-		vbo = (u64)bit << index_bits;
-		if (vbo >= i_size)
+		if (bit >= max_indx)
 			goto out;

 		err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
@@ -564,7 +581,6 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,

 		hdr = &node->index->ihdr;
 		bit += 1;
-		vbo = (u64)bit << ni->dir.idx2vbn_bits;
 	}

out:
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index f31c0389a2e7d..14efe46df91ef 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -1110,6 +1110,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
 	iocb->ki_pos += written;
 	if (iocb->ki_pos > ni->i_valid)
 		ni->i_valid = iocb->ki_pos;
+	if (iocb->ki_pos > i_size)
+		i_size_write(inode, iocb->ki_pos);

 	return written;
 }
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 710cb5aa5a65b..d53ef128fa733 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -465,7 +465,7 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
 {
 	const struct RESTART_AREA *ra;
 	u16 cl, fl, ul;
-	u32 off, l_size, file_dat_bits, file_size_round;
+	u32 off, l_size, seq_bits;
 	u16 ro = le16_to_cpu(rhdr->ra_off);
 	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);

@@ -511,13 +511,15 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
 	/* Make sure the sequence number bits match the log file size. */
 	l_size = le64_to_cpu(ra->l_size);

-	file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
-	file_size_round = 1u << (file_dat_bits + 3);
-	if (file_size_round != l_size &&
-	    (file_size_round < l_size || (file_size_round / 2) > l_size)) {
-		return false;
+	seq_bits = sizeof(u64) * 8 + 3;
+	while (l_size) {
+		l_size >>= 1;
+		seq_bits -= 1;
 	}

+	if (seq_bits != ra->seq_num_bits)
+		return false;
+
 	/* The log page data offset and record header length must be quad-aligned. */
 	if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
 	    !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 4b72bc7f12ca3..1eac80d55b554 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -976,6 +976,30 @@ static inline __le32 security_hash(const void *sd, size_t bytes)
 	return cpu_to_le32(hash);
 }

+/*
+ * simple wrapper for sb_bread_unmovable.
+ */
+struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
+{
+	struct ntfs_sb_info *sbi = sb->s_fs_info;
+	struct buffer_head *bh;
+
+	if (unlikely(block >= sbi->volume.blocks)) {
+		/* prevent generic message "attempt to access beyond end of device" */
+		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
+			 (u64)block << sb->s_blocksize_bits);
+		return NULL;
+	}
+
+	bh = sb_bread_unmovable(sb, block);
+	if (bh)
+		return bh;
+
+	ntfs_err(sb, "failed to read volume at offset 0x%llx",
+		 (u64)block << sb->s_blocksize_bits);
+	return NULL;
+}
+
 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
 {
 	struct block_device *bdev = sb->s_bdev;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index dc937089a464a..42dd9fdaf4151 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -402,7 +402,6 @@ static struct inode *ntfs_read_mft(struct inode *inode,
 		goto out;

 	if (!is_match && name) {
-		/* Reuse rec as buffer for ascii name. */
 		err = -ENOENT;
 		goto out;
 	}
@@ -417,6 +416,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,

 	if (names != le16_to_cpu(rec->hard_links)) {
 		/* Correct minor error on the fly. Do not mark inode as dirty. */
+		ntfs_inode_warn(inode, "Correct links count -> %u.", names);
 		rec->hard_links = cpu_to_le16(names);
 		ni->mi.dirty = true;
 	}
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 0f38d558169a1..ba26a465b3091 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -517,12 +517,10 @@ struct ATTR_LIST_ENTRY {
 	__le64 vcn;		// 0x08: Starting VCN of this attribute.
 	struct MFT_REF ref;	// 0x10: MFT record number with attribute.
 	__le16 id;		// 0x18: struct ATTRIB ID.
-	__le16 name[3];		// 0x1A: Just to align. To get real name can use bNameOffset.
+	__le16 name[];		// 0x1A: Just to align. To get real name can use name_off.

 }; // sizeof(0x20)

-static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
-
 static inline u32 le_size(u8 name_len)
 {
 	return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) +
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 74482ef569ab7..0f9bec29f2b70 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -580,6 +580,7 @@ bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
 int log_replay(struct ntfs_inode *ni, bool *initialized);

 /* Globals from fsntfs.c */
+struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block);
 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
 		       bool simple);
@@ -1012,19 +1013,6 @@ static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
 	return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
 }

-static inline struct buffer_head *ntfs_bread(struct super_block *sb,
-					     sector_t block)
-{
-	struct buffer_head *bh = sb_bread(sb, block);
-
-	if (bh)
-		return bh;
-
-	ntfs_err(sb, "failed to read volume at offset 0x%llx",
-		 (u64)block << sb->s_blocksize_bits);
-	return NULL;
-}
-
 static inline struct ntfs_inode *ntfs_i(struct inode *inode)
 {
 	return container_of(inode, struct ntfs_inode, vfs_inode);
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index ba336c7280b85..a8d4ed7bca025 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -226,11 +226,6 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return NULL;
}

- if (off + asize < off) {
- /* overflow check */
- return NULL;
- }
-
attr = Add2Ptr(attr, asize);
off += asize;
}
@@ -253,8 +248,8 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
if ((t32 & 0xf) || (t32 > 0x100))
return NULL;

- /* Check boundary. */
- if (off + asize > used)
+ /* Check overflow and boundary. */
+ if (off + asize < off || off + asize > used)
return NULL;

/* Check size of attribute. */
@@ -491,8 +486,20 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
return false;

if (ni && is_attr_indexed(attr)) {
- le16_add_cpu(&ni->mi.mrec->hard_links, -1);
- ni->mi.dirty = true;
+ u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
+ struct ATTR_FILE_NAME *fname =
+ attr->type != ATTR_NAME ?
+ NULL :
+ resident_data_ex(attr,
+ SIZEOF_ATTRIBUTE_FILENAME);
+ if (fname && fname->type == FILE_NAME_DOS) {
+ /* Do not decrease links count deleting DOS name. */
+ } else if (!links) {
+ /* minor error. Not critical. */
+ } else {
+ ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
+ ni->mi.dirty = true;
+ }
}

used -= asize;
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index df15e00c2a3a0..d98cf7b382bcc 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -217,6 +217,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
if (!ea->name_len)
break;

+ if (ea->name_len > ea_size)
+ break;
+
if (buffer) {
/* Check if we can use field ea->name */
if (off + ea_size > size)
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 6f4d7aa70e5a2..fd082151c5f9b 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -149,7 +149,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
return -EOPNOTSUPP;

ses = tcon->ses;
- server = ses->server;
+ server = cifs_pick_channel(ses);
cfids = tcon->cfids;

if (!server->ops->new_lease_key)
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index d0ac2648c0d61..d3d4cf6321fd5 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -444,7 +444,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
UniStrupr(user);
} else {
- memset(user, '\0', 2);
+ *(u16 *)user = 0;
}

rc = crypto_shash_update(ses->server->secmech.hmacmd5,
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 7f1aea4c11b9c..58bb54994e22a 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -86,7 +86,7 @@
#define SMB_INTERFACE_POLL_INTERVAL 600

/* maximum number of PDUs in one compound */
-#define MAX_COMPOUND 5
+#define MAX_COMPOUND 7

/*
* Default number of credits to keep available for SMB3.
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index f4818599c00a2..4d5302b58b534 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -209,7 +209,7 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c

switch (match_token(value, cifs_secflavor_tokens, args)) {
case Opt_sec_krb5p:
- cifs_errorf(fc, "sec=krb5p is not supported!\n");
+ cifs_errorf(fc, "sec=krb5p is not supported. Use sec=krb5,seal instead\n");
return 1;
case Opt_sec_krb5i:
ctx->sign = true;
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 2d75ba5aaa8ad..5990bdbae598f 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -304,14 +304,16 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
}

static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
- SEARCH_ID_FULL_DIR_INFO *info,
+ const void *info,
struct cifs_sb_info *cifs_sb)
{
+ const FILE_FULL_DIRECTORY_INFO *di = info;
+
__dir_info_to_fattr(fattr, info);

- /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
+ /* See MS-FSCC 2.4.14, 2.4.19 */
if (fattr->cf_cifsattrs & ATTR_REPARSE)
- fattr->cf_cifstag = le32_to_cpu(info->EaSize);
+ fattr->cf_cifstag = le32_to_cpu(di->EaSize);
cifs_fill_common_info(fattr, cifs_sb);
}

@@ -425,7 +427,7 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
} else /* not srvinos - BB fixme add check for backlevel? */ {
- cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
}

search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
@@ -1019,10 +1021,9 @@ static int cifs_filldir(char *find_entry, struct file *file,
(FIND_FILE_STANDARD_INFO *)find_entry,
cifs_sb);
break;
+ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
- cifs_fulldir_info_to_fattr(&fattr,
- (SEARCH_ID_FULL_DIR_INFO *)find_entry,
- cifs_sb);
+ cifs_fulldir_info_to_fattr(&fattr, find_entry, cifs_sb);
break;
default:
cifs_dir_info_to_fattr(&fattr,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index c1fc1651d8b69..4c1231496a725 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -5010,6 +5010,9 @@ int SMB2_query_directory_init(const unsigned int xid,
case SMB_FIND_FILE_POSIX_INFO:
req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
break;
+ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+ req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
+ break;
default:
cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
info_level);
@@ -5079,6 +5082,9 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
/* note that posix payload are variable size */
info_buf_size = sizeof(struct smb2_posix_info);
break;
+ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+ info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
+ break;
default:
cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 8a1dd8407a3a7..df44acaec9ae9 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -427,10 +427,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
server->conn_id, server->hostname);
}
smbd_done:
- if (rc < 0 && rc != -EINTR)
+ /*
+ * there's hardly any use for the layers above to know the
+ * actual error code here. All they should do at this point is
+ * to retry the connection and hope it goes away.
+ */
+ if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
- else if (rc > 0)
+ rc = -ECONNABORTED;
+ cifs_signal_cifsd_for_reconnect(server, false);
+ } else if (rc > 0)
rc = 0;
out:
cifs_in_send_dec(server);
@@ -449,8 +456,8 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
if (!(flags & CIFS_TRANSFORM_REQ))
return __smb_send_rqst(server, num_rqst, rqst);

- if (num_rqst > MAX_COMPOUND - 1)
- return -ENOMEM;
+ if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
+ return -EIO;

if (!server->ops->init_transform_rq) {
cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4a1911dcf834b..67313881f8ac1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -337,6 +337,8 @@ enum rw_hint {
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW (1 << 23)

struct kiocb {
struct file *ki_filp;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 50ad19662a322..6790f08066b72 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -118,6 +118,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
+unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+ phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b3c58042bd254..d79efd0268809 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -33,7 +33,10 @@ typedef __kernel_sa_family_t sa_family_t;

struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
- char sa_data[14]; /* 14 bytes of protocol address */
+ union {
+ char sa_data_min[14]; /* Minimum 14 bytes of protocol address */
+ DECLARE_FLEX_ARRAY(char, sa_data);
+ };
};

struct linger {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a18cf4b7c724c..add47f43e568e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -571,6 +571,11 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}

+static inline int swapcache_prepare(swp_entry_t swp)
+{
+ return 0;
+}
+
static inline void swap_free(swp_entry_t swp)
{
}
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index dde4dd9c4012c..4a767b3d20b9d 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -274,8 +274,8 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
flow_table->type->put(flow_table);
}

-int flow_offload_route_init(struct flow_offload *flow,
- const struct nf_flow_route *route);
+void flow_offload_route_init(struct flow_offload *flow,
+ struct nf_flow_route *route);

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 7dcdc97c0bc33..a3d8f013adcd5 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -303,6 +303,9 @@ void switchdev_deferred_process(void);
int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack);
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+ enum switchdev_notifier_type nt,
+ const struct switchdev_obj *obj);
int switchdev_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4c838f7290dd9..8ea1fba84eff9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2290,7 +2290,7 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
- int (*get_info)(const struct sock *sk, struct sk_buff *skb);
+ int (*get_info)(struct sock *sk, struct sk_buff *skb);
size_t (*get_info_size)(const struct sock *sk);
/* clone ulp */
void (*clone)(const struct request_sock *req, struct sock *newsk,
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index fdc31fdb612da..d2751ed536df2 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -100,10 +100,6 @@ struct scsi_vpd {
unsigned char data[];
};

-enum scsi_vpd_parameters {
- SCSI_VPD_HEADER_SIZE = 4,
-};
-
struct scsi_device {
struct Scsi_Host *host;
struct request_queue *request_queue;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 6a61a98d602cd..83f8f67e933df 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1091,6 +1091,7 @@ struct bpf_hrtimer {
struct bpf_prog *prog;
void __rcu *callback_fn;
void *value;
+ struct rcu_head rcu;
};

/* the actual struct hidden inside uapi struct bpf_timer */
@@ -1312,6 +1313,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)

if (in_nmi())
return -EOPNOTSUPP;
+ rcu_read_lock();
__bpf_spin_lock_irqsave(&timer->lock);
t = timer->timer;
if (!t) {
@@ -1333,6 +1335,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
* if it was running.
*/
ret = ret ?: hrtimer_cancel(&t->timer);
+ rcu_read_unlock();
return ret;
}

@@ -1387,7 +1390,7 @@ void bpf_timer_cancel_and_free(void *val)
*/
if (this_cpu_read(hrtimer_running) != t)
hrtimer_cancel(&t->timer);
- kfree(t);
+ kfree_rcu(t, rcu);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 76bafa8d331a7..3a2335bc1d58b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -37,6 +37,8 @@ static struct ctl_table sched_rt_sysctls[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_rt_handler,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_INT_MAX,
},
{
.procname = "sched_rt_runtime_us",
@@ -44,6 +46,8 @@ static struct ctl_table sched_rt_sysctls[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = sched_rt_handler,
+ .extra1 = SYSCTL_NEG_ONE,
+ .extra2 = SYSCTL_INT_MAX,
},
{
.procname = "sched_rr_timeslice_ms",
@@ -2970,9 +2974,6 @@ static int sched_rt_global_constraints(void)
#ifdef CONFIG_SYSCTL
static int sched_rt_global_validate(void)
{
- if (sysctl_sched_rt_period <= 0)
- return -EINVAL;
-
if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
((u64)sysctl_sched_rt_runtime *
@@ -3003,7 +3004,7 @@ static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;

- ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

if (!ret && write) {
ret = sched_rt_global_validate();
@@ -3047,6 +3048,9 @@ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
sched_rr_timeslice =
sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
msecs_to_jiffies(sysctl_sched_rr_timeslice);
+
+ if (sysctl_sched_rr_timeslice <= 0)
+ sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
}
mutex_unlock(&mutex);

diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 63bdad20dbaf8..98a678129b067 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -185,9 +185,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
}

+static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
+ struct damos_quota *src)
+{
+ dst->total_charged_sz = src->total_charged_sz;
+ dst->total_charged_ns = src->total_charged_ns;
+ dst->charged_sz = src->charged_sz;
+ dst->charged_from = src->charged_from;
+ dst->charge_target_from = src->charge_target_from;
+ dst->charge_addr_from = src->charge_addr_from;
+}
+
static int damon_lru_sort_apply_parameters(void)
{
- struct damos *scheme;
+ struct damos *scheme, *hot_scheme, *cold_scheme;
+ struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
unsigned int hot_thres, cold_thres;
int err = 0;

@@ -195,18 +207,35 @@ static int damon_lru_sort_apply_parameters(void)
if (err)
return err;

+ damon_for_each_scheme(scheme, ctx) {
+ if (!old_hot_scheme) {
+ old_hot_scheme = scheme;
+ continue;
+ }
+ old_cold_scheme = scheme;
+ }
+
hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
hot_thres_access_freq / 1000;
- scheme = damon_lru_sort_new_hot_scheme(hot_thres);
- if (!scheme)
+ hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+ if (!hot_scheme)
return -ENOMEM;
- damon_set_schemes(ctx, &scheme, 1);
+ if (old_hot_scheme)
+ damon_lru_sort_copy_quota_status(&hot_scheme->quota,
+ &old_hot_scheme->quota);

cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
- scheme = damon_lru_sort_new_cold_scheme(cold_thres);
- if (!scheme)
+ cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+ if (!cold_scheme) {
+ damon_destroy_scheme(hot_scheme);
return -ENOMEM;
- damon_add_scheme(ctx, scheme);
+ }
+ if (old_cold_scheme)
+ damon_lru_sort_copy_quota_status(&cold_scheme->quota,
+ &old_cold_scheme->quota);
+
+ damon_set_schemes(ctx, &hot_scheme, 1);
+ damon_add_scheme(ctx, cold_scheme);

return damon_set_region_biggest_system_ram_default(target,
&monitor_region_start,
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 162c9b1ca00fd..cc337e94acfda 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -141,9 +141,20 @@ static struct damos *damon_reclaim_new_scheme(void)
&damon_reclaim_wmarks);
}

+static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
+ struct damos_quota *src)
+{
+ dst->total_charged_sz = src->total_charged_sz;
+ dst->total_charged_ns = src->total_charged_ns;
+ dst->charged_sz = src->charged_sz;
+ dst->charged_from = src->charged_from;
+ dst->charge_target_from = src->charge_target_from;
+ dst->charge_addr_from = src->charge_addr_from;
+}
+
static int damon_reclaim_apply_parameters(void)
{
- struct damos *scheme;
+ struct damos *scheme, *old_scheme;
int err = 0;

err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
@@ -154,6 +165,11 @@ static int damon_reclaim_apply_parameters(void)
scheme = damon_reclaim_new_scheme();
if (!scheme)
return -ENOMEM;
+ if (!list_empty(&ctx->schemes)) {
+ damon_for_each_scheme(old_scheme, ctx)
+ damon_reclaim_copy_quota_status(&scheme->quota,
+ &old_scheme->quota);
+ }
damon_set_schemes(ctx, &scheme, 1);

return damon_set_region_biggest_system_ram_default(target,
diff --git a/mm/memblock.c b/mm/memblock.c
index 511d4783dcf1d..516efec80851a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -175,8 +175,9 @@ static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
/*
* Address comparison utilities
*/
-static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
- phys_addr_t base2, phys_addr_t size2)
+unsigned long __init_memblock
+memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
+ phys_addr_t size2)
{
return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9da98e3e71cfe..4570d3e315cf1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7517,9 +7517,13 @@ bool mem_cgroup_swap_full(struct folio *folio)

static int __init setup_swap_account(char *s)
{
- pr_warn_once("The swapaccount= commandline option is deprecated. "
- "Please report your usecase to linux-mm@kvack.org if you "
- "depend on this functionality.\n");
+ bool res;
+
+ if (!kstrtobool(s, &res) && !res)
+ pr_warn_once("The swapaccount=0 commandline option is deprecated "
+ "in favor of configuring swap control via cgroupfs. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
return 1;
}
__setup("swapaccount=", setup_swap_account);
diff --git a/mm/memory.c b/mm/memory.c
index fc8b264ec0cac..fb83cf56377ab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3761,6 +3761,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
struct page *page;
struct swap_info_struct *si = NULL;
rmap_t rmap_flags = RMAP_NONE;
+ bool need_clear_cache = false;
bool exclusive = false;
swp_entry_t entry;
pte_t pte;
@@ -3822,6 +3823,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (!folio) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
+ /*
+ * Prevent parallel swapin from proceeding with
+ * the cache flag. Otherwise, another thread may
+ * finish swapin first, free the entry, and swapout
+ * reusing the same entry. It's undetectable as
+ * pte_same() returns true due to entry reuse.
+ */
+ if (swapcache_prepare(entry)) {
+ /* Relax a bit to prevent rapid repeated page faults */
+ schedule_timeout_uninterruptible(1);
+ goto out;
+ }
+ need_clear_cache = true;
+
/* skip swapcache */
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
vma, vmf->address, false);
@@ -4073,6 +4088,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
+ /* Clear the swap cache pin for direct swapin after PTL unlock */
+ if (need_clear_cache)
+ swapcache_clear(si, entry);
if (si)
put_swap_device(si);
return ret;
@@ -4086,6 +4104,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_unlock(swapcache);
folio_put(swapcache);
}
+ if (need_clear_cache)
+ swapcache_clear(si, entry);
if (si)
put_swap_device(si);
return ret;
diff --git a/mm/swap.h b/mm/swap.h
index cc08c459c6190..5eff40ef76934 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -39,6 +39,7 @@ void __delete_from_swap_cache(struct folio *folio,
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
@@ -98,6 +99,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
return 0;
}

+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+}
+
static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr)
{
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 71db6d8a1ea30..cca9fda9d036f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3373,6 +3373,19 @@ int swapcache_prepare(swp_entry_t entry)
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}

+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+ struct swap_cluster_info *ci;
+ unsigned long offset = swp_offset(entry);
+ unsigned char usage;
+
+ ci = lock_cluster_or_swap_info(si, offset);
+ usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
+ unlock_cluster_or_swap_info(si, ci);
+ if (!usage)
+ free_swap_slot(entry);
+}
+
struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
return swap_type_to_swap_info(swp_type(entry));
diff --git a/mm/zswap.c b/mm/zswap.c
index b3829ada4a413..b7cb126797f9e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1013,6 +1013,8 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
spin_unlock(&tree->lock);
delete_from_swap_cache(page_folio(page));
+ unlock_page(page);
+ put_page(page);
ret = -ENOMEM;
goto fail;
}
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 4b3982c368b35..b61ef2dff7a4b 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -593,21 +593,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
+ struct net_device *dev,
+ unsigned long action,
enum switchdev_obj_id id,
const struct net_bridge_mdb_entry *mp,
struct net_device *orig_dev)
{
- struct switchdev_obj_port_mdb *mdb;
+ struct switchdev_obj_port_mdb mdb = {
+ .obj = {
+ .id = id,
+ .orig_dev = orig_dev,
+ },
+ };
+ struct switchdev_obj_port_mdb *pmdb;

- mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
- if (!mdb)
- return -ENOMEM;
+ br_switchdev_mdb_populate(&mdb, mp);
+
+ if (action == SWITCHDEV_PORT_OBJ_ADD &&
+ switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
+ /* This event is already in the deferred queue of
+ * events, so this replay must be elided, lest the
+ * driver receives duplicate events for it. This can
+ * only happen when replaying additions, since
+ * modifications are always immediately visible in
+ * br->mdb_list, whereas actual event delivery may be
+ * delayed.
+ */
+ return 0;
+ }

- mdb->obj.id = id;
- mdb->obj.orig_dev = orig_dev;
- br_switchdev_mdb_populate(mdb, mp);
- list_add_tail(&mdb->obj.list, mdb_list);
+ pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
+ if (!pmdb)
+ return -ENOMEM;

+ list_add_tail(&pmdb->obj.list, mdb_list);
return 0;
}

@@ -675,51 +694,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;

- /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
- * because the write-side protection is br->multicast_lock. But we
- * need to emulate the [ blocking ] calling context of a regular
- * switchdev event, so since both br->multicast_lock and RCU read side
- * critical sections are atomic, we have no choice but to pick the RCU
- * read side lock, queue up all our events, leave the critical section
- * and notify switchdev from blocking context.
+ if (adding)
+ action = SWITCHDEV_PORT_OBJ_ADD;
+ else
+ action = SWITCHDEV_PORT_OBJ_DEL;
+
+ /* br_switchdev_mdb_queue_one() will take care to not queue a
+ * replay of an event that is already pending in the switchdev
+ * deferred queue. In order to safely determine that, there
+ * must be no new deferred MDB notifications enqueued for the
+ * duration of the MDB scan. Therefore, grab the write-side
+ * lock to avoid racing with any concurrent IGMP/MLD snooping.
*/
- rcu_read_lock();
+ spin_lock_bh(&br->multicast_lock);

- hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
+ hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
struct net_bridge_port_group __rcu * const *pp;
const struct net_bridge_port_group *p;

if (mp->host_joined) {
- err = br_switchdev_mdb_queue_one(&mdb_list,
+ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
SWITCHDEV_OBJ_ID_HOST_MDB,
mp, br_dev);
if (err) {
- rcu_read_unlock();
+ spin_unlock_bh(&br->multicast_lock);
goto out_free_mdb;
}
}

- for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
+ for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->key.port->dev != dev)
continue;

- err = br_switchdev_mdb_queue_one(&mdb_list,
+ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
SWITCHDEV_OBJ_ID_PORT_MDB,
mp, dev);
if (err) {
- rcu_read_unlock();
+ spin_unlock_bh(&br->multicast_lock);
goto out_free_mdb;
}
}
}

- rcu_read_unlock();
-
- if (adding)
- action = SWITCHDEV_PORT_OBJ_ADD;
- else
- action = SWITCHDEV_PORT_OBJ_DEL;
+ spin_unlock_bh(&br->multicast_lock);

list_for_each_entry(obj, &mdb_list, list) {
err = br_switchdev_mdb_replay_one(nb, dev,
@@ -780,6 +798,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
+
+ /* Make sure that the device leaving this bridge has seen all
+ * relevant events before it is disassociated. In the normal
+ * case, when the device is directly attached to the bridge,
+ * this is covered by del_nbp(). If the association was indirect
+ * however, e.g. via a team or bond, and the device is leaving
+ * that intermediate device, then the bridge port remains in
+ * place.
+ */
+ switchdev_deferred_process();
}

/* Let the bridge know that this port is offloaded, so that it can assign a
diff --git a/net/core/dev.c b/net/core/dev.c
index 1ba3662faf0aa..60619fe8af5fc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8861,7 +8861,7 @@ EXPORT_SYMBOL(dev_set_mac_address_user);

int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
- size_t size = sizeof(sa->sa_data);
+ size_t size = sizeof(sa->sa_data_min);
struct net_device *dev;
int ret = 0;

diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 7674bb9f3076c..5cdbfbf9a7dcf 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -342,7 +342,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
- min(sizeof(ifr->ifr_hwaddr.sa_data),
+ min(sizeof(ifr->ifr_hwaddr.sa_data_min),
(size_t)dev->addr_len));
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return 0;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 3818035ea0021..39643f78cf782 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1217,8 +1217,11 @@ static void sk_psock_verdict_data_ready(struct sock *sk)

rcu_read_lock();
psock = sk_psock(sk);
- if (psock)
- psock->saved_data_ready(sk);
+ if (psock) {
+ read_lock_bh(&sk->sk_callback_lock);
+ sk_psock_data_ready(sk, psock);
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
rcu_read_unlock();
}
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9456f5bb35e5d..ccff96820a703 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1125,7 +1125,8 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
if (neigh) {
if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
read_lock_bh(&neigh->lock);
- memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+ memcpy(r->arp_ha.sa_data, neigh->ha,
+ min(dev->addr_len, (unsigned char)sizeof(r->arp_ha.sa_data_min)));
r->arp_flags = arp_state_to_flags(neigh);
read_unlock_bh(&neigh->lock);
r->arp_ha.sa_family = dev->type;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 35d6e74be8406..bb0d1252cad86 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1804,6 +1804,21 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
return err;
}

+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet_base_seq(const struct net *net)
+{
+ u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
+ net->dev_base_seq;
+
+ /* Must not return 0 (see nl_dump_check_consistent()).
+ * Chose a value far away from 0.
+ */
+ if (!res)
+ res = 0x80000000;
+ return res;
+}
+
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
@@ -1855,8 +1870,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
head = &tgt_net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
- tgt_net->dev_base_seq;
+ cb->seq = inet_base_seq(tgt_net);
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
@@ -2257,8 +2271,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
- net->dev_base_seq;
+ cb->seq = inet_base_seq(net);
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index f2ed2aed08ab3..56776e1b1de52 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -1111,10 +1111,33 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
return 0;

error:
+ if (sk_hashed(sk)) {
+ spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);
+
+ sock_prot_inuse_add(net, sk->sk_prot, -1);
+
+ spin_lock(lock);
+ sk_nulls_del_node_init_rcu(sk);
+ spin_unlock(lock);
+
+ sk->sk_hash = 0;
+ inet_sk(sk)->inet_sport = 0;
+ inet_sk(sk)->inet_num = 0;
+
+ if (tw)
+ inet_twsk_bind_unhash(tw, hinfo);
+ }
+
spin_unlock(&head2->lock);
if (tb_created)
inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
- spin_unlock_bh(&head->lock);
+ spin_unlock(&head->lock);
+
+ if (tw)
+ inet_twsk_deschedule_put(tw);
+
+ local_bh_enable();
+
return -ENOMEM;
}

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b8dc20fe7a4e2..46527b5cc8f0c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -706,6 +706,22 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
return err;
}

+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet6_base_seq(const struct net *net)
+{
+ u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
+ net->dev_base_seq;
+
+ /* Must not return 0 (see nl_dump_check_consistent()).
+ * Chose a value far away from 0.
+ */
+ if (!res)
+ res = 0x80000000;
+ return res;
+}
+
+
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -739,8 +755,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
- net->dev_base_seq;
+ cb->seq = inet6_base_seq(net);
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
@@ -5326,7 +5341,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
}

rcu_read_lock();
- cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
+ cb->seq = inet6_base_seq(tgt_net);
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &tgt_net->dev_index_head[h];
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 5fa0e37305d9d..1cfdd9d950123 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -180,6 +180,8 @@ static bool ip6_parse_tlv(bool hopbyhop,
case IPV6_TLV_IOAM:
if (!ipv6_hop_ioam(skb, off))
return false;
+
+ nh = skb_network_header(skb);
break;
case IPV6_TLV_JUMBO:
if (!ipv6_hop_jumbo(skb, off))
@@ -974,6 +976,14 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
if (!skb_valid_dst(skb))
ip6_route_input(skb);

+ /* About to mangle packet header */
+ if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len))
+ goto drop;
+
+ /* Trace pointer may have changed */
+ trace = (struct ioam6_trace_hdr *)(skb_network_header(skb)
+ + optoff + sizeof(*hdr));
+
ioam6_fill_trace_data(skb, ns, trace, true);
break;
default:
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 29346a6eec9ff..35508abd76f43 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -512,22 +512,24 @@ int __init seg6_init(void)
{
int err;

- err = genl_register_family(&seg6_genl_family);
+ err = register_pernet_subsys(&ip6_segments_ops);
if (err)
goto out;

- err = register_pernet_subsys(&ip6_segments_ops);
+ err = genl_register_family(&seg6_genl_family);
if (err)
- goto out_unregister_genl;
+ goto out_unregister_pernet;

#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
err = seg6_iptunnel_init();
if (err)
- goto out_unregister_pernet;
+ goto out_unregister_genl;

err = seg6_local_init();
- if (err)
- goto out_unregister_pernet;
+ if (err) {
+ seg6_iptunnel_exit();
+ goto out_unregister_genl;
+ }
#endif

#ifdef CONFIG_IPV6_SEG6_HMAC
@@ -548,11 +550,11 @@ int __init seg6_init(void)
#endif
#endif
#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
-out_unregister_pernet:
- unregister_pernet_subsys(&ip6_segments_ops);
-#endif
out_unregister_genl:
genl_unregister_family(&seg6_genl_family);
+#endif
+out_unregister_pernet:
+ unregister_pernet_subsys(&ip6_segments_ops);
goto out;
}

diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 314ec3a51e8de..bb92dc8b82f39 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -630,7 +630,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

back_from_confirm:
lock_sock(sk);
- ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
+ ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a2c4866080bd7..6cf0b77839d1d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1775,6 +1775,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
sband->band);
}

+ ieee80211_sta_set_rx_nss(link_sta);
+
return ret;
}

diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c6f0da028a2a4..f25dc6931a5b1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -7294,6 +7294,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code, false);
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
return 0;
}

diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f3d6c3e4c970e..bd56015b29258 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -891,6 +891,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);

+ ieee80211_check_fast_xmit(sta);
+
return 0;
out_remove:
if (sta->sta.valid_links)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 322a035f75929..3d62e8b718740 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3044,7 +3044,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
sdata->vif.type == NL80211_IFTYPE_STATION)
goto out;

- if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
goto out;

if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 68be8f2b622dd..256bf0b89e6ca 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -663,7 +663,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
spin_unlock_irqrestore(&mns->keys_lock, flags);

if (!tagbits) {
- kfree(key);
+ mctp_key_unref(key);
return ERR_PTR(-EBUSY);
}

diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
index a536586742f28..e57c5f47f0351 100644
--- a/net/mptcp/diag.c
+++ b/net/mptcp/diag.c
@@ -13,17 +13,19 @@
#include <uapi/linux/mptcp.h>
#include "protocol.h"

-static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
+static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *sf;
struct nlattr *start;
u32 flags = 0;
+ bool slow;
int err;

start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
if (!start)
return -EMSGSIZE;

+ slow = lock_sock_fast(sk);
rcu_read_lock();
sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
if (!sf) {
@@ -69,11 +71,13 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
}

rcu_read_unlock();
+ unlock_sock_fast(sk, slow);
nla_nest_end(skb, start);
return 0;

nla_failure:
rcu_read_unlock();
+ unlock_sock_fast(sk, slow);
nla_nest_cancel(skb, start);
return err;
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 980050f6b456f..70a1025f093cf 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -900,7 +900,8 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
}

static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
- struct mptcp_pm_addr_entry *entry)
+ struct mptcp_pm_addr_entry *entry,
+ bool needs_id)
{
struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
unsigned int addr_max;
@@ -942,7 +943,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
}
}

- if (!entry->addr.id) {
+ if (!entry->addr.id && needs_id) {
find_next:
entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
MPTCP_PM_MAX_ADDR_ID + 1,
@@ -953,7 +954,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
}
}

- if (!entry->addr.id)
+ if (!entry->addr.id && needs_id)
goto out;

__set_bit(entry->addr.id, pernet->id_bitmap);
@@ -1095,7 +1096,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
entry->ifindex = 0;
entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
entry->lsk = NULL;
- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
if (ret < 0)
kfree(entry);

@@ -1311,6 +1312,18 @@ static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
return 0;
}

+static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
+ struct genl_info *info)
+{
+ struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
+
+ if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
+ mptcp_pm_addr_policy, info->extack) &&
+ tb[MPTCP_PM_ADDR_ATTR_ID])
+ return true;
+ return false;
+}
+
static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
@@ -1352,7 +1365,8 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
goto out_free;
}
}
- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
+ !mptcp_pm_has_addr_attr_id(attr, info));
if (ret < 0) {
GENL_SET_ERR_MSG(info, "too many addresses or duplicate one");
goto out_free;
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index 2e1e0d0e3ec60..631fa104617c3 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -25,8 +25,9 @@ void mptcp_free_local_addr_list(struct mptcp_sock *msk)
}
}

-int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
- struct mptcp_pm_addr_entry *entry)
+static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *entry,
+ bool needs_id)
{
DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
struct mptcp_pm_addr_entry *match = NULL;
@@ -41,7 +42,7 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
spin_lock_bh(&msk->pm.lock);
list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
- if (addr_match && entry->addr.id == 0)
+ if (addr_match && entry->addr.id == 0 && needs_id)
entry->addr.id = e->addr.id;
id_match = (e->addr.id == entry->addr.id);
if (addr_match && id_match) {
@@ -64,7 +65,7 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
}

*e = *entry;
- if (!e->addr.id)
+ if (!e->addr.id && needs_id)
e->addr.id = find_next_zero_bit(id_bitmap,
MPTCP_PM_MAX_ADDR_ID + 1,
1);
@@ -155,7 +156,7 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
if (new_entry.addr.port == msk_sport)
new_entry.addr.port = 0;

- return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
+ return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
}

int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
@@ -197,7 +198,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
goto announce_err;
}

- err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val);
+ err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val, false);
if (err < 0) {
GENL_SET_ERR_MSG(info, "did not match address and id");
goto announce_err;
@@ -221,6 +222,40 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
return err;
}

+static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk,
+ struct genl_info *info)
+{
+ struct mptcp_rm_list list = { .nr = 0 };
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ bool has_id_0 = false;
+ int err = -EINVAL;
+
+ lock_sock(sk);
+ mptcp_for_each_subflow(msk, subflow) {
+ if (subflow->local_id == 0) {
+ has_id_0 = true;
+ break;
+ }
+ }
+ if (!has_id_0) {
+ GENL_SET_ERR_MSG(info, "address with id 0 not found");
+ goto remove_err;
+ }
+
+ list.ids[list.nr++] = 0;
+
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_remove_addr(msk, &list);
+ spin_unlock_bh(&msk->pm.lock);
+
+ err = 0;
+
+remove_err:
+ release_sock(sk);
+ return err;
+}
+
int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
@@ -252,6 +287,11 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
goto remove_err;
}

+ if (id_val == 0) {
+ err = mptcp_userspace_pm_remove_id_zero_address(msk, info);
+ goto remove_err;
+ }
+
lock_sock((struct sock *)msk);

list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
@@ -335,7 +375,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
}

local.addr = addr_l;
- err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
+ err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
if (err < 0) {
GENL_SET_ERR_MSG(info, "did not match address and id");
goto create_err;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 259672cc344f3..b092205213234 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -834,8 +834,6 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
struct list_head *rm_list);

-int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
- struct mptcp_pm_addr_entry *entry);
void mptcp_free_local_addr_list(struct mptcp_sock *msk);
int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info);
int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index c94a9971d790c..7ffd698497f2a 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -299,7 +299,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
pr_debug("Setting vtag %x for secondary conntrack\n",
sh->vtag);
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
- } else {
+ } else if (sch->type == SCTP_CID_SHUTDOWN_ACK) {
/* If it is a shutdown ack OOTB packet, we expect a return
shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
pr_debug("Setting vtag %x for new conn OOTB\n",
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index c1d99cb370b44..99195cf6b2657 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -87,12 +87,22 @@ static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
return 0;
}

+static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route,
+ enum flow_offload_tuple_dir dir)
+{
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+ route->tuple[dir].dst = NULL;
+
+ return dst;
+}
+
static int flow_offload_fill_route(struct flow_offload *flow,
- const struct nf_flow_route *route,
+ struct nf_flow_route *route,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
- struct dst_entry *dst = route->tuple[dir].dst;
+ struct dst_entry *dst = nft_route_dst_fetch(route, dir);
int i, j = 0;

switch (flow_tuple->l3proto) {
@@ -122,12 +132,10 @@ static int flow_offload_fill_route(struct flow_offload *flow,
ETH_ALEN);
flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
+ dst_release(dst);
break;
case FLOW_OFFLOAD_XMIT_XFRM:
case FLOW_OFFLOAD_XMIT_NEIGH:
- if (!dst_hold_safe(route->tuple[dir].dst))
- return -1;
-
flow_tuple->dst_cache = dst;
flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
break;
@@ -148,27 +156,12 @@ static void nft_flow_dst_release(struct flow_offload *flow,
dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

-int flow_offload_route_init(struct flow_offload *flow,
- const struct nf_flow_route *route)
+void flow_offload_route_init(struct flow_offload *flow,
+ struct nf_flow_route *route)
{
- int err;
-
- err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
- if (err < 0)
- return err;
-
- err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
- if (err < 0)
- goto err_route_reply;
-
+ flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
+ flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
flow->type = NF_FLOW_OFFLOAD_ROUTE;
-
- return 0;
-
-err_route_reply:
- nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
-
- return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
|
index 49acb89ba9c56..e21ec3ad80939 100644
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -686,15 +686,16 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
|
|
return err;
|
|
}
|
|
|
|
-static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
|
|
- struct nft_flowtable *flowtable)
|
|
+static struct nft_trans *
|
|
+nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
|
|
+ struct nft_flowtable *flowtable)
|
|
{
|
|
struct nft_trans *trans;
|
|
|
|
trans = nft_trans_alloc(ctx, msg_type,
|
|
sizeof(struct nft_trans_flowtable));
|
|
if (trans == NULL)
|
|
- return -ENOMEM;
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
|
if (msg_type == NFT_MSG_NEWFLOWTABLE)
|
|
nft_activate_next(ctx->net, flowtable);
|
|
@@ -703,22 +704,22 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
|
|
nft_trans_flowtable(trans) = flowtable;
|
|
nft_trans_commit_list_add_tail(ctx->net, trans);
|
|
|
|
- return 0;
|
|
+ return trans;
|
|
}
|
|
|
|
static int nft_delflowtable(struct nft_ctx *ctx,
|
|
struct nft_flowtable *flowtable)
|
|
{
|
|
- int err;
|
|
+ struct nft_trans *trans;
|
|
|
|
- err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
|
|
- if (err < 0)
|
|
- return err;
|
|
+ trans = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
|
|
+ if (IS_ERR(trans))
|
|
+ return PTR_ERR(trans);
|
|
|
|
nft_deactivate_next(ctx->net, flowtable);
|
|
nft_use_dec(&ctx->table->use);
|
|
|
|
- return err;
|
|
+ return 0;
|
|
}
|
|
|
|
static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
|
|
@@ -1245,6 +1246,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
|
|
return 0;
|
|
|
|
err_register_hooks:
|
|
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
|
|
nft_trans_destroy(trans);
|
|
return ret;
|
|
}
@@ -2057,7 +2059,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
struct nft_hook *hook;
int err;

- hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
+ hook = kzalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
if (!hook) {
err = -ENOMEM;
goto err_hook_alloc;
@@ -2458,19 +2460,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
RCU_INIT_POINTER(chain->blob_gen_0, blob);
RCU_INIT_POINTER(chain->blob_gen_1, blob);

- err = nf_tables_register_hook(net, table, chain);
- if (err < 0)
- goto err_destroy_chain;
-
if (!nft_use_inc(&table->use)) {
err = -EMFILE;
- goto err_use;
+ goto err_destroy_chain;
}

trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- goto err_unregister_hook;
+ goto err_trans;
}

nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
@@ -2478,17 +2476,22 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
nft_trans_chain_policy(trans) = policy;

err = nft_chain_add(table, chain);
- if (err < 0) {
- nft_trans_destroy(trans);
- goto err_unregister_hook;
- }
+ if (err < 0)
+ goto err_chain_add;
+
+ /* This must be LAST to ensure no packets are walking over this chain. */
+ err = nf_tables_register_hook(net, table, chain);
+ if (err < 0)
+ goto err_register_hook;

return 0;

-err_unregister_hook:
+err_register_hook:
+ nft_chain_del(chain);
+err_chain_add:
+ nft_trans_destroy(trans);
+err_trans:
nft_use_dec_restore(&table->use);
-err_use:
- nf_tables_unregister_hook(net, table, chain);
err_destroy_chain:
nf_tables_chain_destroy(ctx);

@@ -7937,7 +7940,7 @@ static int nft_register_flowtable_net_hooks(struct net *net,
return err;
}

-static void nft_flowtable_hooks_destroy(struct list_head *hook_list)
+static void nft_hooks_destroy(struct list_head *hook_list)
{
struct nft_hook *hook, *next;

@@ -8030,9 +8033,9 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
u8 family = info->nfmsg->nfgen_family;
const struct nf_flowtable_type *type;
struct nft_flowtable *flowtable;
- struct nft_hook *hook, *next;
struct net *net = info->net;
struct nft_table *table;
+ struct nft_trans *trans;
struct nft_ctx ctx;
int err;

@@ -8112,34 +8115,34 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
&flowtable_hook, flowtable, true);
if (err < 0)
- goto err4;
+ goto err_flowtable_parse_hooks;

list_splice(&flowtable_hook.list, &flowtable->hook_list);
flowtable->data.priority = flowtable_hook.priority;
flowtable->hooknum = flowtable_hook.num;

+ trans = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto err_flowtable_trans;
+ }
+
+ /* This must be LAST to ensure no packets are walking over this flowtable. */
err = nft_register_flowtable_net_hooks(ctx.net, table,
&flowtable->hook_list,
flowtable);
- if (err < 0) {
- nft_flowtable_hooks_destroy(&flowtable->hook_list);
- goto err4;
- }
-
- err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
if (err < 0)
- goto err5;
+ goto err_flowtable_hooks;

list_add_tail_rcu(&flowtable->list, &table->flowtables);

return 0;
-err5:
- list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
- nft_unregister_flowtable_hook(net, flowtable, hook);
- list_del_rcu(&hook->list);
- kfree_rcu(hook, rcu);
- }
-err4:
+
+err_flowtable_hooks:
+ nft_trans_destroy(trans);
+err_flowtable_trans:
+ nft_hooks_destroy(&flowtable->hook_list);
+err_flowtable_parse_hooks:
flowtable->data.type->free(&flowtable->data);
err3:
module_put(type->owner);
@@ -8892,7 +8895,7 @@ static void nft_commit_release(struct nft_trans *trans)
break;
case NFT_MSG_DELFLOWTABLE:
if (nft_trans_flowtable_update(trans))
- nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+ nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
else
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
break;
@@ -9849,7 +9852,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
break;
case NFT_MSG_NEWFLOWTABLE:
if (nft_trans_flowtable_update(trans))
- nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+ nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
else
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
break;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 3d9f6dda5aeb2..7a8707632a815 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -250,9 +250,14 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
break;
}

+ if (!dst_hold_safe(this_dst))
+ return -ENOENT;
+
nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
- if (!other_dst)
+ if (!other_dst) {
+ dst_release(this_dst);
return -ENOENT;
+ }

nft_default_forward_path(route, this_dst, dir);
nft_default_forward_path(route, other_dst, !dir);
@@ -349,8 +354,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
if (!flow)
goto err_flow_alloc;

- if (flow_offload_route_init(flow, &route) < 0)
- goto err_flow_add;
+ flow_offload_route_init(flow, &route);

if (tcph) {
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
@@ -361,12 +365,12 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
if (ret < 0)
goto err_flow_add;

- dst_release(route.tuple[!dir].dst);
return;

err_flow_add:
flow_offload_free(flow);
err_flow_alloc:
+ dst_release(route.tuple[dir].dst);
dst_release(route.tuple[!dir].dst);
err_flow_route:
clear_bit(IPS_OFFLOAD_BIT, &ct->status);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 51882f07ef70c..c3117350f5fbb 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3284,7 +3284,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
- char name[sizeof(uaddr->sa_data) + 1];
+ char name[sizeof(uaddr->sa_data_min) + 1];

/*
* Check legality
@@ -3295,8 +3295,8 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
* zero-terminated.
*/
- memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
- name[sizeof(uaddr->sa_data)] = 0;
+ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
+ name[sizeof(uaddr->sa_data_min)] = 0;

return packet_do_bind(sk, name, 0, 0);
}
@@ -3566,11 +3566,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EOPNOTSUPP;

uaddr->sa_family = AF_PACKET;
- memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
+ memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
if (dev)
- strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
rcu_read_unlock();

return sizeof(*uaddr);
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index ff5f49ab236ed..39a6c5713d0b2 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -35,10 +35,10 @@ static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)

switch (cmd) {
case SIOCINQ:
- lock_sock(sk);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
answ = skb ? skb->len : 0;
- release_sock(sk);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(answ, (int __user *)arg);

case SIOCPNADDRESOURCE:
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 83ea13a50690b..607f54c23647a 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -917,6 +917,37 @@ static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
return 0;
}

+static unsigned int pep_first_packet_length(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct sk_buff_head *q;
+ struct sk_buff *skb;
+ unsigned int len = 0;
+ bool found = false;
+
+ if (sock_flag(sk, SOCK_URGINLINE)) {
+ q = &pn->ctrlreq_queue;
+ spin_lock_bh(&q->lock);
+ skb = skb_peek(q);
+ if (skb) {
+ len = skb->len;
+ found = true;
+ }
+ spin_unlock_bh(&q->lock);
+ }
+
+ if (likely(!found)) {
+ q = &sk->sk_receive_queue;
+ spin_lock_bh(&q->lock);
+ skb = skb_peek(q);
+ if (skb)
+ len = skb->len;
+ spin_unlock_bh(&q->lock);
+ }
+
+ return len;
+}
+
static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
struct pep_sock *pn = pep_sk(sk);
@@ -930,15 +961,7 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
break;
}

- lock_sock(sk);
- if (sock_flag(sk, SOCK_URGINLINE) &&
- !skb_queue_empty(&pn->ctrlreq_queue))
- answ = skb_peek(&pn->ctrlreq_queue)->len;
- else if (!skb_queue_empty(&sk->sk_receive_queue))
- answ = skb_peek(&sk->sk_receive_queue)->len;
- else
- answ = 0;
- release_sock(sk);
+ answ = pep_first_packet_length(sk);
ret = put_user(answ, (int __user *)arg);
break;

diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 24cf0bf7c80e5..9c4a80fce794f 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -45,23 +45,6 @@ if NET_SCHED

comment "Queueing/Scheduling"

-config NET_SCH_CBQ
- tristate "Class Based Queueing (CBQ)"
- help
- Say Y here if you want to use the Class-Based Queueing (CBQ) packet
- scheduling algorithm. This algorithm classifies the waiting packets
- into a tree-like hierarchy of classes; the leaves of this tree are
- in turn scheduled by separate algorithms.
-
- See the top of <file:net/sched/sch_cbq.c> for more details.
-
- CBQ is a commonly used scheduler, so if you're unsure, you should
- say Y here. Then say Y to all the queueing algorithms below that you
- want to use as leaf disciplines.
-
- To compile this code as a module, choose M here: the
- module will be called sch_cbq.
-
config NET_SCH_HTB
tristate "Hierarchical Token Bucket (HTB)"
help
@@ -85,20 +68,6 @@ config NET_SCH_HFSC
To compile this code as a module, choose M here: the
module will be called sch_hfsc.

-config NET_SCH_ATM
- tristate "ATM Virtual Circuits (ATM)"
- depends on ATM
- help
- Say Y here if you want to use the ATM pseudo-scheduler. This
- provides a framework for invoking classifiers, which in turn
- select classes of this queuing discipline. Each class maps
- the flow(s) it is handling to a given virtual circuit.
-
- See the top of <file:net/sched/sch_atm.c> for more details.
-
- To compile this code as a module, choose M here: the
- module will be called sch_atm.
-
config NET_SCH_PRIO
tristate "Multi Band Priority Queueing (PRIO)"
help
@@ -217,17 +186,6 @@ config NET_SCH_GRED
To compile this code as a module, choose M here: the
module will be called sch_gred.

-config NET_SCH_DSMARK
- tristate "Differentiated Services marker (DSMARK)"
- help
- Say Y if you want to schedule packets according to the
- Differentiated Services architecture proposed in RFC 2475.
- Technical information on this method, with pointers to associated
- RFCs, is available at <http://www.gta.ufrj.br/diffserv/>.
-
- To compile this code as a module, choose M here: the
- module will be called sch_dsmark.
-
config NET_SCH_NETEM
tristate "Network emulator (NETEM)"
help
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8a33a35fc50d5..a66ac1e7b79b5 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -33,20 +33,17 @@ obj-$(CONFIG_NET_ACT_TUNNEL_KEY)+= act_tunnel_key.o
obj-$(CONFIG_NET_ACT_CT) += act_ct.o
obj-$(CONFIG_NET_ACT_GATE) += act_gate.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
-obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
obj-$(CONFIG_NET_SCH_RED) += sch_red.o
obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
-obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
-obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
deleted file mode 100644
index 4a981ca90b0bf..0000000000000
--- a/net/sched/sch_atm.c
+++ /dev/null
@@ -1,706 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */
-
-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/atmdev.h>
-#include <linux/atmclip.h>
-#include <linux/rtnetlink.h>
-#include <linux/file.h> /* for fput */
-#include <net/netlink.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-
-/*
- * The ATM queuing discipline provides a framework for invoking classifiers
- * (aka "filters"), which in turn select classes of this queuing discipline.
- * Each class maps the flow(s) it is handling to a given VC. Multiple classes
- * may share the same VC.
- *
- * When creating a class, VCs are specified by passing the number of the open
- * socket descriptor by which the calling process references the VC. The kernel
- * keeps the VC open at least until all classes using it are removed.
- *
- * In this file, most functions are named atm_tc_* to avoid confusion with all
- * the atm_* in net/atm. This naming convention differs from what's used in the
- * rest of net/sched.
- *
- * Known bugs:
- * - sometimes messes up the IP stack
- * - any manipulations besides the few operations described in the README, are
- * untested and likely to crash the system
- * - should lock the flow while there is data in the queue (?)
- */
-
-#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
-
-struct atm_flow_data {
- struct Qdisc_class_common common;
- struct Qdisc *q; /* FIFO, TBF, etc. */
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
- struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
- void (*old_pop)(struct atm_vcc *vcc,
- struct sk_buff *skb); /* chaining */
- struct atm_qdisc_data *parent; /* parent qdisc */
- struct socket *sock; /* for closing */
- int ref; /* reference count */
- struct gnet_stats_basic_sync bstats;
- struct gnet_stats_queue qstats;
- struct list_head list;
- struct atm_flow_data *excess; /* flow for excess traffic;
- NULL to set CLP instead */
- int hdr_len;
- unsigned char hdr[]; /* header data; MUST BE LAST */
-};
-
-struct atm_qdisc_data {
- struct atm_flow_data link; /* unclassified skbs go here */
- struct list_head flows; /* NB: "link" is also on this
- list */
- struct tasklet_struct task; /* dequeue tasklet */
-};
-
-/* ------------------------- Class/flow operations ------------------------- */
-
-static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- list_for_each_entry(flow, &p->flows, list) {
- if (flow->common.classid == classid)
- return flow;
- }
- return NULL;
-}
-
-static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
- sch, p, flow, new, old);
- if (list_empty(&flow->list))
- return -EINVAL;
- if (!new)
- new = &noop_qdisc;
- *old = flow->q;
- flow->q = new;
- if (*old)
- qdisc_reset(*old);
- return 0;
-}
-
-static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
-{
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
- return flow ? flow->q : NULL;
-}
-
-static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
-{
- struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
- flow = lookup_flow(sch, classid);
- pr_debug("%s: flow %p\n", __func__, flow);
- return (unsigned long)flow;
-}
-
-static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
- unsigned long parent, u32 classid)
-{
- struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
- flow = lookup_flow(sch, classid);
- if (flow)
- flow->ref++;
- pr_debug("%s: flow %p\n", __func__, flow);
- return (unsigned long)flow;
-}
-
-/*
- * atm_tc_put handles all destructions, including the ones that are explicitly
- * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
- * anything that still seems to be in use.
- */
-static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- if (--flow->ref)
- return;
- pr_debug("atm_tc_put: destroying\n");
- list_del_init(&flow->list);
- pr_debug("atm_tc_put: qdisc %p\n", flow->q);
- qdisc_put(flow->q);
- tcf_block_put(flow->block);
- if (flow->sock) {
- pr_debug("atm_tc_put: f_count %ld\n",
- file_count(flow->sock->file));
- flow->vcc->pop = flow->old_pop;
- sockfd_put(flow->sock);
- }
- if (flow->excess)
- atm_tc_put(sch, (unsigned long)flow->excess);
- if (flow != &p->link)
- kfree(flow);
- /*
- * If flow == &p->link, the qdisc no longer works at this point and
- * needs to be removed. (By the caller of atm_tc_put.)
- */
-}
-
-static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
-{
- struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
-
- pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
- VCC2FLOW(vcc)->old_pop(vcc, skb);
- tasklet_schedule(&p->task);
-}
-
-static const u8 llc_oui_ip[] = {
- 0xaa, /* DSAP: non-ISO */
- 0xaa, /* SSAP: non-ISO */
- 0x03, /* Ctrl: Unnumbered Information Command PDU */
- 0x00, /* OUI: EtherType */
- 0x00, 0x00,
- 0x08, 0x00
-}; /* Ethertype IP (0800) */
-
-static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
- [TCA_ATM_FD] = { .type = NLA_U32 },
- [TCA_ATM_EXCESS] = { .type = NLA_U32 },
-};
-
-static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
- struct atm_flow_data *excess = NULL;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_ATM_MAX + 1];
- struct socket *sock;
- int fd, error, hdr_len;
- void *hdr;
-
- pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
- "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
- /*
- * The concept of parents doesn't apply for this qdisc.
- */
- if (parent && parent != TC_H_ROOT && parent != sch->handle)
- return -EINVAL;
- /*
- * ATM classes cannot be changed. In order to change properties of the
- * ATM connection, that socket needs to be modified directly (via the
- * native ATM API. In order to send a flow to a different VC, the old
- * class needs to be removed and a new one added. (This may be changed
- * later.)
- */
- if (flow)
- return -EBUSY;
- if (opt == NULL)
- return -EINVAL;
-
- error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
- NULL);
- if (error < 0)
- return error;
-
- if (!tb[TCA_ATM_FD])
- return -EINVAL;
- fd = nla_get_u32(tb[TCA_ATM_FD]);
- pr_debug("atm_tc_change: fd %d\n", fd);
- if (tb[TCA_ATM_HDR]) {
- hdr_len = nla_len(tb[TCA_ATM_HDR]);
- hdr = nla_data(tb[TCA_ATM_HDR]);
- } else {
- hdr_len = RFC1483LLC_LEN;
- hdr = NULL; /* default LLC/SNAP for IP */
- }
- if (!tb[TCA_ATM_EXCESS])
- excess = NULL;
- else {
- excess = (struct atm_flow_data *)
- atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
- if (!excess)
- return -ENOENT;
- }
- pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
- opt->nla_type, nla_len(opt), hdr_len);
- sock = sockfd_lookup(fd, &error);
- if (!sock)
- return error; /* f_count++ */
- pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
- if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
- error = -EPROTOTYPE;
- goto err_out;
- }
- /* @@@ should check if the socket is really operational or we'll crash
- on vcc->send */
- if (classid) {
- if (TC_H_MAJ(classid ^ sch->handle)) {
- pr_debug("atm_tc_change: classid mismatch\n");
- error = -EINVAL;
- goto err_out;
- }
- } else {
- int i;
- unsigned long cl;
-
- for (i = 1; i < 0x8000; i++) {
- classid = TC_H_MAKE(sch->handle, 0x8000 | i);
- cl = atm_tc_find(sch, classid);
- if (!cl)
- break;
- }
- }
- pr_debug("atm_tc_change: new id %x\n", classid);
- flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
- pr_debug("atm_tc_change: flow %p\n", flow);
- if (!flow) {
- error = -ENOBUFS;
- goto err_out;
- }
-
- error = tcf_block_get(&flow->block, &flow->filter_list, sch,
- extack);
- if (error) {
- kfree(flow);
- goto err_out;
- }
-
- flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
- extack);
- if (!flow->q)
- flow->q = &noop_qdisc;
- pr_debug("atm_tc_change: qdisc %p\n", flow->q);
- flow->sock = sock;
- flow->vcc = ATM_SD(sock); /* speedup */
- flow->vcc->user_back = flow;
- pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
- flow->old_pop = flow->vcc->pop;
- flow->parent = p;
- flow->vcc->pop = sch_atm_pop;
- flow->common.classid = classid;
- flow->ref = 1;
- flow->excess = excess;
- list_add(&flow->list, &p->link.list);
- flow->hdr_len = hdr_len;
- if (hdr)
- memcpy(flow->hdr, hdr, hdr_len);
- else
- memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
- *arg = (unsigned long)flow;
- return 0;
-err_out:
- sockfd_put(sock);
- return error;
-}
-
-static int atm_tc_delete(struct Qdisc *sch, unsigned long arg,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- if (list_empty(&flow->list))
- return -EINVAL;
- if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
- return -EBUSY;
- /*
- * Reference count must be 2: one for "keepalive" (set at class
- * creation), and one for the reference held when calling delete.
- */
- if (flow->ref < 2) {
- pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
- return -EINVAL;
- }
- if (flow->ref > 2)
- return -EBUSY; /* catch references via excess, etc. */
- atm_tc_put(sch, arg);
- return 0;
-}
-
-static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
- if (walker->stop)
- return;
- list_for_each_entry(flow, &p->flows, list) {
- if (!tc_qdisc_stats_dump(sch, (unsigned long)flow, walker))
- break;
- }
-}
-
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- return flow ? flow->block : p->link.block;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
-
-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
- struct tcf_result res;
- int result;
- int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-
- pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
- result = TC_ACT_OK; /* be nice to gcc */
- flow = NULL;
- if (TC_H_MAJ(skb->priority) != sch->handle ||
- !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
- struct tcf_proto *fl;
-
- list_for_each_entry(flow, &p->flows, list) {
- fl = rcu_dereference_bh(flow->filter_list);
- if (fl) {
- result = tcf_classify(skb, NULL, fl, &res, true);
- if (result < 0)
- continue;
- if (result == TC_ACT_SHOT)
- goto done;
-
- flow = (struct atm_flow_data *)res.class;
- if (!flow)
- flow = lookup_flow(sch, res.classid);
- goto drop;
- }
- }
- flow = NULL;
-done:
- ;
- }
- if (!flow) {
- flow = &p->link;
- } else {
- if (flow->vcc)
- ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
- /*@@@ looks good ... but it's not supposed to work :-) */
-#ifdef CONFIG_NET_CLS_ACT
- switch (result) {
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- __qdisc_drop(skb, to_free);
- return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- case TC_ACT_SHOT:
- __qdisc_drop(skb, to_free);
- goto drop;
- case TC_ACT_RECLASSIFY:
- if (flow->excess)
- flow = flow->excess;
- else
- ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
- break;
- }
-#endif
- }
-
- ret = qdisc_enqueue(skb, flow->q, to_free);
- if (ret != NET_XMIT_SUCCESS) {
-drop: __maybe_unused
- if (net_xmit_drop_count(ret)) {
- qdisc_qstats_drop(sch);
- if (flow)
- flow->qstats.drops++;
- }
- return ret;
- }
- /*
- * Okay, this may seem weird. We pretend we've dropped the packet if
- * it goes via ATM. The reason for this is that the outer qdisc
- * expects to be able to q->dequeue the packet later on if we return
- * success at this place. Also, sch->q.qdisc needs to reflect whether
- * there is a packet egligible for dequeuing or not. Note that the
- * statistics of the outer qdisc are necessarily wrong because of all
- * this. There's currently no correct solution for this.
- */
- if (flow == &p->link) {
- sch->q.qlen++;
- return NET_XMIT_SUCCESS;
- }
- tasklet_schedule(&p->task);
- return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-}
-
-/*
- * Dequeue packets and send them over ATM. Note that we quite deliberately
- * avoid checking net_device's flow control here, simply because sch_atm
- * uses its own channels, which have nothing to do with any CLIP/LANE/or
- * non-ATM interfaces.
- */
-
-static void sch_atm_dequeue(struct tasklet_struct *t)
-{
- struct atm_qdisc_data *p = from_tasklet(p, t, task);
- struct Qdisc *sch = qdisc_from_priv(p);
- struct atm_flow_data *flow;
- struct sk_buff *skb;
-
- pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list) {
- if (flow == &p->link)
- continue;
- /*
- * If traffic is properly shaped, this won't generate nasty
- * little bursts. Otherwise, it may ... (but that's okay)
- */
- while ((skb = flow->q->ops->peek(flow->q))) {
- if (!atm_may_send(flow->vcc, skb->truesize))
- break;
-
- skb = qdisc_dequeue_peeked(flow->q);
- if (unlikely(!skb))
- break;
-
- qdisc_bstats_update(sch, skb);
- bstats_update(&flow->bstats, skb);
- pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
- /* remove any LL header somebody else has attached */
- skb_pull(skb, skb_network_offset(skb));
- if (skb_headroom(skb) < flow->hdr_len) {
- struct sk_buff *new;
-
- new = skb_realloc_headroom(skb, flow->hdr_len);
- dev_kfree_skb(skb);
- if (!new)
- continue;
- skb = new;
- }
- pr_debug("sch_atm_dequeue: ip %p, data %p\n",
- skb_network_header(skb), skb->data);
- ATM_SKB(skb)->vcc = flow->vcc;
- memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
- flow->hdr_len);
- refcount_add(skb->truesize,
- &sk_atm(flow->vcc)->sk_wmem_alloc);
- /* atm.atm_options are already set by atm_tc_enqueue */
- flow->vcc->send(flow->vcc, skb);
- }
- }
-}
-
-static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct sk_buff *skb;
-
- pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
- tasklet_schedule(&p->task);
- skb = qdisc_dequeue_peeked(p->link.q);
- if (skb)
- sch->q.qlen--;
- return skb;
-}
-
-static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
-
- return p->link.q->ops->peek(p->link.q);
-}
-
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- int err;
-
- pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
- INIT_LIST_HEAD(&p->flows);
- INIT_LIST_HEAD(&p->link.list);
- gnet_stats_basic_sync_init(&p->link.bstats);
- list_add(&p->link.list, &p->flows);
- p->link.q = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, sch->handle, extack);
- if (!p->link.q)
- p->link.q = &noop_qdisc;
- pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
- p->link.vcc = NULL;
- p->link.sock = NULL;
- p->link.common.classid = sch->handle;
- p->link.ref = 1;
-
- err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
- extack);
- if (err)
- return err;
-
- tasklet_setup(&p->task, sch_atm_dequeue);
- return 0;
-}
-
-static void atm_tc_reset(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list)
- qdisc_reset(flow->q);
-}
-
-static void atm_tc_destroy(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow, *tmp;
-
- pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list) {
- tcf_block_put(flow->block);
- flow->block = NULL;
- }
-
- list_for_each_entry_safe(flow, tmp, &p->flows, list) {
- if (flow->ref > 1)
- pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
- atm_tc_put(sch, (unsigned long)flow);
- }
- tasklet_kill(&p->task);
-}
-
-static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
- struct nlattr *nest;
-
- pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
- sch, p, flow, skb, tcm);
- if (list_empty(&flow->list))
- return -EINVAL;
- tcm->tcm_handle = flow->common.classid;
- tcm->tcm_info = flow->q->handle;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
-
- if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
- goto nla_put_failure;
- if (flow->vcc) {
- struct sockaddr_atmpvc pvc;
- int state;
-
- memset(&pvc, 0, sizeof(pvc));
- pvc.sap_family = AF_ATMPVC;
- pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
- pvc.sap_addr.vpi = flow->vcc->vpi;
- pvc.sap_addr.vci = flow->vcc->vci;
- if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
- goto nla_put_failure;
- state = ATM_VF2VS(flow->vcc->flags);
- if (nla_put_u32(skb, TCA_ATM_STATE, state))
- goto nla_put_failure;
- }
- if (flow->excess) {
- if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
- goto nla_put_failure;
- } else {
- if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
- goto nla_put_failure;
- }
- return nla_nest_end(skb, nest);
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-static int
-atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
- struct gnet_dump *d)
-{
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- if (gnet_stats_copy_basic(d, NULL, &flow->bstats, true) < 0 ||
- gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
- return -1;
-
- return 0;
-}
-
-static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- return 0;
-}
-
-static const struct Qdisc_class_ops atm_class_ops = {
- .graft = atm_tc_graft,
- .leaf = atm_tc_leaf,
- .find = atm_tc_find,
- .change = atm_tc_change,
- .delete = atm_tc_delete,
- .walk = atm_tc_walk,
- .tcf_block = atm_tc_tcf_block,
- .bind_tcf = atm_tc_bind_filter,
- .unbind_tcf = atm_tc_put,
- .dump = atm_tc_dump_class,
- .dump_stats = atm_tc_dump_class_stats,
-};
-
-static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
- .cl_ops = &atm_class_ops,
- .id = "atm",
- .priv_size = sizeof(struct atm_qdisc_data),
- .enqueue = atm_tc_enqueue,
- .dequeue = atm_tc_dequeue,
- .peek = atm_tc_peek,
- .init = atm_tc_init,
- .reset = atm_tc_reset,
- .destroy = atm_tc_destroy,
- .dump = atm_tc_dump,
- .owner = THIS_MODULE,
-};
-
-static int __init atm_init(void)
-{
- return register_qdisc(&atm_qdisc_ops);
-}
-
-static void __exit atm_exit(void)
-{
- unregister_qdisc(&atm_qdisc_ops);
-}
-
-module_init(atm_init)
-module_exit(atm_exit)
-MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
deleted file mode 100644
index 36db5f6782f2c..0000000000000
--- a/net/sched/sch_cbq.c
+++ /dev/null
@@ -1,1727 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/sched/sch_cbq.c Class-Based Queueing discipline.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <net/netlink.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-
-
-/* Class-Based Queueing (CBQ) algorithm.
- =======================================
-
- Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
- Management Models for Packet Networks",
- IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
-
- [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
-
- [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
- Parameters", 1996
-
- [4] Sally Floyd and Michael Speer, "Experimental Results
- for Class-Based Queueing", 1998, not published.
-
- -----------------------------------------------------------------------
-
- Algorithm skeleton was taken from NS simulator cbq.cc.
- If someone wants to check this code against the LBL version,
- he should take into account that ONLY the skeleton was borrowed,
- the implementation is different. Particularly:
-
- --- The WRR algorithm is different. Our version looks more
- reasonable (I hope) and works when quanta are allowed to be
- less than MTU, which is always the case when real time classes
- have small rates. Note, that the statement of [3] is
- incomplete, delay may actually be estimated even if class
- per-round allotment is less than MTU. Namely, if per-round
- allotment is W*r_i, and r_1+...+r_k = r < 1
-
- delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
-
- In the worst case we have IntServ estimate with D = W*r+k*MTU
- and C = MTU*r. The proof (if correct at all) is trivial.
-
-
- --- It seems that cbq-2.0 is not very accurate. At least, I cannot
- interpret some places, which look like wrong translations
- from NS. Anyone is advised to find these differences
- and explain to me, why I am wrong 8).
-
- --- Linux has no EOI event, so that we cannot estimate true class
- idle time. Workaround is to consider the next dequeue event
- as sign that previous packet is finished. This is wrong because of
- internal device queueing, but on a permanently loaded link it is true.
- Moreover, combined with clock integrator, this scheme looks
- very close to an ideal solution. */
-
-struct cbq_sched_data;
-
-
-struct cbq_class {
- struct Qdisc_class_common common;
- struct cbq_class *next_alive; /* next class with backlog in this priority band */
-
-/* Parameters */
- unsigned char priority; /* class priority */
- unsigned char priority2; /* priority to be used after overlimit */
- unsigned char ewma_log; /* time constant for idle time calculation */
-
- u32 defmap;
-
- /* Link-sharing scheduler parameters */
- long maxidle; /* Class parameters: see below. */
- long offtime;
- long minidle;
- u32 avpkt;
- struct qdisc_rate_table *R_tab;
-
- /* General scheduler (WRR) parameters */
- long allot;
- long quantum; /* Allotment per WRR round */
- long weight; /* Relative allotment: see below */
-
- struct Qdisc *qdisc; /* Ptr to CBQ discipline */
- struct cbq_class *split; /* Ptr to split node */
- struct cbq_class *share; /* Ptr to LS parent in the class tree */
- struct cbq_class *tparent; /* Ptr to tree parent in the class tree */
- struct cbq_class *borrow; /* NULL if class is bandwidth limited;
- parent otherwise */
- struct cbq_class *sibling; /* Sibling chain */
- struct cbq_class *children; /* Pointer to children chain */
-
- struct Qdisc *q; /* Elementary queueing discipline */
-
-
-/* Variables */
- unsigned char cpriority; /* Effective priority */
- unsigned char delayed;
- unsigned char level; /* level of the class in hierarchy:
- 0 for leaf classes, and maximal
- level of children + 1 for nodes.
- */
-
- psched_time_t last; /* Last end of service */
- psched_time_t undertime;
- long avgidle;
- long deficit; /* Saved deficit for WRR */
- psched_time_t penalized;
- struct gnet_stats_basic_sync bstats;
- struct gnet_stats_queue qstats;
- struct net_rate_estimator __rcu *rate_est;
- struct tc_cbq_xstats xstats;
-
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
-
- int filters;
-
- struct cbq_class *defaults[TC_PRIO_MAX + 1];
-};
-
-struct cbq_sched_data {
- struct Qdisc_class_hash clhash; /* Hash table of all classes */
- int nclasses[TC_CBQ_MAXPRIO + 1];
- unsigned int quanta[TC_CBQ_MAXPRIO + 1];
-
- struct cbq_class link;
-
- unsigned int activemask;
- struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
- with backlog */
-
-#ifdef CONFIG_NET_CLS_ACT
- struct cbq_class *rx_class;
-#endif
- struct cbq_class *tx_class;
- struct cbq_class *tx_borrowed;
- int tx_len;
- psched_time_t now; /* Cached timestamp */
- unsigned int pmask;
-
- struct qdisc_watchdog watchdog; /* Watchdog timer,
- started when CBQ has
- backlog, but cannot
- transmit just now */
- psched_tdiff_t wd_expires;
- int toplevel;
- u32 hgenerator;
-};
-
-
-#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
-
-static inline struct cbq_class *
-cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
-{
- struct Qdisc_class_common *clc;
-
- clc = qdisc_class_find(&q->clhash, classid);
- if (clc == NULL)
- return NULL;
- return container_of(clc, struct cbq_class, common);
-}
-
-#ifdef CONFIG_NET_CLS_ACT
-
-static struct cbq_class *
-cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
-{
- struct cbq_class *cl;
-
- for (cl = this->tparent; cl; cl = cl->tparent) {
- struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
-
- if (new != NULL && new != this)
- return new;
- }
- return NULL;
-}
-
-#endif
-
-/* Classify packet. The procedure is pretty complicated, but
- * it allows us to combine link sharing and priority scheduling
- * transparently.
- *
- * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
- * so that it resolves to split nodes. Then packets are classified
- * by logical priority, or a more specific classifier may be attached
- * to the split node.
- */
-
-static struct cbq_class *
-cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *head = &q->link;
- struct cbq_class **defmap;
- struct cbq_class *cl = NULL;
- u32 prio = skb->priority;
- struct tcf_proto *fl;
- struct tcf_result res;
-
- /*
- * Step 1. If skb->priority points to one of our classes, use it.
- */
- if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
- (cl = cbq_class_lookup(q, prio)) != NULL)
- return cl;
-
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- for (;;) {
- int result = 0;
- defmap = head->defaults;
-
- fl = rcu_dereference_bh(head->filter_list);
- /*
- * Step 2+n. Apply classifier.
- */
- result = tcf_classify(skb, NULL, fl, &res, true);
- if (!fl || result < 0)
- goto fallback;
- if (result == TC_ACT_SHOT)
- return NULL;
-
- cl = (void *)res.class;
- if (!cl) {
- if (TC_H_MAJ(res.classid))
- cl = cbq_class_lookup(q, res.classid);
- else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
- cl = defmap[TC_PRIO_BESTEFFORT];
-
- if (cl == NULL)
- goto fallback;
- }
- if (cl->level >= head->level)
- goto fallback;
-#ifdef CONFIG_NET_CLS_ACT
- switch (result) {
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- fallthrough;
- case TC_ACT_RECLASSIFY:
- return cbq_reclassify(skb, cl);
- }
-#endif
- if (cl->level == 0)
- return cl;
-
- /*
- * Step 3+n. If classifier selected a link sharing class,
- * apply agency specific classifier.
- * Repeat this procedure until we hit a leaf node.
- */
- head = cl;
- }
-
-fallback:
- cl = head;
-
- /*
- * Step 4. No success...
- */
- if (TC_H_MAJ(prio) == 0 &&
- !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
- !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
- return head;
-
- return cl;
-}
-
-/*
- * A packet has just been enqueued on the empty class.
- * cbq_activate_class adds it to the tail of active class list
- * of its priority band.
- */
-
-static inline void cbq_activate_class(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- int prio = cl->cpriority;
- struct cbq_class *cl_tail;
-
- cl_tail = q->active[prio];
- q->active[prio] = cl;
-
- if (cl_tail != NULL) {
- cl->next_alive = cl_tail->next_alive;
- cl_tail->next_alive = cl;
- } else {
- cl->next_alive = cl;
- q->activemask |= (1<<prio);
- }
-}
-
-/*
- * Unlink class from active chain.
- * Note that this same procedure is done directly in cbq_dequeue*
- * during round-robin procedure.
- */
-
-static void cbq_deactivate_class(struct cbq_class *this)
-{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
- int prio = this->cpriority;
- struct cbq_class *cl;
- struct cbq_class *cl_prev = q->active[prio];
-
- do {
- cl = cl_prev->next_alive;
- if (cl == this) {
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
-
- if (cl == q->active[prio]) {
- q->active[prio] = cl_prev;
- if (cl == q->active[prio]) {
- q->active[prio] = NULL;
- q->activemask &= ~(1<<prio);
- return;
- }
- }
- return;
- }
- } while ((cl_prev = cl) != q->active[prio]);
-}
-
-static void
-cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
-{
- int toplevel = q->toplevel;
-
- if (toplevel > cl->level) {
- psched_time_t now = psched_get_time();
-
- do {
- if (cl->undertime < now) {
- q->toplevel = cl->level;
- return;
- }
- } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
- }
-}
-
-static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- int ret;
- struct cbq_class *cl = cbq_classify(skb, sch, &ret);
-
-#ifdef CONFIG_NET_CLS_ACT
- q->rx_class = cl;
-#endif
- if (cl == NULL) {
- if (ret & __NET_XMIT_BYPASS)
- qdisc_qstats_drop(sch);
- __qdisc_drop(skb, to_free);
- return ret;
- }
-
- ret = qdisc_enqueue(skb, cl->q, to_free);
- if (ret == NET_XMIT_SUCCESS) {
- sch->q.qlen++;
- cbq_mark_toplevel(q, cl);
- if (!cl->next_alive)
- cbq_activate_class(cl);
- return ret;
- }
-
- if (net_xmit_drop_count(ret)) {
- qdisc_qstats_drop(sch);
- cbq_mark_toplevel(q, cl);
- cl->qstats.drops++;
- }
- return ret;
-}
-
-/* Overlimit action: penalize leaf class by adding offtime */
-static void cbq_overlimit(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- psched_tdiff_t delay = cl->undertime - q->now;
-
- if (!cl->delayed) {
- delay += cl->offtime;
-
- /*
- * Class goes to sleep, so that it will have no
- * chance to work avgidle. Let's forgive it 8)
- *
- * BTW cbq-2.0 has a crap in this
- * place, apparently they forgot to shift it by cl->ewma_log.
- */
- if (cl->avgidle < 0)
- delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
- if (cl->avgidle < cl->minidle)
- cl->avgidle = cl->minidle;
- if (delay <= 0)
- delay = 1;
- cl->undertime = q->now + delay;
-
- cl->xstats.overactions++;
- cl->delayed = 1;
- }
- if (q->wd_expires == 0 || q->wd_expires > delay)
- q->wd_expires = delay;
-
- /* Dirty work! We must schedule wakeups based on
- * real available rate, rather than leaf rate,
- * which may be tiny (even zero).
- */
- if (q->toplevel == TC_CBQ_MAXLEVEL) {
- struct cbq_class *b;
- psched_tdiff_t base_delay = q->wd_expires;
-
- for (b = cl->borrow; b; b = b->borrow) {
- delay = b->undertime - q->now;
- if (delay < base_delay) {
- if (delay <= 0)
- delay = 1;
- base_delay = delay;
- }
- }
-
- q->wd_expires = base_delay;
- }
-}
-
-/*
- * It is mission critical procedure.
- *
- * We "regenerate" toplevel cutoff, if transmitting class
- * has backlog and it is not regulated. It is not part of
- * original CBQ description, but looks more reasonable.
- * Probably, it is wrong. This question needs further investigation.
- */
-
-static inline void
-cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
- struct cbq_class *borrowed)
-{
- if (cl && q->toplevel >= borrowed->level) {
- if (cl->q->q.qlen > 1) {
- do {
- if (borrowed->undertime == PSCHED_PASTPERFECT) {
- q->toplevel = borrowed->level;
- return;
- }
- } while ((borrowed = borrowed->borrow) != NULL);
- }
-#if 0
- /* It is not necessary now. Uncommenting it
- will save CPU cycles, but decrease fairness.
- */
- q->toplevel = TC_CBQ_MAXLEVEL;
-#endif
- }
-}
-
-static void
-cbq_update(struct cbq_sched_data *q)
-{
- struct cbq_class *this = q->tx_class;
- struct cbq_class *cl = this;
- int len = q->tx_len;
- psched_time_t now;
-
- q->tx_class = NULL;
- /* Time integrator. We calculate EOS time
- * by adding expected packet transmission time.
- */
- now = q->now + L2T(&q->link, len);
-
- for ( ; cl; cl = cl->share) {
- long avgidle = cl->avgidle;
- long idle;
-
- _bstats_update(&cl->bstats, len, 1);
-
- /*
- * (now - last) is total time between packet right edges.
- * (last_pktlen/rate) is "virtual" busy time, so that
- *
- * idle = (now - last) - last_pktlen/rate
- */
-
- idle = now - cl->last;
- if ((unsigned long)idle > 128*1024*1024) {
- avgidle = cl->maxidle;
- } else {
- idle -= L2T(cl, len);
-
- /* true_avgidle := (1-W)*true_avgidle + W*idle,
- * where W=2^{-ewma_log}. But cl->avgidle is scaled:
- * cl->avgidle == true_avgidle/W,
- * hence:
- */
- avgidle += idle - (avgidle>>cl->ewma_log);
- }
-
- if (avgidle <= 0) {
- /* Overlimit or at-limit */
-
- if (avgidle < cl->minidle)
- avgidle = cl->minidle;
-
- cl->avgidle = avgidle;
-
- /* Calculate expected time, when this class
- * will be allowed to send.
- * It will occur, when:
- * (1-W)*true_avgidle + W*delay = 0, i.e.
- * idle = (1/W - 1)*(-true_avgidle)
- * or
- * idle = (1 - W)*(-cl->avgidle);
- */
- idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
-
- /*
- * That is not all.
- * To maintain the rate allocated to the class,
- * we add to undertime virtual clock,
- * necessary to complete transmitted packet.
- * (len/phys_bandwidth has been already passed
- * to the moment of cbq_update)
- */
-
- idle -= L2T(&q->link, len);
- idle += L2T(cl, len);
-
- cl->undertime = now + idle;
- } else {
- /* Underlimit */
-
- cl->undertime = PSCHED_PASTPERFECT;
- if (avgidle > cl->maxidle)
- cl->avgidle = cl->maxidle;
- else
- cl->avgidle = avgidle;
- }
- if ((s64)(now - cl->last) > 0)
- cl->last = now;
- }
-
- cbq_update_toplevel(q, this, q->tx_borrowed);
-}
-
-static inline struct cbq_class *
-cbq_under_limit(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- struct cbq_class *this_cl = cl;
-
- if (cl->tparent == NULL)
- return cl;
-
- if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
- cl->delayed = 0;
- return cl;
- }
-
- do {
- /* It is very suspicious place. Now overlimit
- * action is generated for not bounded classes
- * only if link is completely congested.
- * Though it is in agree with ancestor-only paradigm,
- * it looks very stupid. Particularly,
- * it means that this chunk of code will either
- * never be called or result in strong amplification
- * of burstiness. Dangerous, silly, and, however,
- * no another solution exists.
- */
- cl = cl->borrow;
- if (!cl) {
- this_cl->qstats.overlimits++;
- cbq_overlimit(this_cl);
- return NULL;
- }
- if (cl->level > q->toplevel)
- return NULL;
- } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
-
- cl->delayed = 0;
- return cl;
-}
-
-static inline struct sk_buff *
-cbq_dequeue_prio(struct Qdisc *sch, int prio)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl_tail, *cl_prev, *cl;
- struct sk_buff *skb;
- int deficit;
-
- cl_tail = cl_prev = q->active[prio];
- cl = cl_prev->next_alive;
-
- do {
- deficit = 0;
-
- /* Start round */
- do {
- struct cbq_class *borrow = cl;
-
- if (cl->q->q.qlen &&
- (borrow = cbq_under_limit(cl)) == NULL)
- goto skip_class;
-
- if (cl->deficit <= 0) {
- /* Class exhausted its allotment per
- * this round. Switch to the next one.
- */
- deficit = 1;
- cl->deficit += cl->quantum;
- goto next_class;
- }
-
- skb = cl->q->dequeue(cl->q);
-
- /* Class did not give us any skb :-(
- * It could occur even if cl->q->q.qlen != 0
- * f.e. if cl->q == "tbf"
- */
- if (skb == NULL)
- goto skip_class;
-
- cl->deficit -= qdisc_pkt_len(skb);
- q->tx_class = cl;
- q->tx_borrowed = borrow;
- if (borrow != cl) {
-#ifndef CBQ_XSTATS_BORROWS_BYTES
- borrow->xstats.borrows++;
- cl->xstats.borrows++;
-#else
- borrow->xstats.borrows += qdisc_pkt_len(skb);
- cl->xstats.borrows += qdisc_pkt_len(skb);
-#endif
- }
- q->tx_len = qdisc_pkt_len(skb);
-
- if (cl->deficit <= 0) {
- q->active[prio] = cl;
- cl = cl->next_alive;
- cl->deficit += cl->quantum;
- }
- return skb;
-
-skip_class:
- if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
- /* Class is empty or penalized.
- * Unlink it from active chain.
- */
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
-
- /* Did cl_tail point to it? */
- if (cl == cl_tail) {
- /* Repair it! */
- cl_tail = cl_prev;
-
- /* Was it the last class in this band? */
- if (cl == cl_tail) {
- /* Kill the band! */
- q->active[prio] = NULL;
- q->activemask &= ~(1<<prio);
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
- return NULL;
- }
-
- q->active[prio] = cl_tail;
- }
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
-
- cl = cl_prev;
- }
-
-next_class:
- cl_prev = cl;
- cl = cl->next_alive;
- } while (cl_prev != cl_tail);
- } while (deficit);
-
- q->active[prio] = cl_prev;
-
- return NULL;
-}
-
-static inline struct sk_buff *
-cbq_dequeue_1(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct sk_buff *skb;
- unsigned int activemask;
-
- activemask = q->activemask & 0xFF;
- while (activemask) {
- int prio = ffz(~activemask);
- activemask &= ~(1<<prio);
- skb = cbq_dequeue_prio(sch, prio);
- if (skb)
- return skb;
- }
- return NULL;
-}
-
-static struct sk_buff *
-cbq_dequeue(struct Qdisc *sch)
-{
- struct sk_buff *skb;
- struct cbq_sched_data *q = qdisc_priv(sch);
- psched_time_t now;
-
- now = psched_get_time();
-
- if (q->tx_class)
- cbq_update(q);
-
- q->now = now;
-
- for (;;) {
- q->wd_expires = 0;
-
- skb = cbq_dequeue_1(sch);
- if (skb) {
- qdisc_bstats_update(sch, skb);
- sch->q.qlen--;
- return skb;
- }
-
- /* All the classes are overlimit.
- *
- * It is possible, if:
- *
- * 1. Scheduler is empty.
- * 2. Toplevel cutoff inhibited borrowing.
- * 3. Root class is overlimit.
- *
- * Reset 2d and 3d conditions and retry.
- *
- * Note, that NS and cbq-2.0 are buggy, peeking
- * an arbitrary class is appropriate for ancestor-only
- * sharing, but not for toplevel algorithm.
- *
- * Our version is better, but slower, because it requires
- * two passes, but it is unavoidable with top-level sharing.
- */
-
- if (q->toplevel == TC_CBQ_MAXLEVEL &&
- q->link.undertime == PSCHED_PASTPERFECT)
- break;
-
- q->toplevel = TC_CBQ_MAXLEVEL;
- q->link.undertime = PSCHED_PASTPERFECT;
- }
-
- /* No packets in scheduler or nobody wants to give them to us :-(
- * Sigh... start watchdog timer in the last case.
- */
-
- if (sch->q.qlen) {
- qdisc_qstats_overlimit(sch);
- if (q->wd_expires)
- qdisc_watchdog_schedule(&q->watchdog,
- now + q->wd_expires);
- }
- return NULL;
-}
-
-/* CBQ class maintenance routines */
-
-static void cbq_adjust_levels(struct cbq_class *this)
-{
- if (this == NULL)
- return;
-
- do {
- int level = 0;
- struct cbq_class *cl;
-
- cl = this->children;
- if (cl) {
- do {
- if (cl->level > level)
- level = cl->level;
- } while ((cl = cl->sibling) != this->children);
- }
- this->level = level + 1;
- } while ((this = this->tparent) != NULL);
-}
-
-static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
|
|
-{
|
|
- struct cbq_class *cl;
|
|
- unsigned int h;
|
|
-
|
|
- if (q->quanta[prio] == 0)
|
|
- return;
|
|
-
|
|
- for (h = 0; h < q->clhash.hashsize; h++) {
|
|
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
|
|
- /* BUGGGG... Beware! This expression suffer of
|
|
- * arithmetic overflows!
|
|
- */
|
|
- if (cl->priority == prio) {
|
|
- cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
|
|
- q->quanta[prio];
|
|
- }
|
|
- if (cl->quantum <= 0 ||
|
|
- cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
|
|
- pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
|
|
- cl->common.classid, cl->quantum);
|
|
- cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
|
|
- }
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-static void cbq_sync_defmap(struct cbq_class *cl)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
|
|
- struct cbq_class *split = cl->split;
|
|
- unsigned int h;
|
|
- int i;
|
|
-
|
|
- if (split == NULL)
|
|
- return;
|
|
-
|
|
- for (i = 0; i <= TC_PRIO_MAX; i++) {
|
|
- if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
|
|
- split->defaults[i] = NULL;
|
|
- }
|
|
-
|
|
- for (i = 0; i <= TC_PRIO_MAX; i++) {
|
|
- int level = split->level;
|
|
-
|
|
- if (split->defaults[i])
|
|
- continue;
|
|
-
|
|
- for (h = 0; h < q->clhash.hashsize; h++) {
|
|
- struct cbq_class *c;
|
|
-
|
|
- hlist_for_each_entry(c, &q->clhash.hash[h],
|
|
- common.hnode) {
|
|
- if (c->split == split && c->level < level &&
|
|
- c->defmap & (1<<i)) {
|
|
- split->defaults[i] = c;
|
|
- level = c->level;
|
|
- }
|
|
- }
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
|
|
-{
|
|
- struct cbq_class *split = NULL;
|
|
-
|
|
- if (splitid == 0) {
|
|
- split = cl->split;
|
|
- if (!split)
|
|
- return;
|
|
- splitid = split->common.classid;
|
|
- }
|
|
-
|
|
- if (split == NULL || split->common.classid != splitid) {
|
|
- for (split = cl->tparent; split; split = split->tparent)
|
|
- if (split->common.classid == splitid)
|
|
- break;
|
|
- }
|
|
-
|
|
- if (split == NULL)
|
|
- return;
|
|
-
|
|
- if (cl->split != split) {
|
|
- cl->defmap = 0;
|
|
- cbq_sync_defmap(cl);
|
|
- cl->split = split;
|
|
- cl->defmap = def & mask;
|
|
- } else
|
|
- cl->defmap = (cl->defmap & ~mask) | (def & mask);
|
|
-
|
|
- cbq_sync_defmap(cl);
|
|
-}
|
|
-
|
|
-static void cbq_unlink_class(struct cbq_class *this)
|
|
-{
|
|
- struct cbq_class *cl, **clp;
|
|
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
|
|
-
|
|
- qdisc_class_hash_remove(&q->clhash, &this->common);
|
|
-
|
|
- if (this->tparent) {
|
|
- clp = &this->sibling;
|
|
- cl = *clp;
|
|
- do {
|
|
- if (cl == this) {
|
|
- *clp = cl->sibling;
|
|
- break;
|
|
- }
|
|
- clp = &cl->sibling;
|
|
- } while ((cl = *clp) != this->sibling);
|
|
-
|
|
- if (this->tparent->children == this) {
|
|
- this->tparent->children = this->sibling;
|
|
- if (this->sibling == this)
|
|
- this->tparent->children = NULL;
|
|
- }
|
|
- } else {
|
|
- WARN_ON(this->sibling != this);
|
|
- }
|
|
-}
|
|
-
|
|
-static void cbq_link_class(struct cbq_class *this)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
|
|
- struct cbq_class *parent = this->tparent;
|
|
-
|
|
- this->sibling = this;
|
|
- qdisc_class_hash_insert(&q->clhash, &this->common);
|
|
-
|
|
- if (parent == NULL)
|
|
- return;
|
|
-
|
|
- if (parent->children == NULL) {
|
|
- parent->children = this;
|
|
- } else {
|
|
- this->sibling = parent->children->sibling;
|
|
- parent->children->sibling = this;
|
|
- }
|
|
-}
|
|
-
|
|
-static void
|
|
-cbq_reset(struct Qdisc *sch)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct cbq_class *cl;
|
|
- int prio;
|
|
- unsigned int h;
|
|
-
|
|
- q->activemask = 0;
|
|
- q->pmask = 0;
|
|
- q->tx_class = NULL;
|
|
- q->tx_borrowed = NULL;
|
|
- qdisc_watchdog_cancel(&q->watchdog);
|
|
- q->toplevel = TC_CBQ_MAXLEVEL;
|
|
- q->now = psched_get_time();
|
|
-
|
|
- for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
|
|
- q->active[prio] = NULL;
|
|
-
|
|
- for (h = 0; h < q->clhash.hashsize; h++) {
|
|
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
|
|
- qdisc_reset(cl->q);
|
|
-
|
|
- cl->next_alive = NULL;
|
|
- cl->undertime = PSCHED_PASTPERFECT;
|
|
- cl->avgidle = cl->maxidle;
|
|
- cl->deficit = cl->quantum;
|
|
- cl->cpriority = cl->priority;
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-
|
|
-static void cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
|
|
-{
|
|
- if (lss->change & TCF_CBQ_LSS_FLAGS) {
|
|
- cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
|
|
- cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
|
|
- }
|
|
- if (lss->change & TCF_CBQ_LSS_EWMA)
|
|
- cl->ewma_log = lss->ewma_log;
|
|
- if (lss->change & TCF_CBQ_LSS_AVPKT)
|
|
- cl->avpkt = lss->avpkt;
|
|
- if (lss->change & TCF_CBQ_LSS_MINIDLE)
|
|
- cl->minidle = -(long)lss->minidle;
|
|
- if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
|
|
- cl->maxidle = lss->maxidle;
|
|
- cl->avgidle = lss->maxidle;
|
|
- }
|
|
- if (lss->change & TCF_CBQ_LSS_OFFTIME)
|
|
- cl->offtime = lss->offtime;
|
|
-}
|
|
-
|
|
-static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
|
|
-{
|
|
- q->nclasses[cl->priority]--;
|
|
- q->quanta[cl->priority] -= cl->weight;
|
|
- cbq_normalize_quanta(q, cl->priority);
|
|
-}
|
|
-
|
|
-static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
|
|
-{
|
|
- q->nclasses[cl->priority]++;
|
|
- q->quanta[cl->priority] += cl->weight;
|
|
- cbq_normalize_quanta(q, cl->priority);
|
|
-}
|
|
-
|
|
-static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
|
|
-
|
|
- if (wrr->allot)
|
|
- cl->allot = wrr->allot;
|
|
- if (wrr->weight)
|
|
- cl->weight = wrr->weight;
|
|
- if (wrr->priority) {
|
|
- cl->priority = wrr->priority - 1;
|
|
- cl->cpriority = cl->priority;
|
|
- if (cl->priority >= cl->priority2)
|
|
- cl->priority2 = TC_CBQ_MAXPRIO - 1;
|
|
- }
|
|
-
|
|
- cbq_addprio(q, cl);
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
|
|
-{
|
|
- cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
|
|
- [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
|
|
- [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
|
|
- [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
|
|
- [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
|
|
- [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
|
|
- [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
|
|
- [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
|
|
-};
|
|
-
|
|
-static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
|
|
- struct nlattr *opt,
|
|
- struct netlink_ext_ack *extack)
|
|
-{
|
|
- int err;
|
|
-
|
|
- if (!opt) {
|
|
- NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
|
|
- cbq_policy, extack);
|
|
- if (err < 0)
|
|
- return err;
|
|
-
|
|
- if (tb[TCA_CBQ_WRROPT]) {
|
|
- const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
|
|
-
|
|
- if (wrr->priority > TC_CBQ_MAXPRIO) {
|
|
- NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
|
|
- err = -EINVAL;
|
|
- }
|
|
- }
|
|
- return err;
|
|
-}
|
|
-
|
|
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
|
|
- struct netlink_ext_ack *extack)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct nlattr *tb[TCA_CBQ_MAX + 1];
|
|
- struct tc_ratespec *r;
|
|
- int err;
|
|
-
|
|
- qdisc_watchdog_init(&q->watchdog, sch);
|
|
-
|
|
- err = cbq_opt_parse(tb, opt, extack);
|
|
- if (err < 0)
|
|
- return err;
|
|
-
|
|
- if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
|
|
- NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- r = nla_data(tb[TCA_CBQ_RATE]);
|
|
-
|
|
- q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
|
|
- if (!q->link.R_tab)
|
|
- return -EINVAL;
|
|
-
|
|
- err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
|
|
- if (err)
|
|
- goto put_rtab;
|
|
-
|
|
- err = qdisc_class_hash_init(&q->clhash);
|
|
- if (err < 0)
|
|
- goto put_block;
|
|
-
|
|
- q->link.sibling = &q->link;
|
|
- q->link.common.classid = sch->handle;
|
|
- q->link.qdisc = sch;
|
|
- q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
|
|
- sch->handle, NULL);
|
|
- if (!q->link.q)
|
|
- q->link.q = &noop_qdisc;
|
|
- else
|
|
- qdisc_hash_add(q->link.q, true);
|
|
-
|
|
- q->link.priority = TC_CBQ_MAXPRIO - 1;
|
|
- q->link.priority2 = TC_CBQ_MAXPRIO - 1;
|
|
- q->link.cpriority = TC_CBQ_MAXPRIO - 1;
|
|
- q->link.allot = psched_mtu(qdisc_dev(sch));
|
|
- q->link.quantum = q->link.allot;
|
|
- q->link.weight = q->link.R_tab->rate.rate;
|
|
-
|
|
- q->link.ewma_log = TC_CBQ_DEF_EWMA;
|
|
- q->link.avpkt = q->link.allot/2;
|
|
- q->link.minidle = -0x7FFFFFFF;
|
|
-
|
|
- q->toplevel = TC_CBQ_MAXLEVEL;
|
|
- q->now = psched_get_time();
|
|
-
|
|
- cbq_link_class(&q->link);
|
|
-
|
|
- if (tb[TCA_CBQ_LSSOPT])
|
|
- cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
|
|
-
|
|
- cbq_addprio(q, &q->link);
|
|
- return 0;
|
|
-
|
|
-put_block:
|
|
- tcf_block_put(q->link.block);
|
|
-
|
|
-put_rtab:
|
|
- qdisc_put_rtab(q->link.R_tab);
|
|
- return err;
|
|
-}
|
|
-
|
|
-static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
|
|
-{
|
|
- unsigned char *b = skb_tail_pointer(skb);
|
|
-
|
|
- if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
|
|
- goto nla_put_failure;
|
|
- return skb->len;
|
|
-
|
|
-nla_put_failure:
|
|
- nlmsg_trim(skb, b);
|
|
- return -1;
|
|
-}
|
|
-
|
|
-static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
|
|
-{
|
|
- unsigned char *b = skb_tail_pointer(skb);
|
|
- struct tc_cbq_lssopt opt;
|
|
-
|
|
- opt.flags = 0;
|
|
- if (cl->borrow == NULL)
|
|
- opt.flags |= TCF_CBQ_LSS_BOUNDED;
|
|
- if (cl->share == NULL)
|
|
- opt.flags |= TCF_CBQ_LSS_ISOLATED;
|
|
- opt.ewma_log = cl->ewma_log;
|
|
- opt.level = cl->level;
|
|
- opt.avpkt = cl->avpkt;
|
|
- opt.maxidle = cl->maxidle;
|
|
- opt.minidle = (u32)(-cl->minidle);
|
|
- opt.offtime = cl->offtime;
|
|
- opt.change = ~0;
|
|
- if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
|
|
- goto nla_put_failure;
|
|
- return skb->len;
|
|
-
|
|
-nla_put_failure:
|
|
- nlmsg_trim(skb, b);
|
|
- return -1;
|
|
-}
|
|
-
|
|
-static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
|
|
-{
|
|
- unsigned char *b = skb_tail_pointer(skb);
|
|
- struct tc_cbq_wrropt opt;
|
|
-
|
|
- memset(&opt, 0, sizeof(opt));
|
|
- opt.flags = 0;
|
|
- opt.allot = cl->allot;
|
|
- opt.priority = cl->priority + 1;
|
|
- opt.cpriority = cl->cpriority + 1;
|
|
- opt.weight = cl->weight;
|
|
- if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
|
|
- goto nla_put_failure;
|
|
- return skb->len;
|
|
-
|
|
-nla_put_failure:
|
|
- nlmsg_trim(skb, b);
|
|
- return -1;
|
|
-}
|
|
-
|
|
-static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
|
|
-{
|
|
- unsigned char *b = skb_tail_pointer(skb);
|
|
- struct tc_cbq_fopt opt;
|
|
-
|
|
- if (cl->split || cl->defmap) {
|
|
- opt.split = cl->split ? cl->split->common.classid : 0;
|
|
- opt.defmap = cl->defmap;
|
|
- opt.defchange = ~0;
|
|
- if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
|
|
- goto nla_put_failure;
|
|
- }
|
|
- return skb->len;
|
|
-
|
|
-nla_put_failure:
|
|
- nlmsg_trim(skb, b);
|
|
- return -1;
|
|
-}
|
|
-
|
|
-static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
|
|
-{
|
|
- if (cbq_dump_lss(skb, cl) < 0 ||
|
|
- cbq_dump_rate(skb, cl) < 0 ||
|
|
- cbq_dump_wrr(skb, cl) < 0 ||
|
|
- cbq_dump_fopt(skb, cl) < 0)
|
|
- return -1;
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct nlattr *nest;
|
|
-
|
|
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
|
|
- if (nest == NULL)
|
|
- goto nla_put_failure;
|
|
- if (cbq_dump_attr(skb, &q->link) < 0)
|
|
- goto nla_put_failure;
|
|
- return nla_nest_end(skb, nest);
|
|
-
|
|
-nla_put_failure:
|
|
- nla_nest_cancel(skb, nest);
|
|
- return -1;
|
|
-}
|
|
-
|
|
-static int
|
|
-cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
-
|
|
- q->link.xstats.avgidle = q->link.avgidle;
|
|
- return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
|
|
-}
|
|
-
|
|
-static int
|
|
-cbq_dump_class(struct Qdisc *sch, unsigned long arg,
|
|
- struct sk_buff *skb, struct tcmsg *tcm)
|
|
-{
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
- struct nlattr *nest;
|
|
-
|
|
- if (cl->tparent)
|
|
- tcm->tcm_parent = cl->tparent->common.classid;
|
|
- else
|
|
- tcm->tcm_parent = TC_H_ROOT;
|
|
- tcm->tcm_handle = cl->common.classid;
|
|
- tcm->tcm_info = cl->q->handle;
|
|
-
|
|
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
|
|
- if (nest == NULL)
|
|
- goto nla_put_failure;
|
|
- if (cbq_dump_attr(skb, cl) < 0)
|
|
- goto nla_put_failure;
|
|
- return nla_nest_end(skb, nest);
|
|
-
|
|
-nla_put_failure:
|
|
- nla_nest_cancel(skb, nest);
|
|
- return -1;
|
|
-}
|
|
-
|
|
-static int
|
|
-cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
|
|
- struct gnet_dump *d)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
- __u32 qlen;
|
|
-
|
|
- cl->xstats.avgidle = cl->avgidle;
|
|
- cl->xstats.undertime = 0;
|
|
- qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
|
|
-
|
|
- if (cl->undertime != PSCHED_PASTPERFECT)
|
|
- cl->xstats.undertime = cl->undertime - q->now;
|
|
-
|
|
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
|
|
- gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
|
|
- gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
|
|
- return -1;
|
|
-
|
|
- return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
|
|
-}
|
|
-
|
|
-static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
|
|
- struct Qdisc **old, struct netlink_ext_ack *extack)
|
|
-{
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
-
|
|
- if (new == NULL) {
|
|
- new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
|
|
- cl->common.classid, extack);
|
|
- if (new == NULL)
|
|
- return -ENOBUFS;
|
|
- }
|
|
-
|
|
- *old = qdisc_replace(sch, new, &cl->q);
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
|
|
-{
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
-
|
|
- return cl->q;
|
|
-}
|
|
-
|
|
-static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
|
|
-{
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
-
|
|
- cbq_deactivate_class(cl);
|
|
-}
|
|
-
|
|
-static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
-
|
|
- return (unsigned long)cbq_class_lookup(q, classid);
|
|
-}
|
|
-
|
|
-static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
-
|
|
- WARN_ON(cl->filters);
|
|
-
|
|
- tcf_block_put(cl->block);
|
|
- qdisc_put(cl->q);
|
|
- qdisc_put_rtab(cl->R_tab);
|
|
- gen_kill_estimator(&cl->rate_est);
|
|
- if (cl != &q->link)
|
|
- kfree(cl);
|
|
-}
|
|
-
|
|
-static void cbq_destroy(struct Qdisc *sch)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct hlist_node *next;
|
|
- struct cbq_class *cl;
|
|
- unsigned int h;
|
|
-
|
|
-#ifdef CONFIG_NET_CLS_ACT
|
|
- q->rx_class = NULL;
|
|
-#endif
|
|
- /*
|
|
- * Filters must be destroyed first because we don't destroy the
|
|
- * classes from root to leafs which means that filters can still
|
|
- * be bound to classes which have been destroyed already. --TGR '04
|
|
- */
|
|
- for (h = 0; h < q->clhash.hashsize; h++) {
|
|
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
|
|
- tcf_block_put(cl->block);
|
|
- cl->block = NULL;
|
|
- }
|
|
- }
|
|
- for (h = 0; h < q->clhash.hashsize; h++) {
|
|
- hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
|
|
- common.hnode)
|
|
- cbq_destroy_class(sch, cl);
|
|
- }
|
|
- qdisc_class_hash_destroy(&q->clhash);
|
|
-}
|
|
-
|
|
-static int
|
|
-cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
|
|
- unsigned long *arg, struct netlink_ext_ack *extack)
|
|
-{
|
|
- int err;
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct cbq_class *cl = (struct cbq_class *)*arg;
|
|
- struct nlattr *opt = tca[TCA_OPTIONS];
|
|
- struct nlattr *tb[TCA_CBQ_MAX + 1];
|
|
- struct cbq_class *parent;
|
|
- struct qdisc_rate_table *rtab = NULL;
|
|
-
|
|
- err = cbq_opt_parse(tb, opt, extack);
|
|
- if (err < 0)
|
|
- return err;
|
|
-
|
|
- if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
|
|
- NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
|
|
- return -EOPNOTSUPP;
|
|
- }
|
|
-
|
|
- if (cl) {
|
|
- /* Check parent */
|
|
- if (parentid) {
|
|
- if (cl->tparent &&
|
|
- cl->tparent->common.classid != parentid) {
|
|
- NL_SET_ERR_MSG(extack, "Invalid parent id");
|
|
- return -EINVAL;
|
|
- }
|
|
- if (!cl->tparent && parentid != TC_H_ROOT) {
|
|
- NL_SET_ERR_MSG(extack, "Parent must be root");
|
|
- return -EINVAL;
|
|
- }
|
|
- }
|
|
-
|
|
- if (tb[TCA_CBQ_RATE]) {
|
|
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
|
|
- tb[TCA_CBQ_RTAB], extack);
|
|
- if (rtab == NULL)
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (tca[TCA_RATE]) {
|
|
- err = gen_replace_estimator(&cl->bstats, NULL,
|
|
- &cl->rate_est,
|
|
- NULL,
|
|
- true,
|
|
- tca[TCA_RATE]);
|
|
- if (err) {
|
|
- NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
|
|
- qdisc_put_rtab(rtab);
|
|
- return err;
|
|
- }
|
|
- }
|
|
-
|
|
- /* Change class parameters */
|
|
- sch_tree_lock(sch);
|
|
-
|
|
- if (cl->next_alive != NULL)
|
|
- cbq_deactivate_class(cl);
|
|
-
|
|
- if (rtab) {
|
|
- qdisc_put_rtab(cl->R_tab);
|
|
- cl->R_tab = rtab;
|
|
- }
|
|
-
|
|
- if (tb[TCA_CBQ_LSSOPT])
|
|
- cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
|
|
-
|
|
- if (tb[TCA_CBQ_WRROPT]) {
|
|
- cbq_rmprio(q, cl);
|
|
- cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
|
|
- }
|
|
-
|
|
- if (tb[TCA_CBQ_FOPT])
|
|
- cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
|
|
-
|
|
- if (cl->q->q.qlen)
|
|
- cbq_activate_class(cl);
|
|
-
|
|
- sch_tree_unlock(sch);
|
|
-
|
|
- return 0;
|
|
- }
|
|
-
|
|
- if (parentid == TC_H_ROOT)
|
|
- return -EINVAL;
|
|
-
|
|
- if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
|
|
- NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
|
|
- extack);
|
|
- if (rtab == NULL)
|
|
- return -EINVAL;
|
|
-
|
|
- if (classid) {
|
|
- err = -EINVAL;
|
|
- if (TC_H_MAJ(classid ^ sch->handle) ||
|
|
- cbq_class_lookup(q, classid)) {
|
|
- NL_SET_ERR_MSG(extack, "Specified class not found");
|
|
- goto failure;
|
|
- }
|
|
- } else {
|
|
- int i;
|
|
- classid = TC_H_MAKE(sch->handle, 0x8000);
|
|
-
|
|
- for (i = 0; i < 0x8000; i++) {
|
|
- if (++q->hgenerator >= 0x8000)
|
|
- q->hgenerator = 1;
|
|
- if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
|
|
- break;
|
|
- }
|
|
- err = -ENOSR;
|
|
- if (i >= 0x8000) {
|
|
- NL_SET_ERR_MSG(extack, "Unable to generate classid");
|
|
- goto failure;
|
|
- }
|
|
- classid = classid|q->hgenerator;
|
|
- }
|
|
-
|
|
- parent = &q->link;
|
|
- if (parentid) {
|
|
- parent = cbq_class_lookup(q, parentid);
|
|
- err = -EINVAL;
|
|
- if (!parent) {
|
|
- NL_SET_ERR_MSG(extack, "Failed to find parentid");
|
|
- goto failure;
|
|
- }
|
|
- }
|
|
-
|
|
- err = -ENOBUFS;
|
|
- cl = kzalloc(sizeof(*cl), GFP_KERNEL);
|
|
- if (cl == NULL)
|
|
- goto failure;
|
|
-
|
|
- gnet_stats_basic_sync_init(&cl->bstats);
|
|
- err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
|
|
- if (err) {
|
|
- kfree(cl);
|
|
- goto failure;
|
|
- }
|
|
-
|
|
- if (tca[TCA_RATE]) {
|
|
- err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
|
|
- NULL, true, tca[TCA_RATE]);
|
|
- if (err) {
|
|
- NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
|
|
- tcf_block_put(cl->block);
|
|
- kfree(cl);
|
|
- goto failure;
|
|
- }
|
|
- }
|
|
-
|
|
- cl->R_tab = rtab;
|
|
- rtab = NULL;
|
|
- cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
|
|
- NULL);
|
|
- if (!cl->q)
|
|
- cl->q = &noop_qdisc;
|
|
- else
|
|
- qdisc_hash_add(cl->q, true);
|
|
-
|
|
- cl->common.classid = classid;
|
|
- cl->tparent = parent;
|
|
- cl->qdisc = sch;
|
|
- cl->allot = parent->allot;
|
|
- cl->quantum = cl->allot;
|
|
- cl->weight = cl->R_tab->rate.rate;
|
|
-
|
|
- sch_tree_lock(sch);
|
|
- cbq_link_class(cl);
|
|
- cl->borrow = cl->tparent;
|
|
- if (cl->tparent != &q->link)
|
|
- cl->share = cl->tparent;
|
|
- cbq_adjust_levels(parent);
|
|
- cl->minidle = -0x7FFFFFFF;
|
|
- cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
|
|
- cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
|
|
- if (cl->ewma_log == 0)
|
|
- cl->ewma_log = q->link.ewma_log;
|
|
- if (cl->maxidle == 0)
|
|
- cl->maxidle = q->link.maxidle;
|
|
- if (cl->avpkt == 0)
|
|
- cl->avpkt = q->link.avpkt;
|
|
- if (tb[TCA_CBQ_FOPT])
|
|
- cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
|
|
- sch_tree_unlock(sch);
|
|
-
|
|
- qdisc_class_hash_grow(sch, &q->clhash);
|
|
-
|
|
- *arg = (unsigned long)cl;
|
|
- return 0;
|
|
-
|
|
-failure:
|
|
- qdisc_put_rtab(rtab);
|
|
- return err;
|
|
-}
|
|
-
|
|
-static int cbq_delete(struct Qdisc *sch, unsigned long arg,
|
|
- struct netlink_ext_ack *extack)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
-
|
|
- if (cl->filters || cl->children || cl == &q->link)
|
|
- return -EBUSY;
|
|
-
|
|
- sch_tree_lock(sch);
|
|
-
|
|
- qdisc_purge_queue(cl->q);
|
|
-
|
|
- if (cl->next_alive)
|
|
- cbq_deactivate_class(cl);
|
|
-
|
|
- if (q->tx_borrowed == cl)
|
|
- q->tx_borrowed = q->tx_class;
|
|
- if (q->tx_class == cl) {
|
|
- q->tx_class = NULL;
|
|
- q->tx_borrowed = NULL;
|
|
- }
|
|
-#ifdef CONFIG_NET_CLS_ACT
|
|
- if (q->rx_class == cl)
|
|
- q->rx_class = NULL;
|
|
-#endif
|
|
-
|
|
- cbq_unlink_class(cl);
|
|
- cbq_adjust_levels(cl->tparent);
|
|
- cl->defmap = 0;
|
|
- cbq_sync_defmap(cl);
|
|
-
|
|
- cbq_rmprio(q, cl);
|
|
- sch_tree_unlock(sch);
|
|
-
|
|
- cbq_destroy_class(sch, cl);
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
|
|
- struct netlink_ext_ack *extack)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
-
|
|
- if (cl == NULL)
|
|
- cl = &q->link;
|
|
-
|
|
- return cl->block;
|
|
-}
|
|
-
|
|
-static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
|
|
- u32 classid)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct cbq_class *p = (struct cbq_class *)parent;
|
|
- struct cbq_class *cl = cbq_class_lookup(q, classid);
|
|
-
|
|
- if (cl) {
|
|
- if (p && p->level <= cl->level)
|
|
- return 0;
|
|
- cl->filters++;
|
|
- return (unsigned long)cl;
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
|
|
-{
|
|
- struct cbq_class *cl = (struct cbq_class *)arg;
|
|
-
|
|
- cl->filters--;
|
|
-}
|
|
-
|
|
-static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
|
|
-{
|
|
- struct cbq_sched_data *q = qdisc_priv(sch);
|
|
- struct cbq_class *cl;
|
|
- unsigned int h;
|
|
-
|
|
- if (arg->stop)
|
|
- return;
|
|
-
|
|
- for (h = 0; h < q->clhash.hashsize; h++) {
|
|
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
|
|
- if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
|
|
- return;
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-static const struct Qdisc_class_ops cbq_class_ops = {
|
|
- .graft = cbq_graft,
|
|
- .leaf = cbq_leaf,
|
|
- .qlen_notify = cbq_qlen_notify,
|
|
- .find = cbq_find,
|
|
- .change = cbq_change_class,
|
|
- .delete = cbq_delete,
|
|
- .walk = cbq_walk,
|
|
- .tcf_block = cbq_tcf_block,
|
|
- .bind_tcf = cbq_bind_filter,
|
|
- .unbind_tcf = cbq_unbind_filter,
|
|
- .dump = cbq_dump_class,
|
|
- .dump_stats = cbq_dump_class_stats,
|
|
-};
|
|
-
|
|
-static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
|
|
- .next = NULL,
|
|
- .cl_ops = &cbq_class_ops,
|
|
- .id = "cbq",
|
|
- .priv_size = sizeof(struct cbq_sched_data),
|
|
- .enqueue = cbq_enqueue,
|
|
- .dequeue = cbq_dequeue,
|
|
- .peek = qdisc_peek_dequeued,
|
|
- .init = cbq_init,
|
|
- .reset = cbq_reset,
|
|
- .destroy = cbq_destroy,
|
|
- .change = NULL,
|
|
- .dump = cbq_dump,
|
|
- .dump_stats = cbq_dump_stats,
|
|
- .owner = THIS_MODULE,
|
|
-};
|
|
-
|
|
-static int __init cbq_module_init(void)
|
|
-{
|
|
- return register_qdisc(&cbq_qdisc_ops);
|
|
-}
|
|
-static void __exit cbq_module_exit(void)
|
|
-{
|
|
- unregister_qdisc(&cbq_qdisc_ops);
|
|
-}
|
|
-module_init(cbq_module_init)
|
|
-module_exit(cbq_module_exit)
|
|
-MODULE_LICENSE("GPL");
|
|
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
deleted file mode 100644
index 401ffaf87d622..0000000000000
--- a/net/sched/sch_dsmark.c
+++ /dev/null
@@ -1,518 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* net/sched/sch_dsmark.c - Differentiated Services field marker */
-
-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
-
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/bitops.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <asm/byteorder.h>
-
-/*
- * classid	class		marking
- * -------	-----		-------
- *   n/a	  0		n/a
- *   x:0	  1		use entry [0]
- *   ...	 ...		...
- *   x:y y>0	 y+1		use entry [y]
- *   ...	 ...		...
- * x:indices-1	indices		use entry [indices-1]
- *   ...	 ...		...
- *   x:y	 y+1		use entry [y & (indices-1)]
- *   ...	 ...		...
- * 0xffff	0x10000		use entry [indices-1]
- */
-
-
-#define NO_DEFAULT_INDEX	(1 << 16)
-
-struct mask_value {
-	u8			mask;
-	u8			value;
-};
-
-struct dsmark_qdisc_data {
-	struct Qdisc		*q;
-	struct tcf_proto __rcu	*filter_list;
-	struct tcf_block	*block;
-	struct mask_value	*mv;
-	u16			indices;
-	u8			set_tc_index;
-	u32			default_index;	/* index range is 0...0xffff */
-#define DSMARK_EMBEDDED_SZ	16
-	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
-};
-
-static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
-{
-	return index <= p->indices && index > 0;
-}
-
-/* ------------------------- Class/flow operations ------------------------- */
-
-static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
-			struct Qdisc *new, struct Qdisc **old,
-			struct netlink_ext_ack *extack)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
-	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
-		 __func__, sch, p, new, old);
-
-	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					sch->handle, NULL);
-		if (new == NULL)
-			new = &noop_qdisc;
-	}
-
-	*old = qdisc_replace(sch, new, &p->q);
-	return 0;
-}
-
-static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	return p->q;
-}
-
-static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
-{
-	return TC_H_MIN(classid) + 1;
-}
-
-static unsigned long dsmark_bind_filter(struct Qdisc *sch,
-					unsigned long parent, u32 classid)
-{
-	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
-		 __func__, sch, qdisc_priv(sch), classid);
-
-	return dsmark_find(sch, classid);
-}
-
-static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
-{
-}
-
-static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
-	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
-	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
-	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
-	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
-	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
-};
-
-static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
-			 struct nlattr **tca, unsigned long *arg,
-			 struct netlink_ext_ack *extack)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	struct nlattr *opt = tca[TCA_OPTIONS];
-	struct nlattr *tb[TCA_DSMARK_MAX + 1];
-	int err = -EINVAL;
-
-	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
-		 __func__, sch, p, classid, parent, *arg);
-
-	if (!dsmark_valid_index(p, *arg)) {
-		err = -ENOENT;
-		goto errout;
-	}
-
-	if (!opt)
-		goto errout;
-
-	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
-					  dsmark_policy, NULL);
-	if (err < 0)
-		goto errout;
-
-	if (tb[TCA_DSMARK_VALUE])
-		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
-
-	if (tb[TCA_DSMARK_MASK])
-		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
-
-	err = 0;
-
-errout:
-	return err;
-}
-
-static int dsmark_delete(struct Qdisc *sch, unsigned long arg,
-			 struct netlink_ext_ack *extack)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
-	if (!dsmark_valid_index(p, arg))
-		return -EINVAL;
-
-	p->mv[arg - 1].mask = 0xff;
-	p->mv[arg - 1].value = 0;
-
-	return 0;
-}
-
-static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	int i;
-
-	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
-		 __func__, sch, p, walker);
-
-	if (walker->stop)
-		return;
-
-	for (i = 0; i < p->indices; i++) {
-		if (p->mv[i].mask == 0xff && !p->mv[i].value) {
-			walker->count++;
-			continue;
-		}
-		if (!tc_qdisc_stats_dump(sch, i + 1, walker))
-			break;
-	}
-}
-
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
-					  struct netlink_ext_ack *extack)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
-	return p->block;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
-
-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
-			  struct sk_buff **to_free)
-{
-	unsigned int len = qdisc_pkt_len(skb);
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	int err;
-
-	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
-
-	if (p->set_tc_index) {
-		int wlen = skb_network_offset(skb);
-
-		switch (skb_protocol(skb, true)) {
-		case htons(ETH_P_IP):
-			wlen += sizeof(struct iphdr);
-			if (!pskb_may_pull(skb, wlen) ||
-			    skb_try_make_writable(skb, wlen))
-				goto drop;
-
-			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
-				& ~INET_ECN_MASK;
-			break;
-
-		case htons(ETH_P_IPV6):
-			wlen += sizeof(struct ipv6hdr);
-			if (!pskb_may_pull(skb, wlen) ||
-			    skb_try_make_writable(skb, wlen))
-				goto drop;
-
-			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
-				& ~INET_ECN_MASK;
-			break;
-		default:
-			skb->tc_index = 0;
-			break;
-		}
-	}
-
-	if (TC_H_MAJ(skb->priority) == sch->handle)
-		skb->tc_index = TC_H_MIN(skb->priority);
-	else {
-		struct tcf_result res;
-		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
-		int result = tcf_classify(skb, NULL, fl, &res, false);
-
-		pr_debug("result %d class 0x%04x\n", result, res.classid);
-
-		switch (result) {
-#ifdef CONFIG_NET_CLS_ACT
-		case TC_ACT_QUEUED:
-		case TC_ACT_STOLEN:
-		case TC_ACT_TRAP:
-			__qdisc_drop(skb, to_free);
-			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-
-		case TC_ACT_SHOT:
-			goto drop;
-#endif
-		case TC_ACT_OK:
-			skb->tc_index = TC_H_MIN(res.classid);
-			break;
-
-		default:
-			if (p->default_index != NO_DEFAULT_INDEX)
-				skb->tc_index = p->default_index;
-			break;
-		}
-	}
-
-	err = qdisc_enqueue(skb, p->q, to_free);
-	if (err != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(err))
-			qdisc_qstats_drop(sch);
-		return err;
-	}
-
-	sch->qstats.backlog += len;
-	sch->q.qlen++;
-
-	return NET_XMIT_SUCCESS;
-
-drop:
-	qdisc_drop(skb, sch, to_free);
-	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-}
-
-static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	struct sk_buff *skb;
-	u32 index;
-
-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
-	skb = qdisc_dequeue_peeked(p->q);
-	if (skb == NULL)
-		return NULL;
-
-	qdisc_bstats_update(sch, skb);
-	qdisc_qstats_backlog_dec(sch, skb);
-	sch->q.qlen--;
-
-	index = skb->tc_index & (p->indices - 1);
-	pr_debug("index %d->%d\n", skb->tc_index, index);
-
-	switch (skb_protocol(skb, true)) {
-	case htons(ETH_P_IP):
-		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
-				    p->mv[index].value);
-		break;
-	case htons(ETH_P_IPV6):
-		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
-				    p->mv[index].value);
-		break;
-	default:
-		/*
-		 * Only complain if a change was actually attempted.
-		 * This way, we can send non-IP traffic through dsmark
-		 * and don't need yet another qdisc as a bypass.
-		 */
-		if (p->mv[index].mask != 0xff || p->mv[index].value)
-			pr_warn("%s: unsupported protocol %d\n",
-				__func__, ntohs(skb_protocol(skb, true)));
-		break;
-	}
-
-	return skb;
-}
-
-static struct sk_buff *dsmark_peek(struct Qdisc *sch)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
-	return p->q->ops->peek(p->q);
-}
-
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
-		       struct netlink_ext_ack *extack)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	struct nlattr *tb[TCA_DSMARK_MAX + 1];
-	int err = -EINVAL;
-	u32 default_index = NO_DEFAULT_INDEX;
-	u16 indices;
-	int i;
-
-	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
-
-	if (!opt)
-		goto errout;
-
-	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
-	if (err)
-		return err;
-
-	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
-					  dsmark_policy, NULL);
-	if (err < 0)
-		goto errout;
-
-	err = -EINVAL;
-	if (!tb[TCA_DSMARK_INDICES])
-		goto errout;
-	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
-
-	if (hweight32(indices) != 1)
-		goto errout;
-
-	if (tb[TCA_DSMARK_DEFAULT_INDEX])
-		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
-
-	if (indices <= DSMARK_EMBEDDED_SZ)
-		p->mv = p->embedded;
-	else
-		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
-	if (!p->mv) {
-		err = -ENOMEM;
-		goto errout;
-	}
-	for (i = 0; i < indices; i++) {
-		p->mv[i].mask = 0xff;
-		p->mv[i].value = 0;
-	}
-	p->indices = indices;
-	p->default_index = default_index;
-	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
-
-	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
-				 NULL);
-	if (p->q == NULL)
-		p->q = &noop_qdisc;
-	else
-		qdisc_hash_add(p->q, true);
-
-	pr_debug("%s: qdisc %p\n", __func__, p->q);
-
-	err = 0;
-errout:
-	return err;
-}
-
-static void dsmark_reset(struct Qdisc *sch)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-	if (p->q)
-		qdisc_reset(p->q);
-}
-
-static void dsmark_destroy(struct Qdisc *sch)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
-	tcf_block_put(p->block);
-	qdisc_put(p->q);
-	if (p->mv != p->embedded)
-		kfree(p->mv);
-}
-
-static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
-			     struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	struct nlattr *opts = NULL;
-
-	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
-
-	if (!dsmark_valid_index(p, cl))
-		return -EINVAL;
-
-	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
-	tcm->tcm_info = p->q->handle;
-
-	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
-	if (opts == NULL)
-		goto nla_put_failure;
-	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
-	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
-		goto nla_put_failure;
-
-	return nla_nest_end(skb, opts);
-
-nla_put_failure:
-	nla_nest_cancel(skb, opts);
-	return -EMSGSIZE;
-}
-
-static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	struct nlattr *opts = NULL;
-
-	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
-	if (opts == NULL)
-		goto nla_put_failure;
-	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
-		goto nla_put_failure;
-
-	if (p->default_index != NO_DEFAULT_INDEX &&
-	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
-		goto nla_put_failure;
-
-	if (p->set_tc_index &&
-	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
-		goto nla_put_failure;
-
-	return nla_nest_end(skb, opts);
-
-nla_put_failure:
-	nla_nest_cancel(skb, opts);
-	return -EMSGSIZE;
-}
-
-static const struct Qdisc_class_ops dsmark_class_ops = {
-	.graft = dsmark_graft,
-	.leaf = dsmark_leaf,
-	.find = dsmark_find,
-	.change = dsmark_change,
-	.delete = dsmark_delete,
-	.walk = dsmark_walk,
-	.tcf_block = dsmark_tcf_block,
-	.bind_tcf = dsmark_bind_filter,
-	.unbind_tcf = dsmark_unbind_filter,
-	.dump = dsmark_dump_class,
-};
-
-static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
-	.next = NULL,
-	.cl_ops = &dsmark_class_ops,
-	.id = "dsmark",
-	.priv_size = sizeof(struct dsmark_qdisc_data),
-	.enqueue = dsmark_enqueue,
-	.dequeue = dsmark_dequeue,
-	.peek = dsmark_peek,
-	.init = dsmark_init,
-	.reset = dsmark_reset,
-	.destroy = dsmark_destroy,
-	.change = NULL,
-	.dump = dsmark_dump,
-	.owner = THIS_MODULE,
-};
-
-static int __init dsmark_module_init(void)
-{
-	return register_qdisc(&dsmark_qdisc_ops);
-}
-
-static void __exit dsmark_module_exit(void)
-{
-	unregister_qdisc(&dsmark_qdisc_ops);
-}
-
-module_init(dsmark_module_init)
-module_exit(dsmark_module_exit)
-
-MODULE_LICENSE("GPL");
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 8cc42aea19c7e..2e14d4c37e2dc 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -19,6 +19,35 @@
 #include <linux/rtnetlink.h>
 #include <net/switchdev.h>
 
+static bool switchdev_obj_eq(const struct switchdev_obj *a,
+			     const struct switchdev_obj *b)
+{
+	const struct switchdev_obj_port_vlan *va, *vb;
+	const struct switchdev_obj_port_mdb *ma, *mb;
+
+	if (a->id != b->id || a->orig_dev != b->orig_dev)
+		return false;
+
+	switch (a->id) {
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		va = SWITCHDEV_OBJ_PORT_VLAN(a);
+		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
+		return va->flags == vb->flags &&
+		       va->vid == vb->vid &&
+		       va->changed == vb->changed;
+	case SWITCHDEV_OBJ_ID_PORT_MDB:
+	case SWITCHDEV_OBJ_ID_HOST_MDB:
+		ma = SWITCHDEV_OBJ_PORT_MDB(a);
+		mb = SWITCHDEV_OBJ_PORT_MDB(b);
+		return ma->vid == mb->vid &&
+		       ether_addr_equal(ma->addr, mb->addr);
+	default:
+		break;
+	}
+
+	BUG();
+}
+
 static LIST_HEAD(deferred);
 static DEFINE_SPINLOCK(deferred_lock);
 
@@ -307,6 +336,50 @@ int switchdev_port_obj_del(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 
+/**
+ * switchdev_port_obj_act_is_deferred - Is object action pending?
+ *
+ * @dev: port device
+ * @nt: type of action; add or delete
+ * @obj: object to test
+ *
+ * Returns true if a deferred item is pending, which is
+ * equivalent to the action @nt on an object @obj.
+ *
+ * rtnl_lock must be held.
+ */
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+					enum switchdev_notifier_type nt,
+					const struct switchdev_obj *obj)
+{
+	struct switchdev_deferred_item *dfitem;
+	bool found = false;
+
+	ASSERT_RTNL();
+
+	spin_lock_bh(&deferred_lock);
+
+	list_for_each_entry(dfitem, &deferred, list) {
+		if (dfitem->dev != dev)
+			continue;
+
+		if ((dfitem->func == switchdev_port_obj_add_deferred &&
+		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
+		    (dfitem->func == switchdev_port_obj_del_deferred &&
+		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
+			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
+	spin_unlock_bh(&deferred_lock);
+
+	return found;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
+
 static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
 static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
 
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 80b42a3e78830..6b7189a520af7 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -1098,7 +1098,7 @@ static u16 tls_user_config(struct tls_context *ctx, bool tx)
 	return 0;
 }
 
-static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
+static int tls_get_info(struct sock *sk, struct sk_buff *skb)
 {
 	u16 version, cipher_type;
 	struct tls_context *ctx;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index c8cbdd02a784e..93e1bfa72d791 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1845,7 +1845,8 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 			   u8 *control,
 			   size_t skip,
 			   size_t len,
-			   bool is_peek)
+			   bool is_peek,
+			   bool *more)
 {
 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
 	struct tls_msg *tlm;
@@ -1858,7 +1859,7 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 
 		err = tls_record_content_type(msg, tlm, control);
 		if (err <= 0)
-			goto out;
+			goto more;
 
 		if (skip < rxm->full_len)
 			break;
@@ -1876,12 +1877,12 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 
 		err = tls_record_content_type(msg, tlm, control);
 		if (err <= 0)
-			goto out;
+			goto more;
 
 		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
 					    msg, chunk);
 		if (err < 0)
-			goto out;
+			goto more;
 
 		len = len - chunk;
 		copied = copied + chunk;
@@ -1917,6 +1918,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 
 out:
 	return copied ? : err;
+more:
+	if (more)
+		*more = true;
+	goto out;
 }
 
 static bool
@@ -2020,6 +2025,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	int target, err;
 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
 	bool is_peek = flags & MSG_PEEK;
+	bool rx_more = false;
 	bool released = true;
 	bool bpf_strp_enabled;
 	bool zc_capable;
@@ -2039,12 +2045,12 @@ int tls_sw_recvmsg(struct sock *sk,
 		goto end;
 
 	/* Process pending decrypted records. It must be non-zero-copy */
-	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
+	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
 	if (err < 0)
 		goto end;
 
 	copied = err;
-	if (len <= copied)
+	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
 		goto end;
 
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
@@ -2137,6 +2143,8 @@ int tls_sw_recvmsg(struct sock *sk,
 			decrypted += chunk;
 			len -= chunk;
 			__skb_queue_tail(&ctx->rx_list, skb);
+			if (unlikely(control != TLS_RECORD_TYPE_DATA))
+				break;
 			continue;
 		}
 
@@ -2201,10 +2209,10 @@ int tls_sw_recvmsg(struct sock *sk,
 		/* Drain records from the rx_list & copy if required */
 		if (is_peek || is_kvec)
 			err = process_rx_list(ctx, msg, &control, copied,
-					      decrypted, is_peek);
+					      decrypted, is_peek, NULL);
 		else
 			err = process_rx_list(ctx, msg, &control, 0,
-					      async_copy_bytes, is_peek);
+					      async_copy_bytes, is_peek, NULL);
 	}
 
 	copied += decrypted;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 70fb14b8bab07..c259d3227a9e2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3960,6 +3960,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
 			if_idx++;
 		}
 
+		if_start = 0;
 		wp_idx++;
 	}
 out:
diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
index d5c389df6045e..4de98b7bbea95 100755
--- a/scripts/bpf_doc.py
+++ b/scripts/bpf_doc.py
@@ -495,7 +495,7 @@ eBPF programs can have an associated license, passed along with the bytecode
 instructions to the kernel when the programs are loaded. The format for that
 string is identical to the one in use for kernel modules (Dual licenses, such
 as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
-programs that are compatible with the GNU Privacy License (GPL).
+programs that are compatible with the GNU General Public License (GNU GPL).
 
 In order to use such helpers, the eBPF program must be loaded with the correct
 license string passed (via **attr**) to the **bpf**\ () system call, and this
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 2cfca78f0401f..47a4c363227cc 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -740,19 +740,25 @@ static int wm_adsp_request_firmware_file(struct wm_adsp *dsp,
 					 const char *filetype)
 {
 	struct cs_dsp *cs_dsp = &dsp->cs_dsp;
+	const char *fwf;
 	char *s, c;
 	int ret = 0;
 
+	if (dsp->fwf_name)
+		fwf = dsp->fwf_name;
+	else
+		fwf = dsp->cs_dsp.name;
+
 	if (system_name && asoc_component_prefix)
 		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-%s.%s", dir, dsp->part,
-				      dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
+				      fwf, wm_adsp_fw[dsp->fw].file, system_name,
 				      asoc_component_prefix, filetype);
 	else if (system_name)
 		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s.%s", dir, dsp->part,
-				      dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
+				      fwf, wm_adsp_fw[dsp->fw].file, system_name,
 				      filetype);
 	else
-		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, dsp->fwf_name,
+		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, fwf,
 				      wm_adsp_fw[dsp->fw].file, filetype);
 
 	if (*filename == NULL)
@@ -842,29 +848,18 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
 	}
 
 	adsp_err(dsp, "Failed to request firmware <%s>%s-%s-%s<-%s<%s>>.wmfw\n",
-		 cirrus_dir, dsp->part, dsp->fwf_name, wm_adsp_fw[dsp->fw].file,
-		 system_name, asoc_component_prefix);
+		 cirrus_dir, dsp->part,
+		 dsp->fwf_name ? dsp->fwf_name : dsp->cs_dsp.name,
+		 wm_adsp_fw[dsp->fw].file, system_name, asoc_component_prefix);
 
 	return -ENOENT;
 }
 
 static int wm_adsp_common_init(struct wm_adsp *dsp)
 {
-	char *p;
-
 	INIT_LIST_HEAD(&dsp->compr_list);
 	INIT_LIST_HEAD(&dsp->buffer_list);
 
-	if (!dsp->fwf_name) {
-		p = devm_kstrdup(dsp->cs_dsp.dev, dsp->cs_dsp.name, GFP_KERNEL);
-		if (!p)
-			return -ENOMEM;
-
-		dsp->fwf_name = p;
-		for (; *p != 0; ++p)
-			*p = tolower(*p);
-	}
-
 	return 0;
 }
 
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index bcceebca915ac..484b0e7c2defa 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -578,6 +578,11 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
 		.compatible = "allwinner,sun50i-h6-spdif",
 		.data = &sun50i_h6_spdif_quirks,
 	},
+	{
+		.compatible = "allwinner,sun50i-h616-spdif",
+		/* Essentially the same as the H6, but without RX */
+		.data = &sun50i_h6_spdif_quirks,
+	},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 33db334e65566..a676ad093d189 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -328,8 +328,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
 		if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR)
 			return ret;
 		err = uac_clock_selector_set_val(chip, entity_id, cur);
-		if (err < 0)
+		if (err < 0) {
+			if (pins == 1) {
+				usb_audio_dbg(chip,
+					      "%s(): selector returned an error, "
+					      "assuming a firmware bug, id %d, ret %d\n",
+					      __func__, clock_id, err);
+				return ret;
+			}
 			return err;
+		}
 	}
 
 	if (!validate || ret > 0 || !chip->autoclock)
diff --git a/sound/usb/format.c b/sound/usb/format.c
index ab5fed9f55b60..3b45d0ee76938 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -470,9 +470,11 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
 					   int clock)
 {
 	struct usb_device *dev = chip->dev;
+	struct usb_host_interface *alts;
 	unsigned int *table;
 	unsigned int nr_rates;
 	int i, err;
+	u32 bmControls;
 
 	/* performing the rate verification may lead to unexpected USB bus
 	 * behavior afterwards by some unknown reason. Do this only for the
@@ -481,6 +483,24 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
 	if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES))
 		return 0; /* don't perform the validation as default */
 
+	alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting);
+	if (!alts)
+		return 0;
+
+	if (fp->protocol == UAC_VERSION_3) {
+		struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc(
+			alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+		bmControls = le32_to_cpu(as->bmControls);
+	} else {
+		struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(
+			alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+		bmControls = as->bmControls;
+	}
+
+	if (!uac_v2v3_control_is_readable(bmControls,
+					  UAC2_AS_VAL_ALT_SETTINGS))
+		return 0;
+
 	table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
 	if (!table)
 		return -ENOMEM;
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json
deleted file mode 100644
index f5bc8670a67d1..0000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json
+++ /dev/null
@@ -1,94 +0,0 @@
-[
-    {
-        "id": "7628",
-        "name": "Create ATM with default setting",
-        "category": [
-            "qdisc",
-            "atm"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc atm 1: root refcnt",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "390a",
-        "name": "Delete ATM with valid handle",
-        "category": [
-            "qdisc",
-            "atm"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true",
-            "$TC qdisc add dev $DUMMY handle 1: root atm"
-        ],
-        "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc atm 1: root refcnt",
-        "matchCount": "0",
-        "teardown": [
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "32a0",
-        "name": "Show ATM class",
-        "category": [
-            "qdisc",
-            "atm"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
-        "expExitCode": "0",
-        "verifyCmd": "$TC class show dev $DUMMY",
-        "matchPattern": "class atm 1: parent 1:",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "6310",
-        "name": "Dump ATM stats",
-        "category": [
-            "qdisc",
-            "atm"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
-        "expExitCode": "0",
-        "verifyCmd": "$TC -s qdisc show dev $DUMMY",
-        "matchPattern": "qdisc atm 1: root refcnt",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    }
-]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json
deleted file mode 100644
index 1ab21c83a1223..0000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json
+++ /dev/null
@@ -1,184 +0,0 @@
-[
-    {
-        "id": "3460",
-        "name": "Create CBQ with default setting",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "0592",
-        "name": "Create CBQ with mpu",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 mpu 1000",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "4684",
-        "name": "Create CBQ with valid cell num",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 cell 128",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "4345",
-        "name": "Create CBQ with invalid cell num",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 cell 100",
-        "expExitCode": "1",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "0",
-        "teardown": [
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "4525",
-        "name": "Create CBQ with valid ewma",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 ewma 16",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "6784",
-        "name": "Create CBQ with invalid ewma",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 ewma 128",
-        "expExitCode": "1",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "0",
-        "teardown": [
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "5468",
-        "name": "Delete CBQ with handle",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true",
-            "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000"
-        ],
-        "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "0",
-        "teardown": [
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "492a",
-        "name": "Show CBQ class",
-        "category": [
-            "qdisc",
-            "cbq"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000",
-        "expExitCode": "0",
-        "verifyCmd": "$TC class show dev $DUMMY",
-        "matchPattern": "class cbq 1: root rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    }
-]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json
deleted file mode 100644
index c030795f9c37d..0000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json
+++ /dev/null
@@ -1,140 +0,0 @@
-[
-    {
-        "id": "6345",
-        "name": "Create DSMARK with default setting",
-        "category": [
-            "qdisc",
-            "dsmark"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "3462",
-        "name": "Create DSMARK with default_index setting",
-        "category": [
-            "qdisc",
-            "dsmark"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 512",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 default_index 0x0200",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "ca95",
-        "name": "Create DSMARK with set_tc_index flag",
-        "category": [
-            "qdisc",
-            "dsmark"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 set_tc_index",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 set_tc_index",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "a950",
-        "name": "Create DSMARK with multiple setting",
-        "category": [
-            "qdisc",
-            "dsmark"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 1024 set_tc_index",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 default_index 0x0400 set_tc_index",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "4092",
-        "name": "Delete DSMARK with handle",
-        "category": [
-            "qdisc",
-            "dsmark"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true",
-            "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 1024"
-        ],
-        "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
-        "expExitCode": "0",
-        "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400",
-        "matchCount": "0",
-        "teardown": [
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    },
-    {
-        "id": "5930",
-        "name": "Show DSMARK class",
-        "category": [
-            "qdisc",
-            "dsmark"
-        ],
-        "plugins": {
-            "requires": "nsPlugin"
-        },
-        "setup": [
-            "$IP link add dev $DUMMY type dummy || /bin/true"
-        ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024",
-        "expExitCode": "0",
-        "verifyCmd": "$TC class show dev $DUMMY",
-        "matchPattern": "class dsmark 1:",
-        "matchCount": "0",
-        "teardown": [
-            "$TC qdisc del dev $DUMMY handle 1: root",
-            "$IP link del dev $DUMMY type dummy"
-        ]
-    }
-]