diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index d83a3f47e20074..184f2f96f6a547 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3287,6 +3287,11 @@

mga= [HW,DRM]

+ microcode.force_minrev= [X86]
+ Format: <bool>
+ Enable or disable the microcode minimal revision
+ enforcement for the runtime microcode loader.
+
min_addr=nn[KMG] [KNL,BOOT,IA-64] All physical memory below this
physical address is ignored.

diff --git a/Makefile b/Makefile
index 67c5799f259e2e..892ed237b1e1b6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 80
+SUBLEVEL = 81
EXTRAVERSION =
NAME = Pinguïn Aangedreven

diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
index 5fcc5f32be2d79..2963d634baba99 100644
--- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
@@ -367,6 +367,11 @@ &u2phy_host {
status = "okay";
};

+&uart5 {
+ /delete-property/ dmas;
+ /delete-property/ dma-names;
+};
+
/* Mule UCAN */
&usb_host0_ehci {
status = "okay";
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index fc8130f995c1ee..6907c456ac8c05 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
- : [ov] "Jr" (oldval), [nv] "Jr" (newval)
+ : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
: "memory");
__disable_user_access();

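The cast in the futex hunk above matters because RV64's lr.w instruction sign-extends the loaded 32-bit value into a 64-bit register, so the comparison operand handed to the asm must be sign-extended the same way, or a cmpxchg on a value with bit 31 set spuriously fails. A standalone illustration of the mismatch (ordinary C, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t oldval = 0x80000000u;               /* bit 31 set */
		int64_t loaded  = (int64_t)(int32_t)oldval;  /* what lr.w leaves in a register */
		int64_t unfixed = (int64_t)oldval;           /* zero-extended: the old, buggy operand */
		int64_t fixed   = (int64_t)(int32_t)oldval;  /* the (long)(int) cast from the hunk */

		printf("%d %d\n", loaded == unfixed, loaded == fixed); /* prints: 0 1 */
		return 0;
	}
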
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 1ebf20dfbaa698..459e61ad7d2b68 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -236,8 +236,9 @@ struct kvm_vcpu_arch {
/* Cache pages needed to program page tables with spinlock held */
struct kvm_mmu_memory_cache mmu_page_cache;

- /* VCPU power-off state */
- bool power_off;
+ /* VCPU power state */
+ struct kvm_mp_state mp_state;
+ spinlock_t mp_state_lock;

/* Don't run the VCPU (blocked) */
bool pause;
@@ -351,7 +352,10 @@ int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 88b6220b260879..048b9b23d7543e 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -211,12 +211,6 @@ static size_t get_rt_frame_size(bool cal_all)
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
- /*
- * Preserved a __riscv_ctx_hdr for END signal context header if an
- * extension uses __riscv_extra_ext_header
- */
- if (total_context_size)
- total_context_size += sizeof(struct __riscv_ctx_hdr);

frame_size += total_context_size;

diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 82229db1ce73f3..9584d62c96ee74 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -100,6 +100,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;

+ spin_lock_init(&vcpu->arch.mp_state_lock);
+
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -193,7 +195,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
- !vcpu->arch.power_off && !vcpu->arch.pause);
+ !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
@@ -421,26 +423,42 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}

-void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
- vcpu->arch.power_off = true;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}

-void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
- vcpu->arch.power_off = false;
+ spin_lock(&vcpu->arch.mp_state_lock);
+ __kvm_riscv_vcpu_power_off(vcpu);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
kvm_vcpu_wake_up(vcpu);
}

+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+ spin_lock(&vcpu->arch.mp_state_lock);
+ __kvm_riscv_vcpu_power_on(vcpu);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
+bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
+{
+ return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- if (vcpu->arch.power_off)
- mp_state->mp_state = KVM_MP_STATE_STOPPED;
- else
- mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+ *mp_state = READ_ONCE(vcpu->arch.mp_state);

return 0;
}
@@ -450,17 +468,21 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
int ret = 0;

+ spin_lock(&vcpu->arch.mp_state_lock);
+
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.power_off = false;
+ WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
break;
case KVM_MP_STATE_STOPPED:
- kvm_riscv_vcpu_power_off(vcpu);
+ __kvm_riscv_vcpu_power_off(vcpu);
break;
default:
ret = -EINVAL;
}

+ spin_unlock(&vcpu->arch.mp_state_lock);
+
return ret;
}

@@ -561,11 +583,11 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
kvm_vcpu_srcu_read_unlock(vcpu);
rcuwait_wait_event(wait,
- (!vcpu->arch.power_off) && (!vcpu->arch.pause),
+ (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
TASK_INTERRUPTIBLE);
kvm_vcpu_srcu_read_lock(vcpu);

- if (vcpu->arch.power_off || vcpu->arch.pause) {
+ if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
/*
* Awaken to handle a signal, request to
* sleep again later.
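Throughout these vcpu.c hunks the same discipline repeats: writers take mp_state_lock and publish with WRITE_ONCE(), while hot-path readers such as kvm_riscv_vcpu_stopped() use a lockless READ_ONCE(). A minimal sketch of that pattern (illustrative types, not the kernel structures):

	/* Illustrative only: writers serialize, readers stay lockless. */
	struct demo_state {
		spinlock_t lock;
		u32 state;
	};

	static void demo_set(struct demo_state *s, u32 v)
	{
		spin_lock(&s->lock);     /* concurrent writers serialize here */
		WRITE_ONCE(s->state, v); /* single atomic store for lockless readers */
		spin_unlock(&s->lock);
	}

	static u32 demo_get(struct demo_state *s)
	{
		return READ_ONCE(s->state); /* no lock on the read-mostly fast path */
	}
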
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 7a7fe40d0930be..be43278109f4e8 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -102,8 +102,11 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
unsigned long i;
struct kvm_vcpu *tmp;

- kvm_for_each_vcpu(i, tmp, vcpu->kvm)
- tmp->arch.power_off = true;
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ spin_lock(&vcpu->arch.mp_state_lock);
+ WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+ }
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

memset(&run->system_event, 0, sizeof(run->system_event));
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index 7dca0e9381d9a5..7e349b4ee926cb 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -18,12 +18,18 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_vcpu *target_vcpu;
unsigned long target_vcpuid = cp->a0;
+ int ret = 0;

target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
- if (!target_vcpu->arch.power_off)
- return SBI_ERR_ALREADY_AVAILABLE;
+
+ spin_lock(&target_vcpu->arch.mp_state_lock);
+
+ if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
+ ret = SBI_ERR_ALREADY_AVAILABLE;
+ goto out;
+ }

reset_cntx = &target_vcpu->arch.guest_reset_context;
/* start address */
@@ -34,19 +40,31 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
reset_cntx->a1 = cp->a2;
kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);

- kvm_riscv_vcpu_power_on(target_vcpu);
+ __kvm_riscv_vcpu_power_on(target_vcpu);

- return 0;
+out:
+ spin_unlock(&target_vcpu->arch.mp_state_lock);
+
+ return ret;
}

static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.power_off)
- return SBI_ERR_FAILURE;
+ int ret = 0;

- kvm_riscv_vcpu_power_off(vcpu);
+ spin_lock(&vcpu->arch.mp_state_lock);

- return 0;
+ if (kvm_riscv_vcpu_stopped(vcpu)) {
+ ret = SBI_ERR_FAILURE;
+ goto out;
+ }
+
+ __kvm_riscv_vcpu_power_off(vcpu);
+
+out:
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
+ return ret;
}

static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
@@ -58,12 +76,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
- if (!target_vcpu->arch.power_off)
- return SBI_HSM_STATE_STARTED;
- else if (vcpu->stat.generic.blocking)
+ if (kvm_riscv_vcpu_stopped(target_vcpu))
+ return SBI_HSM_STATE_STOPPED;
+ else if (target_vcpu->stat.generic.blocking)
return SBI_HSM_STATE_SUSPENDED;
else
- return SBI_HSM_STATE_STOPPED;
+ return SBI_HSM_STATE_STARTED;
}

static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -71,14 +89,11 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
{
int ret = 0;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
- struct kvm *kvm = vcpu->kvm;
unsigned long funcid = cp->a6;

switch (funcid) {
case SBI_EXT_HSM_HART_START:
- mutex_lock(&kvm->lock);
ret = kvm_sbi_hsm_vcpu_start(vcpu);
- mutex_unlock(&kvm->lock);
break;
case SBI_EXT_HSM_HART_STOP:
ret = kvm_sbi_hsm_vcpu_stop(vcpu);
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 7c4d5d38a33908..87ec68ed52d762 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
u64 next_cycle;

if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
- retdata->err_val = SBI_ERR_INVALID_PARAM;
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}

@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
+ unsigned long hart_bit = 0, sentmask = 0;

if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
- retdata->err_val = SBI_ERR_INVALID_PARAM;
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}

@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (hbase != -1UL) {
if (tmp->vcpu_id < hbase)
continue;
- if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ hart_bit = tmp->vcpu_id - hbase;
+ if (hart_bit >= __riscv_xlen)
+ goto done;
+ if (!(hmask & (1UL << hart_bit)))
continue;
}
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
if (ret < 0)
break;
+ sentmask |= 1UL << hart_bit;
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
}

+done:
+ if (hbase != -1UL && (hmask ^ sentmask))
+ retdata->err_val = SBI_ERR_INVALID_PARAM;
+
return ret;
}

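The sentmask bookkeeping added to the IPI handler exists so that a request naming nonexistent harts fails instead of silently succeeding: with hbase = 4 and hmask = 0x5 the guest targets harts 4 and 6, and if hart 6 does not exist, hmask ^ sentmask stays nonzero and SBI_ERR_INVALID_PARAM is returned. A standalone sketch of that accounting (illustrative values, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hbase = 4, hmask = 0x5; /* target harts 4 and 6 */
		unsigned long present = 0x1f;         /* assume harts 0..4 exist */
		unsigned long sentmask = 0;

		for (unsigned long id = 0; id < 8 * sizeof(unsigned long); id++) {
			if (id < hbase || !(hmask & (1UL << (id - hbase))))
				continue;
			if (present & (1UL << id))
				sentmask |= 1UL << (id - hbase);
		}
		/* Hart 6 was requested but absent: 0x5 ^ 0x1 == 0x4, an error. */
		printf("undelivered: 0x%lx\n", hmask ^ sentmask);
		return 0;
	}
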
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 989d432b58345d..1e666454ebdc3e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1313,17 +1313,39 @@ config X86_REBOOTFIXUPS
config MICROCODE
def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
+ select CRYPTO_LIB_SHA256 if CPU_SUP_AMD

config MICROCODE_LATE_LOADING
bool "Late microcode loading (DANGEROUS)"
default n
- depends on MICROCODE
+ depends on MICROCODE && SMP
help
Loading microcode late, when the system is up and executing instructions
is a tricky business and should be avoided if possible. Just the sequence
of synchronizing all cores and SMT threads is one fragile dance which does
not guarantee that cores might not softlock after the loading. Therefore,
- use this at your own risk. Late loading taints the kernel too.
+ use this at your own risk. Late loading taints the kernel unless the
+ microcode header indicates that it is safe for late loading via the
+ minimal revision check. This minimal revision check can be enforced on
+ the kernel command line with "microcode.minrev=Y".
+
+config MICROCODE_LATE_FORCE_MINREV
+ bool "Enforce late microcode loading minimal revision check"
+ default n
+ depends on MICROCODE_LATE_LOADING
+ help
+ To prevent that users load microcode late which modifies already
+ in use features, newer microcode patches have a minimum revision field
+ in the microcode header, which tells the kernel which minimum
+ revision must be active in the CPU to safely load that new microcode
+ late into the running system. If disabled the check will not
+ be enforced but the kernel will be tainted when the minimal
+ revision check fails.
+
+ This minimal revision check can also be controlled via the
+ "microcode.minrev" parameter on the kernel command line.
+
+ If unsure say Y.

config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 150a365b4fbc89..1458ccaa6a0579 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -623,7 +623,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

- if (event->attr.sample_period && x86_pmu.limit_period) {
+ if (!event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 33aa0c31c21cf1..a2258c894244a8 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -277,7 +277,8 @@ struct apic {

u32 disable_esr : 1,
dest_mode_logical : 1,
- x2apic_set_max_apicid : 1;
+ x2apic_set_max_apicid : 1,
+ nmi_to_offline_cpu : 1;

u32 (*calc_dest_apicid)(unsigned int cpu);

@@ -543,6 +544,8 @@ extern bool default_check_apicid_used(physid_mask_t *map, int apicid);
extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
extern int default_cpu_present_to_apicid(int mps_cpu);

+void apic_send_nmi_to_offline_cpu(unsigned int cpu);
+
#else /* CONFIG_X86_LOCAL_APIC */

static inline unsigned int read_apic_id(void) { return 0; }
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 25050d953eee02..fecc4fe1d68aff 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -71,26 +71,12 @@ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}

extern __noendbr void cet_disable(void);

-struct ucode_cpu_info;
+struct cpu_signature;

-int intel_cpu_collect_info(struct ucode_cpu_info *uci);
-
-static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1,
- unsigned int s2, unsigned int p2)
-{
- if (s1 != s2)
- return false;
-
- /* Processor flags are either both 0 ... */
- if (!p1 && !p2)
- return true;
-
- /* ... or they intersect. */
- return p1 & p2;
-}
+void intel_collect_cpu_info(struct cpu_signature *sig);

extern u64 x86_read_arch_cap_msr(void);
-int intel_find_matching_signature(void *mc, unsigned int csig, int cpf);
+bool intel_find_matching_signature(void *mc, struct cpu_signature *sig);
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);

extern struct cpumask cpus_stop_mask;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index bbbe9d744977d0..1ab475a518e9a5 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -68,11 +68,19 @@ static inline u32 intel_get_microcode_revision(void)

return rev;
}
+#endif /* !CONFIG_CPU_SUP_INTEL */

-void show_ucode_info_early(void);
+bool microcode_nmi_handler(void);
+void microcode_offline_nmi_handler(void);

-#else /* CONFIG_CPU_SUP_INTEL */
-static inline void show_ucode_info_early(void) { }
-#endif /* !CONFIG_CPU_SUP_INTEL */
+#ifdef CONFIG_MICROCODE_LATE_LOADING
+DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static __always_inline bool microcode_nmi_handler_enabled(void)
+{
+ return static_branch_unlikely(&microcode_nmi_handler_enable);
+}
+#else
+static __always_inline bool microcode_nmi_handler_enabled(void) { return false; }
+#endif

#endif /* _ASM_X86_MICROCODE_H */
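The static key declared above keeps the NMI entry path essentially free except while a late microcode load is actually in flight; the jump label is patched in around the update and patched back out afterwards. A sketch of how a caller would gate on it (the handler name and wiring here are hypothetical, not the kernel's actual registration code):

	/* Hypothetical NMI callback gating on the jump label; illustrative only. */
	static int demo_nmi_callback(unsigned int cmd, struct pt_regs *regs)
	{
		/* Compiled to a patched-out jump: near-zero cost in normal operation. */
		if (!microcode_nmi_handler_enabled())
			return NMI_DONE;

		/* Reached only while a late microcode update is in progress. */
		return microcode_nmi_handler() ? NMI_HANDLED : NMI_DONE;
	}
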
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 032a84e2c3ccc7..cd16228611ce8f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -103,6 +103,7 @@ static struct apic apic_flat __ro_after_init = {
.send_IPI_allbutself = default_send_IPI_allbutself,
.send_IPI_all = default_send_IPI_all,
.send_IPI_self = default_send_IPI_self,
+ .nmi_to_offline_cpu = true,

.read = native_apic_mem_read,
.write = native_apic_mem_write,
@@ -175,6 +176,7 @@ static struct apic apic_physflat __ro_after_init = {
.send_IPI_allbutself = default_send_IPI_allbutself,
.send_IPI_all = default_send_IPI_all,
.send_IPI_self = default_send_IPI_self,
+ .nmi_to_offline_cpu = true,

.read = native_apic_mem_read,
.write = native_apic_mem_write,
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index a44ba7209ef3a8..edad86f32e38cb 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -97,6 +97,14 @@ void native_send_call_func_ipi(const struct cpumask *mask)
__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

+void apic_send_nmi_to_offline_cpu(unsigned int cpu)
+{
+ if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu))
+ return;
+ if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
+ return;
+ apic->send_IPI(cpu, NMI_VECTOR);
+}
#endif /* CONFIG_SMP */

static inline int __prepare_ICR2(unsigned int mask)
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index affbff65e49713..a8306089c91bca 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -251,6 +251,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_self = x2apic_send_IPI_self,
+ .nmi_to_offline_cpu = true,

.read = native_apic_msr_read,
.write = native_apic_msr_write,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 788cdb4ee394da..c8ac1b12b8ac6c 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -166,6 +166,7 @@ static struct apic apic_x2apic_phys __ro_after_init = {
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_self = x2apic_send_IPI_self,
+ .nmi_to_offline_cpu = true,

.read = native_apic_msr_read,
.write = native_apic_msr_write,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8bc90a501e7b80..a844110691f978 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2224,8 +2224,6 @@ static inline void setup_getcpu(int cpu)
}

#ifdef CONFIG_X86_64
-static inline void ucode_cpu_init(int cpu) { }
-
static inline void tss_setup_ist(struct tss_struct *tss)
{
/* Set up the per-CPU TSS IST stacks */
@@ -2236,16 +2234,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
/* Only mapped when SEV-ES is active */
tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}
-
#else /* CONFIG_X86_64 */
-
-static inline void ucode_cpu_init(int cpu)
-{
- show_ucode_info_early();
-}
-
static inline void tss_setup_ist(struct tss_struct *tss) { }
-
#endif /* !CONFIG_X86_64 */

static inline void tss_setup_io_bitmap(struct tss_struct *tss)
@@ -2301,8 +2291,6 @@ void cpu_init(void)
struct task_struct *cur = current;
int cpu = raw_smp_processor_id();

- ucode_cpu_init(cpu);
-
#ifdef CONFIG_NUMA
if (this_cpu_read(numa_node) == 0 &&
early_cpu_to_node(cpu) != NUMA_NO_NODE)
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 9651275aecd1bb..dfec2c61e3547d 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -153,8 +153,8 @@ static void geode_configure(void)
u8 ccr3;
local_irq_save(flags);

- /* Suspend on halt power saving and enable #SUSP pin */
- setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+ /* Suspend on halt power saving */
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);

ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index bbd1dc38ea0316..13a632da09ed7b 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -23,20 +23,35 @@

#include <linux/earlycpio.h>
#include <linux/firmware.h>
+#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

+#include <crypto/sha2.h>
+
#include <asm/microcode.h>
#include <asm/processor.h>
+#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
+#include <asm/tlb.h>

#include "internal.h"

+struct ucode_patch {
+ struct list_head plist;
+ void *data;
+ unsigned int size;
+ u32 patch_id;
+ u16 equiv_cpu;
+};
+
+static LIST_HEAD(microcode_cache);
+
#define UCODE_MAGIC 0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
#define UCODE_UCODE_TYPE 0x00000001
@@ -81,6 +96,31 @@ static struct equiv_cpu_table {
struct equiv_cpu_entry *entry;
} equiv_table;

+union zen_patch_rev {
+ struct {
+ __u32 rev : 8,
+ stepping : 4,
+ model : 4,
+ __reserved : 4,
+ ext_model : 4,
+ ext_fam : 8;
+ };
+ __u32 ucode_rev;
+};
+
+union cpuid_1_eax {
+ struct {
+ __u32 stepping : 4,
+ model : 4,
+ family : 4,
+ __reserved0 : 4,
+ ext_model : 4,
+ ext_fam : 8,
+ __reserved1 : 4;
+ };
+ __u32 full;
+};
+
/*
* This points to the current valid container of microcode patches which we will
* save from the initrd/builtin before jettisoning its contents. @mc is the
@@ -88,14 +128,11 @@ static struct equiv_cpu_table {
*/
struct cont_desc {
struct microcode_amd *mc;
- u32 cpuid_1_eax;
u32 psize;
u8 *data;
size_t size;
};

-static u32 ucode_new_rev;
-
/*
* Microcode patch container file is prepended to the initrd in cpio
* format. See Documentation/arch/x86/microcode.rst
@@ -103,10 +140,143 @@ static u32 ucode_new_rev;
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

+/*
+ * This is CPUID(1).EAX on the BSP. It is used in two ways:
+ *
+ * 1. To ignore the equivalence table on Zen1 and newer.
+ *
+ * 2. To match which patches to load because the patch revision ID
+ * already contains the f/m/s for which the microcode is destined
+ * for.
+ */
+static u32 bsp_cpuid_1_eax __ro_after_init;
+
+static bool sha_check = true;
+
+struct patch_digest {
+ u32 patch_id;
+ u8 sha256[SHA256_DIGEST_SIZE];
+};
+
+#include "amd_shas.c"
+
+static int cmp_id(const void *key, const void *elem)
+{
+ struct patch_digest *pd = (struct patch_digest *)elem;
+ u32 patch_id = *(u32 *)key;
+
+ if (patch_id == pd->patch_id)
+ return 0;
+ else if (patch_id < pd->patch_id)
+ return -1;
+ else
+ return 1;
+}
+
+static bool need_sha_check(u32 cur_rev)
+{
+ switch (cur_rev >> 8) {
+ case 0x80012: return cur_rev <= 0x800126f; break;
+ case 0x83010: return cur_rev <= 0x830107c; break;
+ case 0x86001: return cur_rev <= 0x860010e; break;
+ case 0x86081: return cur_rev <= 0x8608108; break;
+ case 0x87010: return cur_rev <= 0x8701034; break;
+ case 0x8a000: return cur_rev <= 0x8a0000a; break;
+ case 0xa0011: return cur_rev <= 0xa0011da; break;
+ case 0xa0012: return cur_rev <= 0xa001243; break;
+ case 0xa1011: return cur_rev <= 0xa101153; break;
+ case 0xa1012: return cur_rev <= 0xa10124e; break;
+ case 0xa1081: return cur_rev <= 0xa108109; break;
+ case 0xa2010: return cur_rev <= 0xa20102f; break;
+ case 0xa2012: return cur_rev <= 0xa201212; break;
+ case 0xa6012: return cur_rev <= 0xa60120a; break;
+ case 0xa7041: return cur_rev <= 0xa704109; break;
+ case 0xa7052: return cur_rev <= 0xa705208; break;
+ case 0xa7080: return cur_rev <= 0xa708009; break;
+ case 0xa70c0: return cur_rev <= 0xa70C009; break;
+ case 0xaa002: return cur_rev <= 0xaa00218; break;
+ default: break;
+ }
+
+ pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
+ pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
+ return true;
+}
+
+static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
+{
+ struct patch_digest *pd = NULL;
+ u8 digest[SHA256_DIGEST_SIZE];
+ struct sha256_state s;
+ int i;
+
+ if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
+ x86_family(bsp_cpuid_1_eax) > 0x19)
+ return true;
+
+ if (!need_sha_check(cur_rev))
+ return true;
+
+ if (!sha_check)
+ return true;
+
+ pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
+ if (!pd) {
+ pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
+ return false;
+ }
+
+ sha256_init(&s);
+ sha256_update(&s, data, len);
+ sha256_final(&s, digest);
+
+ if (memcmp(digest, pd->sha256, sizeof(digest))) {
+ pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE; i++)
+ pr_cont("0x%x ", digest[i]);
+ pr_info("\n");
+
+ return false;
+ }
+
+ return true;
+}
+
+static u32 get_patch_level(void)
+{
+ u32 rev, dummy __always_unused;
+
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+ return rev;
+}
+
+static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
+{
+ union zen_patch_rev p;
+ union cpuid_1_eax c;
+
+ p.ucode_rev = val;
+ c.full = 0;
+
+ c.stepping = p.stepping;
+ c.model = p.model;
+ c.ext_model = p.ext_model;
+ c.family = 0xf;
+ c.ext_fam = p.ext_fam;
+
+ return c;
+}
+
static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
unsigned int i;

+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return 0;
+
if (!et || !et->num_entries)
return 0;

@@ -121,24 +291,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)

/*
* Check whether there is a valid microcode container file at the beginning
- * of @buf of size @buf_size. Set @early to use this function in the early path.
+ * of @buf of size @buf_size.
*/
-static bool verify_container(const u8 *buf, size_t buf_size, bool early)
+static bool verify_container(const u8 *buf, size_t buf_size)
{
u32 cont_magic;

if (buf_size <= CONTAINER_HDR_SZ) {
- if (!early)
- pr_debug("Truncated microcode container header.\n");
-
+ pr_debug("Truncated microcode container header.\n");
return false;
}

cont_magic = *(const u32 *)buf;
if (cont_magic != UCODE_MAGIC) {
- if (!early)
- pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
-
+ pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
return false;
}

@@ -147,23 +313,24 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early)

/*
* Check whether there is a valid, non-truncated CPU equivalence table at the
- * beginning of @buf of size @buf_size. Set @early to use this function in the
- * early path.
+ * beginning of @buf of size @buf_size.
*/
-static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
+static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
const u32 *hdr = (const u32 *)buf;
u32 cont_type, equiv_tbl_len;

- if (!verify_container(buf, buf_size, early))
+ if (!verify_container(buf, buf_size))
return false;

+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return true;
+
cont_type = hdr[1];
if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
- if (!early)
- pr_debug("Wrong microcode container equivalence table type: %u.\n",
- cont_type);
-
+ pr_debug("Wrong microcode container equivalence table type: %u.\n",
+ cont_type);
return false;
}

@@ -172,9 +339,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
equiv_tbl_len = hdr[2];
if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
buf_size < equiv_tbl_len) {
- if (!early)
- pr_debug("Truncated equivalence table.\n");
-
+ pr_debug("Truncated equivalence table.\n");
return false;
}

@@ -183,22 +348,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)

/*
* Check whether there is a valid, non-truncated microcode patch section at the
- * beginning of @buf of size @buf_size. Set @early to use this function in the
- * early path.
+ * beginning of @buf of size @buf_size.
*
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
static bool
-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
+__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;

if (buf_size < SECTION_HDR_SIZE) {
- if (!early)
- pr_debug("Truncated patch section.\n");
-
+ pr_debug("Truncated patch section.\n");
return false;
}

@@ -207,17 +369,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early
p_size = hdr[1];

if (p_type != UCODE_UCODE_TYPE) {
- if (!early)
- pr_debug("Invalid type field (0x%x) in container file section header.\n",
- p_type);
-
+ pr_debug("Invalid type field (0x%x) in container file section header.\n",
+ p_type);
return false;
}

if (p_size < sizeof(struct microcode_header_amd)) {
- if (!early)
- pr_debug("Patch of size %u too short.\n", p_size);
-
+ pr_debug("Patch of size %u too short.\n", p_size);
return false;
}

@@ -232,12 +390,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early
* exceed the per-family maximum). @sh_psize is the size read from the section
* header.
*/
-static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
+static bool __verify_patch_size(u32 sh_psize, size_t buf_size)
{
+ u8 family = x86_family(bsp_cpuid_1_eax);
u32 max_size;

if (family >= 0x15)
- return min_t(u32, sh_psize, buf_size);
+ goto ret;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
@@ -251,13 +410,15 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
break;
default:
WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
- return 0;
+ return false;
}

- if (sh_psize > min_t(u32, buf_size, max_size))
- return 0;
+ if (sh_psize > max_size)
+ return false;

- return sh_psize;
+ret:
+ /* Working with the whole buffer so < is ok. */
+ return sh_psize <= buf_size;
}

/*
@@ -268,16 +429,15 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
* positive: patch is not for this family, skip it
* 0: success
*/
-static int
-verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
+static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
+ u8 family = x86_family(bsp_cpuid_1_eax);
struct microcode_header_amd *mc_hdr;
- unsigned int ret;
u32 sh_psize;
u16 proc_id;
u8 patch_fam;

- if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
+ if (!__verify_patch_section(buf, buf_size, &sh_psize))
return -1;

/*
@@ -292,16 +452,12 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
* size sh_psize, as the section claims.
*/
if (buf_size < sh_psize) {
- if (!early)
- pr_debug("Patch of size %u truncated.\n", sh_psize);
-
+ pr_debug("Patch of size %u truncated.\n", sh_psize);
return -1;
}

- ret = __verify_patch_size(family, sh_psize, buf_size);
- if (!ret) {
- if (!early)
- pr_debug("Per-family patch size mismatch.\n");
+ if (!__verify_patch_size(sh_psize, buf_size)) {
+ pr_debug("Per-family patch size mismatch.\n");
return -1;
}

@@ -309,8 +465,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea

mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
- if (!early)
- pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
+ pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
return -1;
}

@@ -322,6 +477,15 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
return 0;
}

+static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
+{
+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
+ else
+ return eq_id == mc->hdr.processor_rev_id;
+}
+
/*
* This scans the ucode blob for the proper container as we can have multiple
* containers glued together. Returns the equivalence ID from the equivalence
@@ -337,7 +501,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u16 eq_id;
u8 *buf;

- if (!verify_equivalence_table(ucode, size, true))
+ if (!verify_equivalence_table(ucode, size))
return 0;

buf = ucode;
@@ -350,7 +514,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
* doesn't contain a patch for the CPU, scan through the whole container
* so that it can be skipped in case there are other containers appended.
*/
- eq_id = find_equiv_id(&table, desc->cpuid_1_eax);
+ eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

buf += hdr[2] + CONTAINER_HDR_SZ;
size -= hdr[2] + CONTAINER_HDR_SZ;
@@ -364,7 +528,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u32 patch_size;
int ret;

- ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
+ ret = verify_patch(buf, size, &patch_size);
if (ret < 0) {
/*
* Patch verification failed, skip to the next container, if
@@ -377,7 +541,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
}

mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
- if (eq_id == mc->hdr.processor_rev_id) {
+ if (mc_patch_matches(mc, eq_id)) {
desc->psize = patch_size;
desc->mc = mc;
}
@@ -427,73 +591,42 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
}
}

-static int __apply_microcode_amd(struct microcode_amd *mc)
+static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
+ unsigned int psize)
{
- u32 rev, dummy;
+ unsigned long p_addr = (unsigned long)&mc->hdr.data_code;

- native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
-
- /* verify patch application was successful */
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
- if (rev != mc->hdr.patch_id)
+ if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
return -1;

- return 0;
-}
-
-/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
- *
- * Returns true if container found (sets @desc), false otherwise.
- */
-static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
-{
- struct cont_desc desc = { 0 };
- struct microcode_amd *mc;
- u32 rev, dummy, *new_rev;
- bool ret = false;
-
-#ifdef CONFIG_X86_32
- new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-#else
- new_rev = &ucode_new_rev;
-#endif
-
- desc.cpuid_1_eax = cpuid_1_eax;
-
- scan_containers(ucode, size, &desc);
-
- mc = desc.mc;
- if (!mc)
- return ret;
+ native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);

- native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ if (x86_family(bsp_cpuid_1_eax) == 0x17) {
+ unsigned long p_addr_end = p_addr + psize - 1;

- /*
- * Allow application of the same revision to pick up SMT-specific
- * changes even if the revision of the other SMT thread is already
- * up-to-date.
- */
- if (rev > mc->hdr.patch_id)
- return ret;
+ invlpg(p_addr);

- if (!__apply_microcode_amd(mc)) {
- *new_rev = mc->hdr.patch_id;
- ret = true;
+ /*
+ * Flush next page too if patch image is crossing a page
+ * boundary.
+ */
+ if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
+ invlpg(p_addr_end);
}

- return ret;
+ /* verify patch application was successful */
+ *cur_rev = get_patch_level();
+ if (*cur_rev != mc->hdr.patch_id)
+ return false;
+
+ return true;
}

-static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
+
+static bool get_builtin_microcode(struct cpio_data *cp)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
+ u8 family = x86_family(bsp_cpuid_1_eax);
struct firmware fw;

if (IS_ENABLED(CONFIG_X86_32))
@@ -512,93 +645,144 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
return false;
}

-static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static bool __init find_blobs_in_containers(struct cpio_data *ret)
{
- struct ucode_cpu_info *uci;
struct cpio_data cp;
- const char *path;
- bool use_pa;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
- path = (const char *)__pa_nodebug(ucode_path);
- use_pa = true;
- } else {
- uci = ucode_cpu_info;
- path = ucode_path;
- use_pa = false;
- }
+ bool found;

- if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
- cp = find_microcode_in_initrd(path, use_pa);
+ if (!get_builtin_microcode(&cp))
+ cp = find_microcode_in_initrd(ucode_path);

- /* Needed in load_microcode_amd() */
- uci->cpu_sig.sig = cpuid_1_eax;
+ found = cp.data && cp.size;
+ if (found)
+ *ret = cp;

- *ret = cp;
+ return found;
}

-static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
+void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
+ struct cont_desc desc = { };
+ struct microcode_amd *mc;
struct cpio_data cp = { };
+ char buf[4];
+ u32 rev;
+
+ if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
+ if (!strncmp(buf, "off", 3)) {
+ sha_check = false;
+ pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
+ }

- find_blobs_in_containers(cpuid_1_eax, &cp);
- if (!(cp.data && cp.size))
- return;
+ bsp_cpuid_1_eax = cpuid_1_eax;

- early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
-}
+ rev = get_patch_level();
+ ed->old_rev = rev;

-void load_ucode_amd_early(unsigned int cpuid_1_eax)
-{
- return apply_ucode_from_containers(cpuid_1_eax);
-}
+ /* Needed in load_microcode_amd() */
+ ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

-static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+ if (!find_blobs_in_containers(&cp))
+ return;

-int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
-{
- struct cont_desc desc = { 0 };
- enum ucode_state ret;
- struct cpio_data cp;
+ scan_containers(cp.data, cp.size, &desc);

- cp = find_microcode_in_initrd(ucode_path, false);
- if (!(cp.data && cp.size))
- return -EINVAL;
+ mc = desc.mc;
+ if (!mc)
+ return;

- desc.cpuid_1_eax = cpuid_1_eax;
+ /*
+ * Allow application of the same revision to pick up SMT-specific
+ * changes even if the revision of the other SMT thread is already
+ * up-to-date.
+ */
+ if (ed->old_rev > mc->hdr.patch_id)
+ return;

- scan_containers(cp.data, cp.size, &desc);
- if (!desc.mc)
- return -EINVAL;
+ if (__apply_microcode_amd(mc, &rev, desc.psize))
+ ed->new_rev = rev;
+}

- ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
- if (ret > UCODE_UPDATED)
- return -EINVAL;
+static inline bool patch_cpus_equivalent(struct ucode_patch *p,
+ struct ucode_patch *n,
+ bool ignore_stepping)
+{
+ /* Zen and newer hardcode the f/m/s in the patch ID */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
+ union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
+ union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);
+
+ if (ignore_stepping) {
+ p_cid.stepping = 0;
+ n_cid.stepping = 0;
+ }

- return 0;
+ return p_cid.full == n_cid.full;
+ } else {
+ return p->equiv_cpu == n->equiv_cpu;
+ }
}

/*
* a small, trivial cache of per-family ucode patches
*/
-static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
+static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
struct ucode_patch *p;
+ struct ucode_patch n;
+
+ n.equiv_cpu = equiv_cpu;
+ n.patch_id = uci->cpu_sig.rev;
+
+ WARN_ON_ONCE(!n.patch_id);

list_for_each_entry(p, &microcode_cache, plist)
- if (p->equiv_cpu == equiv_cpu)
+ if (patch_cpus_equivalent(p, &n, false))
return p;
+
return NULL;
}

+static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
+{
+ /* Zen and newer hardcode the f/m/s in the patch ID */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
+ union zen_patch_rev zp, zn;
+
+ zp.ucode_rev = p->patch_id;
+ zn.ucode_rev = n->patch_id;
+
+ if (zn.stepping != zp.stepping)
+ return -1;
+
+ return zn.rev > zp.rev;
+ } else {
+ return n->patch_id > p->patch_id;
+ }
+}
+
static void update_cache(struct ucode_patch *new_patch)
{
struct ucode_patch *p;
+ int ret;

list_for_each_entry(p, &microcode_cache, plist) {
- if (p->equiv_cpu == new_patch->equiv_cpu) {
- if (p->patch_id >= new_patch->patch_id) {
+ if (patch_cpus_equivalent(p, new_patch, true)) {
+ ret = patch_newer(p, new_patch);
+ if (ret < 0)
+ continue;
+ else if (!ret) {
/* we already have the latest patch */
kfree(new_patch->data);
kfree(new_patch);
@@ -629,14 +813,17 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u16 equiv_id;
+ u16 equiv_id = 0;

+ uci->cpu_sig.rev = get_patch_level();

- equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
- if (!equiv_id)
- return NULL;
+ if (x86_family(bsp_cpuid_1_eax) < 0x17) {
+ equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
+ if (!equiv_id)
+ return NULL;
+ }

- return cache_find_patch(equiv_id);
+ return cache_find_patch(uci, equiv_id);
}

void reload_ucode_amd(unsigned int cpu)
@@ -651,24 +838,20 @@ void reload_ucode_amd(unsigned int cpu)

mc = p->data;

- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
+ rev = get_patch_level();
if (rev < mc->hdr.patch_id) {
- if (!__apply_microcode_amd(mc)) {
- ucode_new_rev = mc->hdr.patch_id;
- pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
- }
+ if (__apply_microcode_amd(mc, &rev, p->size))
+ pr_info_once("reload revision: 0x%08x\n", rev);
}
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct ucode_patch *p;

csig->sig = cpuid_eax(0x00000001);
- csig->rev = c->microcode;
+ csig->rev = get_patch_level();

/*
* a patch could have been loaded early, set uci->mc so that
@@ -678,8 +861,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
if (p && (p->patch_id == csig->rev))
uci->mc = p->data;

- pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
-
return 0;
}

@@ -690,7 +871,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
struct ucode_cpu_info *uci;
struct ucode_patch *p;
enum ucode_state ret;
- u32 rev, dummy __always_unused;
+ u32 rev;

BUG_ON(raw_smp_processor_id() != cpu);

@@ -700,18 +881,18 @@ static enum ucode_state apply_microcode_amd(int cpu)
if (!p)
return UCODE_NFOUND;

+ rev = uci->cpu_sig.rev;
+
mc_amd = p->data;
uci->mc = p->data;

- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
/* need to apply patch? */
if (rev > mc_amd->hdr.patch_id) {
ret = UCODE_OK;
goto out;
}

- if (__apply_microcode_amd(mc_amd)) {
+ if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
@@ -720,8 +901,6 @@ static enum ucode_state apply_microcode_amd(int cpu)
rev = mc_amd->hdr.patch_id;
ret = UCODE_UPDATED;

- pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
-
out:
uci->cpu_sig.rev = rev;
c->microcode = rev;
@@ -733,17 +912,29 @@ static enum ucode_state apply_microcode_amd(int cpu)
return ret;
}

+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+{
+ unsigned int cpu = smp_processor_id();
+
+ ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
+ apply_microcode_amd(cpu);
+}
+
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
u32 equiv_tbl_len;
const u32 *hdr;

- if (!verify_equivalence_table(buf, buf_size, false))
+ if (!verify_equivalence_table(buf, buf_size))
return 0;

hdr = (const u32 *)buf;
equiv_tbl_len = hdr[2];

+ /* Zen and newer do not need an equivalence table. */
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ goto out;
+
equiv_table.entry = vmalloc(equiv_tbl_len);
if (!equiv_table.entry) {
pr_err("failed to allocate equivalent CPU table\n");
@@ -753,12 +944,16 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

+out:
/* add header length */
return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
+ if (x86_family(bsp_cpuid_1_eax) >= 0x17)
+ return;
+
vfree(equiv_table.entry);
memset(&equiv_table, 0, sizeof(equiv_table));
}
@@ -784,7 +979,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
u16 proc_id;
int ret;

- ret = verify_patch(family, fw, leftover, patch_size, false);
+ ret = verify_patch(fw, leftover, patch_size);
if (ret)
return ret;

@@ -809,7 +1004,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
patch->patch_id = mc_hdr->patch_id;
patch->equiv_cpu = proc_id;

- pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
+ pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
__func__, patch->patch_id, proc_id);

/* ... and add to cache. */
@@ -819,8 +1014,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
}

/* Scan the blob in @data and add microcode patches to the cache. */
-static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
- size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
u8 *fw = (u8 *)data;
size_t offset;
@@ -853,21 +1047,30 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
return UCODE_OK;
}

-static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
- struct cpuinfo_x86 *c;
- unsigned int nid, cpu;
- struct ucode_patch *p;
enum ucode_state ret;

/* free old equiv table */
free_equiv_cpu_table();

ret = __load_microcode_amd(family, data, size);
- if (ret != UCODE_OK) {
+ if (ret != UCODE_OK)
cleanup();
+
+ return ret;
+}
+
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+{
+ struct cpuinfo_x86 *c;
+ unsigned int nid, cpu;
+ struct ucode_patch *p;
+ enum ucode_state ret;
+
+ ret = _load_microcode_amd(family, data, size);
+ if (ret != UCODE_OK)
return ret;
- }

for_each_node(nid) {
cpu = cpumask_first(cpumask_of_node(nid));
@@ -886,6 +1089,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
return ret;
}

+static int __init save_microcode_in_initrd(void)
+{
+ unsigned int cpuid_1_eax = native_cpuid_eax(1);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ struct cont_desc desc = { 0 };
+ enum ucode_state ret;
+ struct cpio_data cp;
+
+ if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ return 0;
+
+ if (!find_blobs_in_containers(&cp))
+ return -EINVAL;
+
+ scan_containers(cp.data, cp.size, &desc);
+ if (!desc.mc)
+ return -EINVAL;
+
+ ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+ if (ret > UCODE_UPDATED)
+ return -EINVAL;
+
+ return 0;
+}
+early_initcall(save_microcode_in_initrd);
+
/*
* AMD microcode firmware naming convention, up to family 15h they are in
* the legacy file:
@@ -909,6 +1138,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
enum ucode_state ret = UCODE_NFOUND;
const struct firmware *fw;

+ if (force_minrev)
+ return UCODE_NFOUND;
+
if (c->x86 >= 0x15)
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

@@ -918,7 +1150,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
}

ret = UCODE_ERROR;
- if (!verify_container(fw->data, fw->size, false))
+ if (!verify_container(fw->data, fw->size))
goto fw_release;

ret = load_microcode_amd(c->x86, fw->data, fw->size);
@@ -938,10 +1170,11 @@ static void microcode_fini_cpu_amd(int cpu)
}

static struct microcode_ops microcode_amd_ops = {
- .request_microcode_fw = request_microcode_amd,
- .collect_cpu_info = collect_cpu_info_amd,
- .apply_microcode = apply_microcode_amd,
- .microcode_fini_cpu = microcode_fini_cpu_amd,
+ .request_microcode_fw = request_microcode_amd,
+ .collect_cpu_info = collect_cpu_info_amd,
+ .apply_microcode = apply_microcode_amd,
+ .microcode_fini_cpu = microcode_fini_cpu_amd,
+ .nmi_safe = true,
};

struct microcode_ops * __init init_amd_microcode(void)
@@ -952,11 +1185,6 @@ struct microcode_ops * __init init_amd_microcode(void)
pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
return NULL;
}
-
- if (ucode_new_rev)
- pr_info_once("microcode updated early to new patch_level=0x%08x\n",
- ucode_new_rev);
-
return &microcode_amd_ops;
}

diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c
new file mode 100644
index 00000000000000..2a1655b1fdd883
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/amd_shas.c
@@ -0,0 +1,444 @@
+/* Keep 'em sorted. */
+static const struct patch_digest phashes[] = {
+ { 0x8001227, {
+ 0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
+ 0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
+ 0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
+ 0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
+ }
+ },
+ { 0x8001250, {
+ 0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
+ 0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
+ 0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
+ 0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
+ }
+ },
+ { 0x800126e, {
+ 0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
+ 0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
+ 0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
+ 0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
+ }
+ },
+ { 0x800126f, {
+ 0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
+ 0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
+ 0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
+ 0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
+ }
+ },
+ { 0x800820d, {
+ 0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
+ 0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
+ 0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
+ 0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
+ }
+ },
+ { 0x8301025, {
+ 0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
+ 0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
+ 0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
+ 0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
+ }
+ },
+ { 0x8301055, {
+ 0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
+ 0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
+ 0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
+ 0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
+ }
+ },
+ { 0x8301072, {
+ 0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
+ 0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
+ 0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
+ 0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
+ }
+ },
+ { 0x830107a, {
+ 0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
+ 0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
+ 0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
+ 0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
+ }
+ },
+ { 0x830107b, {
+ 0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
+ 0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
+ 0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
+ 0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
+ }
+ },
+ { 0x830107c, {
+ 0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
+ 0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
+ 0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
+ 0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
+ }
+ },
+ { 0x860010d, {
+ 0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
+ 0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
+ 0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
+ 0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
+ }
+ },
+ { 0x8608108, {
+ 0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
+ 0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
+ 0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
+ 0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
+ }
+ },
+ { 0x8701034, {
+ 0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
+ 0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
+ 0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
+ 0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
+ }
+ },
+ { 0x8a00008, {
+ 0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
+ 0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
+ 0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
+ 0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
+ }
+ },
+ { 0x8a0000a, {
+ 0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
+ 0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
+ 0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
+ 0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
+ }
+ },
+ { 0xa00104c, {
+ 0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
+ 0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
+ 0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
+ 0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
+ }
+ },
+ { 0xa00104e, {
+ 0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
+ 0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
+ 0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
+ 0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
+ }
+ },
+ { 0xa001053, {
+ 0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
+ 0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
+ 0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
+ 0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
+ }
+ },
+ { 0xa001058, {
+ 0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
+ 0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
+ 0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
+ 0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
+ }
+ },
+ { 0xa001075, {
+ 0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
+ 0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
+ 0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
+ 0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
+ }
+ },
+ { 0xa001078, {
+ 0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
+ 0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
+ 0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
+ 0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
+ }
+ },
+ { 0xa001079, {
+ 0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
+ 0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
+ 0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
+ 0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
+ }
+ },
+ { 0xa00107a, {
+ 0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
+ 0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
+ 0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
+ 0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
+ }
+ },
+ { 0xa001143, {
+ 0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
+ 0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
+ 0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
+ 0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
+ }
+ },
+ { 0xa001144, {
+ 0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
+ 0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
+ 0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
+ 0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
+ }
+ },
+ { 0xa00115d, {
+ 0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
+ 0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
+ 0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
+ 0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
+ }
+ },
+ { 0xa001173, {
+ 0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
+ 0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
+ 0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
+ 0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
+ }
+ },
+ { 0xa0011a8, {
+ 0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
+ 0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
+ 0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
+ 0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
+ }
+ },
+ { 0xa0011ce, {
+ 0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
+ 0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
+ 0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
+ 0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
+ }
+ },
+ { 0xa0011d1, {
+ 0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
+ 0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
+ 0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
+ 0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
+ }
+ },
+ { 0xa0011d3, {
+ 0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
+ 0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
+ 0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
+ 0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
+ }
+ },
+ { 0xa0011d5, {
+ 0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
+ 0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
+ 0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
+ 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
+ }
+ },
+ { 0xa001223, {
+ 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
+ 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
+ 0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
+ 0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
+ }
+ },
+ { 0xa001224, {
+ 0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
+ 0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
+ 0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
+ 0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
+ }
+ },
+ { 0xa001227, {
+ 0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
+ 0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
+ 0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
+ 0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
+ }
+ },
+ { 0xa001229, {
+ 0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
+ 0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
+ 0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
+ 0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
+ }
+ },
+ { 0xa00122e, {
+ 0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
+ 0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
+ 0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
+ 0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
+ }
+ },
+ { 0xa001231, {
+ 0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
+ 0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
+ 0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
+ 0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
+ }
+ },
+ { 0xa001234, {
+ 0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
+ 0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
+ 0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
+ 0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
+ }
+ },
+ { 0xa001236, {
+ 0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
+ 0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
+ 0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
+ 0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
+ }
+ },
+ { 0xa001238, {
+ 0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
+ 0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
+ 0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
+ 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
+ }
+ },
+ { 0xa00820c, {
+ 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
+ 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
+ 0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
+ 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
+ }
+ },
+ { 0xa10113e, {
+ 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
+ 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
+ 0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
+ 0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
+ }
+ },
+ { 0xa101144, {
+ 0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
+ 0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
+ 0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
+ 0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
+ }
+ },
+ { 0xa101148, {
+ 0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
+ 0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
+ 0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
+ 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
+ }
+ },
+ { 0xa10123e, {
+ 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
+ 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
+ 0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
+ 0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
+ }
+ },
+ { 0xa101244, {
+ 0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
+ 0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
+ 0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
+ 0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
+ }
+ },
+ { 0xa101248, {
+ 0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
+ 0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
+ 0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
+ 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
+ }
+ },
+ { 0xa108108, {
+ 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
+ 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
+ 0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
+ 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
+ }
+ },
+ { 0xa20102d, {
+ 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
+ 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
+ 0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
+ 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
+ }
+ },
+ { 0xa201210, {
+ 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
+ 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
+ 0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
+ 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
+ }
+ },
+ { 0xa404107, {
+ 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
+ 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
+ 0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
+ 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
+ }
+ },
+ { 0xa500011, {
+ 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
+ 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
+ 0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
+ 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
+ }
+ },
+ { 0xa601209, {
+ 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
+ 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
+ 0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
+ 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
+ }
+ },
+ { 0xa704107, {
+ 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
+ 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
+ 0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
+ 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
+ }
+ },
+ { 0xa705206, {
+ 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
+ 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
+ 0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
+ 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
+ }
+ },
+ { 0xa708007, {
+ 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
+ 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
+ 0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
+ 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
+ }
+ },
+ { 0xa70c005, {
+ 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
+ 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
+ 0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
+ 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
+ }
+ },
+ { 0xaa00116, {
+ 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
+ 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
+ 0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
+ 0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
+ }
+ },
+ { 0xaa00212, {
+ 0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
+ 0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
+ 0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
+ 0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
+ }
+ },
+ { 0xaa00213, {
+ 0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
+ 0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
+ 0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
+ 0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
+ }
+ },
+ { 0xaa00215, {
+ 0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
+ 0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
+ 0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
+ 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
+ }
+ },
+};
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index a4ebd5e0ae8287..c15c7b862bec1c 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -23,6 +23,7 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -31,6 +32,7 @@
#include <linux/fs.h>
#include <linux/mm.h>

+#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
@@ -42,11 +44,10 @@
#define DRIVER_VERSION "2.2"

static struct microcode_ops *microcode_ops;
-static bool dis_ucode_ldr = true;
+bool dis_ucode_ldr = true;

-bool initrd_gone;
-
-LIST_HEAD(microcode_cache);
+bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
+module_param(force_minrev, bool, S_IRUSR | S_IWUSR);

/*
* Synchronization.
@@ -76,6 +77,8 @@ static u32 final_levels[] = {
0, /* T-101 terminator */
};

+struct early_load_data early_data;
+
/*
* Check the current patch level on this CPU.
*
@@ -90,10 +93,7 @@ static bool amd_check_current_patch_level(void)

native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

- if (IS_ENABLED(CONFIG_X86_32))
- levels = (u32 *)__pa_nodebug(&final_levels);
- else
- levels = final_levels;
+ levels = final_levels;

for (i = 0; levels[i]; i++) {
if (lvl == levels[i])
@@ -105,17 +105,8 @@ static bool amd_check_current_patch_level(void)
static bool __init check_loader_disabled_bsp(void)
{
static const char *__dis_opt_str = "dis_ucode_ldr";
-
-#ifdef CONFIG_X86_32
- const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
- const char *option = (const char *)__pa_nodebug(__dis_opt_str);
- bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
-
-#else /* CONFIG_X86_64 */
const char *cmdline = boot_command_line;
const char *option = __dis_opt_str;
- bool *res = &dis_ucode_ldr;
-#endif

/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
@@ -123,17 +114,17 @@ static bool __init check_loader_disabled_bsp(void)
* that's good enough as they don't land on the BSP path anyway.
*/
if (native_cpuid_ecx(1) & BIT(31))
- return *res;
+ return true;

if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
if (amd_check_current_patch_level())
- return *res;
+ return true;
}

if (cmdline_find_option_bool(cmdline, option) <= 0)
- *res = false;
+ dis_ucode_ldr = false;

- return *res;
+ return dis_ucode_ldr;
}

void __init load_ucode_bsp(void)
@@ -166,25 +157,16 @@ void __init load_ucode_bsp(void)
return;

if (intel)
- load_ucode_intel_bsp();
+ load_ucode_intel_bsp(&early_data);
else
- load_ucode_amd_early(cpuid_1_eax);
-}
-
-static bool check_loader_disabled_ap(void)
-{
-#ifdef CONFIG_X86_32
- return *((bool *)__pa_nodebug(&dis_ucode_ldr));
-#else
- return dis_ucode_ldr;
-#endif
+ load_ucode_amd_bsp(&early_data, cpuid_1_eax);
}

void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;

- if (check_loader_disabled_ap())
+ if (dis_ucode_ldr)
return;

cpuid_1_eax = native_cpuid_eax(1);
@@ -196,103 +178,44 @@ void load_ucode_ap(void)
break;
case X86_VENDOR_AMD:
if (x86_family(cpuid_1_eax) >= 0x10)
- load_ucode_amd_early(cpuid_1_eax);
+ load_ucode_amd_ap(cpuid_1_eax);
break;
default:
break;
}
}

-static int __init save_microcode_in_initrd(void)
-{
- struct cpuinfo_x86 *c = &boot_cpu_data;
- int ret = -EINVAL;
-
- if (dis_ucode_ldr) {
- ret = 0;
- goto out;
- }
-
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL:
- if (c->x86 >= 6)
- ret = save_microcode_in_initrd_intel();
- break;
- case X86_VENDOR_AMD:
- if (c->x86 >= 0x10)
- ret = save_microcode_in_initrd_amd(cpuid_eax(1));
- break;
- default:
- break;
- }
-
-out:
- initrd_gone = true;
-
- return ret;
-}
-
-struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
+struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long start = 0;
size_t size;

#ifdef CONFIG_X86_32
- struct boot_params *params;
-
- if (use_pa)
- params = (struct boot_params *)__pa_nodebug(&boot_params);
- else
- params = &boot_params;
-
- size = params->hdr.ramdisk_size;
-
- /*
- * Set start only if we have an initrd image. We cannot use initrd_start
- * because it is not set that early yet.
- */
+ size = boot_params.hdr.ramdisk_size;
+ /* Early load on BSP has a temporary mapping. */
if (size)
- start = params->hdr.ramdisk_image;
+ start = initrd_start_early;

-# else /* CONFIG_X86_64 */
+#else /* CONFIG_X86_64 */
size = (unsigned long)boot_params.ext_ramdisk_size << 32;
size |= boot_params.hdr.ramdisk_size;

if (size) {
start = (unsigned long)boot_params.ext_ramdisk_image << 32;
start |= boot_params.hdr.ramdisk_image;
-
start += PAGE_OFFSET;
}
-# endif
+#endif

/*
* Fixup the start address: after reserve_initrd() runs, initrd_start
* has the virtual address of the beginning of the initrd. It also
* possibly relocates the ramdisk. In either case, initrd_start contains
* the updated address so use that instead.
- *
- * initrd_gone is for the hotplug case where we've thrown out initrd
- * already.
*/
- if (!use_pa) {
- if (initrd_gone)
- return (struct cpio_data){ NULL, 0, "" };
- if (initrd_start)
- start = initrd_start;
- } else {
- /*
- * The picture with physical addresses is a bit different: we
- * need to get the *physical* address to which the ramdisk was
- * relocated, i.e., relocated_ramdisk (not initrd_start) and
- * since we're running from physical addresses, we need to access
- * relocated_ramdisk through its *physical* address too.
- */
- u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
- if (*rr)
- start = *rr;
- }
+ if (initrd_start)
+ start = initrd_start;

return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
@@ -336,117 +259,298 @@ static struct platform_device *microcode_pdev;
* requirement can be relaxed in the future. Right now, this is conservative
* and good.
*/
-#define SPINUNIT 100 /* 100 nsec */
+enum sibling_ctrl {
+ /* Spinwait with timeout */
+ SCTRL_WAIT,
+ /* Invoke the microcode_apply() callback */
+ SCTRL_APPLY,
+ /* Proceed without invoking the microcode_apply() callback */
+ SCTRL_DONE,
+};

-static int check_online_cpus(void)
+struct microcode_ctrl {
+ enum sibling_ctrl ctrl;
+ enum ucode_state result;
+ unsigned int ctrl_cpu;
+ bool nmi_enabled;
+};
+
+DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
+static atomic_t late_cpus_in, offline_in_nmi;
+static unsigned int loops_per_usec;
+static cpumask_t cpu_offline_mask;
+
+static noinstr bool wait_for_cpus(atomic_t *cnt)
{
- unsigned int cpu;
+ unsigned int timeout, loops;

- /*
- * Make sure all CPUs are online. It's fine for SMT to be disabled if
- * all the primary threads are still online.
- */
- for_each_present_cpu(cpu) {
- if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
- pr_err("Not all CPUs online, aborting microcode update.\n");
- return -EINVAL;
+ WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);
+
+ for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+ if (!raw_atomic_read(cnt))
+ return true;
+
+ for (loops = 0; loops < loops_per_usec; loops++)
+ cpu_relax();
+
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
}
}
-
- return 0;
+ /* Prevent the late comers from making progress and let them time out */
+ raw_atomic_inc(cnt);
+ return false;
}

-static atomic_t late_cpus_in;
-static atomic_t late_cpus_out;
-
-static int __wait_for_cpus(atomic_t *t, long long timeout)
+static noinstr bool wait_for_ctrl(void)
{
- int all_cpus = num_online_cpus();
-
- atomic_inc(t);
+ unsigned int timeout, loops;

- while (atomic_read(t) < all_cpus) {
- if (timeout < SPINUNIT) {
- pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
- all_cpus - atomic_read(t));
- return 1;
- }
+ for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+ if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
+ return true;

- ndelay(SPINUNIT);
- timeout -= SPINUNIT;
+ for (loops = 0; loops < loops_per_usec; loops++)
+ cpu_relax();

- touch_nmi_watchdog();
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
+ }
}
- return 0;
+ return false;
}

/*
- * Returns:
- * < 0 - on error
- * 0 - success (no update done or microcode was updated)
+ * Protected against instrumentation up to the point where the primary
+ * thread completed the update. See microcode_nmi_handler() for details.
*/
-static int __reload_late(void *info)
+static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
- int cpu = smp_processor_id();
- enum ucode_state err;
- int ret = 0;
+ /* Initial rendezvous to ensure that all CPUs have arrived */
+ if (!wait_for_cpus(&late_cpus_in)) {
+ raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+ return false;
+ }

/*
- * Wait for all CPUs to arrive. A load will not be attempted unless all
- * CPUs show up.
- * */
- if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
- return -1;
+ * Wait for primary threads to complete. If one of them hangs due
+ * to the update, there is no way out. This is non-recoverable
+ * because the CPU might hold locks or resources and confuse the
+ * scheduler, watchdogs etc. There is no way to safely evacuate the
+ * machine.
+ */
+ if (wait_for_ctrl())
+ return true;
+
+ instrumentation_begin();
+ panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
+ instrumentation_end();
+}
+
+/*
+ * Protected against instrumentation up to the point where the primary
+ * thread completed the update. See microcode_nmi_handler() for details.
+ */
+static noinstr void load_secondary(unsigned int cpu)
+{
+ unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
+ enum ucode_state ret;
+
+ if (!load_secondary_wait(ctrl_cpu)) {
+ instrumentation_begin();
+ pr_err_once("load: %d CPUs timed out\n",
+ atomic_read(&late_cpus_in) - 1);
+ instrumentation_end();
+ return;
+ }

+ /* Primary thread completed. Allow to invoke instrumentable code */
+ instrumentation_begin();
/*
- * On an SMT system, it suffices to load the microcode on one sibling of
- * the core because the microcode engine is shared between the threads.
- * Synchronization still needs to take place so that no concurrent
- * loading attempts happen on multiple threads of an SMT core. See
- * below.
+ * If the primary succeeded then invoke the apply() callback,
+ * otherwise copy the state from the primary thread.
*/
- if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
- err = microcode_ops->apply_microcode(cpu);
+ if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
+ ret = microcode_ops->apply_microcode(cpu);
else
- goto wait_for_siblings;
+ ret = per_cpu(ucode_ctrl.result, ctrl_cpu);

- if (err >= UCODE_NFOUND) {
- if (err == UCODE_ERROR) {
- pr_warn("Error reloading microcode on CPU %d\n", cpu);
- ret = -1;
- }
+ this_cpu_write(ucode_ctrl.result, ret);
+ this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
+ instrumentation_end();
+}
+
+static void __load_primary(unsigned int cpu)
+{
+ struct cpumask *secondaries = topology_sibling_cpumask(cpu);
+ enum sibling_ctrl ctrl;
+ enum ucode_state ret;
+ unsigned int sibling;
+
+ /* Initial rendezvous to ensure that all CPUs have arrived */
+ if (!wait_for_cpus(&late_cpus_in)) {
+ this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+ pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
+ return;
}

-wait_for_siblings:
- if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
- panic("Timeout during microcode update!\n");
+ ret = microcode_ops->apply_microcode(cpu);
+ this_cpu_write(ucode_ctrl.result, ret);
+ this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);

/*
- * At least one thread has completed update on each core.
- * For others, simply call the update to make sure the
- * per-cpu cpuinfo can be updated with right microcode
- * revision.
+ * If the update was successful, let the siblings run the apply()
+ * callback. If not, tell them it's done. This also covers the
+ * case where the CPU has uniform loading at package or system
+ * scope implemented but does not advertise it.
*/
- if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
- err = microcode_ops->apply_microcode(cpu);
+ if (ret == UCODE_UPDATED || ret == UCODE_OK)
+ ctrl = SCTRL_APPLY;
+ else
+ ctrl = SCTRL_DONE;
+
+ for_each_cpu(sibling, secondaries) {
+ if (sibling != cpu)
+ per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
+ }
+}
+
+static bool kick_offline_cpus(unsigned int nr_offl)
+{
+ unsigned int cpu, timeout;
+
+ for_each_cpu(cpu, &cpu_offline_mask) {
+ /* Enable the rendezvous handler and send NMI */
+ per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
+ apic_send_nmi_to_offline_cpu(cpu);
+ }
+
+ /* Wait for them to arrive */
+ for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) {
+ if (atomic_read(&offline_in_nmi) == nr_offl)
+ return true;
+ udelay(1);
+ }
+ /* Let the others time out */
+ return false;
+}
+
+static void release_offline_cpus(void)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, &cpu_offline_mask)
+ per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
+}
+
+static void load_primary(unsigned int cpu)
+{
+ unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);
+ bool proceed = true;
+
+ /* Kick soft-offlined SMT siblings if required */
+ if (!cpu && nr_offl)
+ proceed = kick_offline_cpus(nr_offl);

- return ret;
+ /* If the soft-offlined CPUs did not respond, abort */
+ if (proceed)
+ __load_primary(cpu);
+
+ /* Unconditionally release soft-offlined SMT siblings if required */
+ if (!cpu && nr_offl)
+ release_offline_cpus();
}

/*
- * Reload microcode late on all CPUs. Wait for a sec until they
- * all gather together.
+ * Minimal stub rendezvous handler for soft-offlined CPUs which participate
+ * in the NMI rendezvous to protect against a concurrent NMI on affected
+ * CPUs.
*/
-static int microcode_reload_late(void)
+void noinstr microcode_offline_nmi_handler(void)
{
- int old = boot_cpu_data.microcode, ret;
+ if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
+ return;
+ raw_cpu_write(ucode_ctrl.nmi_enabled, false);
+ raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
+ raw_atomic_inc(&offline_in_nmi);
+ wait_for_ctrl();
+}
+
+static noinstr bool microcode_update_handler(void)
+{
+ unsigned int cpu = raw_smp_processor_id();
+
+ if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
+ instrumentation_begin();
+ load_primary(cpu);
+ instrumentation_end();
+ } else {
+ load_secondary(cpu);
+ }
+
+ instrumentation_begin();
+ touch_nmi_watchdog();
+ instrumentation_end();
+
+ return true;
+}
+
+/*
+ * Protection against instrumentation is required for CPUs which are not
+ * safe against an NMI which is delivered to the secondary SMT sibling
+ * while the primary thread updates the microcode. Instrumentation can end
+ * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
+ * which is the opposite of what the NMI rendezvous is trying to achieve.
+ *
+ * The primary thread is safe versus instrumentation as the actual
+ * microcode update handles this correctly. It's only the sibling code
+ * path which must be NMI safe until the primary thread completed the
+ * update.
+ */
+bool noinstr microcode_nmi_handler(void)
+{
+ if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
+ return false;
+
+ raw_cpu_write(ucode_ctrl.nmi_enabled, false);
+ return microcode_update_handler();
+}
+
+static int load_cpus_stopped(void *unused)
+{
+ if (microcode_ops->use_nmi) {
+ /* Enable the NMI handler and raise NMI */
+ this_cpu_write(ucode_ctrl.nmi_enabled, true);
+ apic->send_IPI(smp_processor_id(), NMI_VECTOR);
+ } else {
+ /* Just invoke the handler directly */
+ microcode_update_handler();
+ }
+ return 0;
+}
+
+static int load_late_stop_cpus(bool is_safe)
+{
+ unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
+ unsigned int nr_offl, offline = 0;
+ int old_rev = boot_cpu_data.microcode;
struct cpuinfo_x86 prev_info;

- pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
- pr_err("You should switch to early loading, if possible.\n");
+ if (!is_safe) {
+ pr_err("Late microcode loading without minimal revision check.\n");
+ pr_err("You should switch to early loading, if possible.\n");
+ }

- atomic_set(&late_cpus_in, 0);
- atomic_set(&late_cpus_out, 0);
+ atomic_set(&late_cpus_in, num_online_cpus());
+ atomic_set(&offline_in_nmi, 0);
+ loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000);

/*
* Take a snapshot before the microcode update in order to compare and
@@ -454,52 +558,162 @@ static int microcode_reload_late(void)
*/
store_cpu_caps(&prev_info);

- ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
- if (!ret) {
- pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
- old, boot_cpu_data.microcode);
- microcode_check(&prev_info);
- } else {
- pr_info("Reload failed, current microcode revision: 0x%x\n",
- boot_cpu_data.microcode);
+ if (microcode_ops->use_nmi)
+ static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);
+
+ stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);
+
+ if (microcode_ops->use_nmi)
+ static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);
+
+ /* Analyze the results */
+ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
+ switch (per_cpu(ucode_ctrl.result, cpu)) {
+ case UCODE_UPDATED: updated++; break;
+ case UCODE_TIMEOUT: timedout++; break;
+ case UCODE_OK: siblings++; break;
+ case UCODE_OFFLINE: offline++; break;
+ default: failed++; break;
+ }
+ }
+
+ if (microcode_ops->finalize_late_load)
+ microcode_ops->finalize_late_load(!updated);
+
+ if (!updated) {
+ /* Nothing changed. */
+ if (!failed && !timedout)
+ return 0;
+
+ nr_offl = cpumask_weight(&cpu_offline_mask);
+ if (offline < nr_offl) {
+ pr_warn("%u offline siblings did not respond.\n",
+ nr_offl - atomic_read(&offline_in_nmi));
+ return -EIO;
+ }
+ pr_err("update failed: %u CPUs failed %u CPUs timed out\n",
+ failed, timedout);
+ return -EIO;
+ }
+
+ if (!is_safe || failed || timedout)
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
+ pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
+ if (failed || timedout) {
+ pr_err("load incomplete. %u CPUs timed out or failed\n",
+ num_online_cpus() - (updated + siblings));
}
+ pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode);
+ microcode_check(&prev_info);

- return ret;
+ return updated + siblings == num_online_cpus() ? 0 : -EIO;
+}
+
+/*
+ * This function does two things:
+ *
+ * 1) Ensure that all required CPUs which are present and have been booted
+ * once are online.
+ *
+ * To pass this check, all primary threads must be online.
+ *
+ * If the microcode load is not safe against NMI then all SMT threads
+ * must be online as well because they still react to NMIs when they are
+ * soft-offlined and parked in one of the play_dead() variants. So if a
+ * NMI hits while the primary thread updates the microcode the resulting
+ * behaviour is undefined. The default play_dead() implementation on
+ * modern CPUs uses MWAIT, which is also not guaranteed to be safe
+ * against a microcode update which affects MWAIT.
+ *
+ * As soft-offlined CPUs still react on NMIs, the SMT sibling
+ * restriction can be lifted when the vendor driver signals to use NMI
+ * for rendezvous and the APIC provides a mechanism to send an NMI to a
+ * soft-offlined CPU. The soft-offlined CPUs are then able to
+ * participate in the rendezvous in a trivial stub handler.
+ *
+ * 2) Initialize the per CPU control structure and create a cpumask
+ * which contains "offline"; secondary threads, so they can be handled
+ * correctly by a control CPU.
+ */
+static bool setup_cpus(void)
+{
+ struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
+ bool allow_smt_offline;
+ unsigned int cpu;
+
+ allow_smt_offline = microcode_ops->nmi_safe ||
+ (microcode_ops->use_nmi && apic->nmi_to_offline_cpu);
+
+ cpumask_clear(&cpu_offline_mask);
+
+ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
+ /*
+ * Offline CPUs sit in one of the play_dead() functions
+ * with interrupts disabled, but they still react on NMIs
+ * and execute arbitrary code. Also MWAIT being updated
+ * while the offline CPU sits there is not necessarily safe
+ * on all CPU variants.
+ *
+ * Mark them in the offline_cpus mask which will be handled
+ * by CPU0 later in the update process.
+ *
+ * Ensure that the primary thread is online so that it is
+ * guaranteed that all cores are updated.
+ */
+ if (!cpu_online(cpu)) {
+ if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
+ pr_err("CPU %u not online, loading aborted\n", cpu);
+ return false;
+ }
+ cpumask_set_cpu(cpu, &cpu_offline_mask);
+ per_cpu(ucode_ctrl, cpu) = ctrl;
+ continue;
+ }
+
+ /*
+ * Initialize the per CPU state. This is core scope for now,
+ * but prepared to take package or system scope into account.
+ */
+ ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
+ per_cpu(ucode_ctrl, cpu) = ctrl;
+ }
+ return true;
+}
+
+static int load_late_locked(void)
+{
+ if (!setup_cpus())
+ return -EBUSY;
+
+ switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
+ case UCODE_NEW:
+ return load_late_stop_cpus(false);
+ case UCODE_NEW_SAFE:
+ return load_late_stop_cpus(true);
+ case UCODE_NFOUND:
+ return -ENOENT;
+ default:
+ return -EBADFD;
+ }
}

static ssize_t reload_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- enum ucode_state tmp_ret = UCODE_OK;
- int bsp = boot_cpu_data.cpu_index;
unsigned long val;
- ssize_t ret = 0;
+ ssize_t ret;

ret = kstrtoul(buf, 0, &val);
if (ret || val != 1)
return -EINVAL;

cpus_read_lock();
-
- ret = check_online_cpus();
- if (ret)
- goto put;
-
- tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev);
- if (tmp_ret != UCODE_NEW)
- goto put;
-
- ret = microcode_reload_late();
-put:
+ ret = load_late_locked();
cpus_read_unlock();

- if (ret == 0)
- ret = size;
-
- add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
-
- return ret;
+ return ret ? : size;
}

static DEVICE_ATTR_WO(reload);
@@ -541,17 +755,6 @@ static void microcode_fini_cpu(int cpu)
microcode_ops->microcode_fini_cpu(cpu);
}

-static enum ucode_state microcode_init_cpu(int cpu)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- memset(uci, 0, sizeof(*uci));
-
- microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
-
- return microcode_ops->apply_microcode(cpu);
-}
-
/**
* microcode_bsp_resume - Update boot CPU microcode during resume.
*/
@@ -570,19 +773,18 @@ static struct syscore_ops mc_syscore_ops = {
.resume = microcode_bsp_resume,
};

-static int mc_cpu_starting(unsigned int cpu)
-{
- enum ucode_state err = microcode_ops->apply_microcode(cpu);
-
- pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err);
-
- return err == UCODE_ERROR;
-}
-
static int mc_cpu_online(unsigned int cpu)
{
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct device *dev = get_cpu_device(cpu);

+ memset(uci, 0, sizeof(*uci));
+
+ microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
+ cpu_data(cpu).microcode = uci->cpu_sig.rev;
+ if (!cpu)
+ boot_cpu_data.microcode = uci->cpu_sig.rev;
+
if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
return 0;
@@ -590,33 +792,13 @@ static int mc_cpu_online(unsigned int cpu)

static int mc_cpu_down_prep(unsigned int cpu)
{
- struct device *dev;
-
- dev = get_cpu_device(cpu);
+ struct device *dev = get_cpu_device(cpu);

microcode_fini_cpu(cpu);
-
- /* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
- pr_debug("%s: CPU%d\n", __func__, cpu);
-
return 0;
}

-static void setup_online_cpu(struct work_struct *work)
-{
- int cpu = smp_processor_id();
- enum ucode_state err;
-
- err = microcode_init_cpu(cpu);
- if (err == UCODE_ERROR) {
- pr_err("Error applying microcode on CPU%d\n", cpu);
- return;
- }
-
- mc_cpu_online(cpu);
-}
-
static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
&dev_attr_reload.attr,
@@ -648,6 +830,11 @@ static int __init microcode_init(void)
if (!microcode_ops)
return -ENODEV;

+ pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));
+
+ if (early_data.new_rev)
+ pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);
+
microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
if (IS_ERR(microcode_pdev))
return PTR_ERR(microcode_pdev);
@@ -662,14 +849,9 @@ static int __init microcode_init(void)
}
}

- /* Do per-CPU setup */
- schedule_on_each_cpu(setup_online_cpu);
-
register_syscore_ops(&mc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
- mc_cpu_starting, NULL);
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
- mc_cpu_online, mc_cpu_down_prep);
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+ mc_cpu_online, mc_cpu_down_prep);

pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

@@ -680,5 +862,4 @@ static int __init microcode_init(void)
return error;

}
-fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 94dd6af9c963a8..9d7baf2573bcde 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -14,7 +14,6 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -32,11 +31,14 @@

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

+#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL)
+
/* Current microcode patch used in early patching on the APs. */
-static struct microcode_intel *intel_ucode_patch;
+static struct microcode_intel *ucode_patch_va __read_mostly;
+static struct microcode_intel *ucode_patch_late __read_mostly;

/* last level cache size per core */
-static int llc_size_per_core;
+static unsigned int llc_size_per_core __ro_after_init;

/* microcode format is extended from prescott processors */
struct extended_signature {
@@ -66,60 +68,52 @@ static inline unsigned int exttable_size(struct extended_sigtable *et)
return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
}

-int intel_cpu_collect_info(struct ucode_cpu_info *uci)
+void intel_collect_cpu_info(struct cpu_signature *sig)
{
- unsigned int val[2];
- unsigned int family, model;
- struct cpu_signature csig = { 0 };
- unsigned int eax, ebx, ecx, edx;
-
- memset(uci, 0, sizeof(*uci));
-
- eax = 0x00000001;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- csig.sig = eax;
+ sig->sig = cpuid_eax(1);
+ sig->pf = 0;
+ sig->rev = intel_get_microcode_revision();

- family = x86_family(eax);
- model = x86_model(eax);
+ if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) {
+ unsigned int val[2];

- if (model >= 5 || family > 6) {
/* get processor flags from MSR 0x17 */
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig.pf = 1 << ((val[1] >> 18) & 7);
+ sig->pf = 1 << ((val[1] >> 18) & 7);
}
+}
+EXPORT_SYMBOL_GPL(intel_collect_cpu_info);

- csig.rev = intel_get_microcode_revision();
-
- uci->cpu_sig = csig;
+static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
+ unsigned int pf2)
+{
+ if (s1->sig != sig2)
+ return false;

- return 0;
+ /* Processor flags are either both 0 or they intersect. */
+ return ((!s1->pf && !pf2) || (s1->pf & pf2));
}
-EXPORT_SYMBOL_GPL(intel_cpu_collect_info);

-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int intel_find_matching_signature(void *mc, unsigned int csig, int cpf)
+bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
{
struct microcode_header_intel *mc_hdr = mc;
- struct extended_sigtable *ext_hdr;
struct extended_signature *ext_sig;
+ struct extended_sigtable *ext_hdr;
int i;

- if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
- return 1;
+ if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
+ return true;

/* Look for ext. headers: */
if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
- return 0;
+ return false;

ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

for (i = 0; i < ext_hdr->count; i++) {
- if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
- return 1;
+ if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
+ return true;
ext_sig++;
}
return 0;
@@ -240,516 +234,245 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);

-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
-{
- struct microcode_header_intel *mc_hdr = mc;
-
- if (mc_hdr->rev <= new_rev)
- return 0;
-
- return intel_find_matching_signature(mc, csig, cpf);
-}
-
-static struct ucode_patch *memdup_patch(void *data, unsigned int size)
-{
- struct ucode_patch *p;
-
- p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
- if (!p)
- return NULL;
-
- p->data = kmemdup(data, size, GFP_KERNEL);
- if (!p->data) {
- kfree(p);
- return NULL;
- }
-
- return p;
-}
-
-static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
+static void update_ucode_pointer(struct microcode_intel *mc)
{
- struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
- struct ucode_patch *iter, *tmp, *p = NULL;
- bool prev_found = false;
- unsigned int sig, pf;
-
- mc_hdr = (struct microcode_header_intel *)data;
-
- list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
- mc_saved_hdr = (struct microcode_header_intel *)iter->data;
- sig = mc_saved_hdr->sig;
- pf = mc_saved_hdr->pf;
-
- if (intel_find_matching_signature(data, sig, pf)) {
- prev_found = true;
-
- if (mc_hdr->rev <= mc_saved_hdr->rev)
- continue;
-
- p = memdup_patch(data, size);
- if (!p)
- pr_err("Error allocating buffer %p\n", data);
- else {
- list_replace(&iter->plist, &p->plist);
- kfree(iter->data);
- kfree(iter);
- }
- }
- }
+ kvfree(ucode_patch_va);

/*
- * There weren't any previous patches found in the list cache; save the
- * newly found.
+ * Save the virtual address for early loading and for eventual free
+ * on late loading.
*/
- if (!prev_found) {
- p = memdup_patch(data, size);
- if (!p)
- pr_err("Error allocating buffer for %p\n", data);
- else
- list_add_tail(&p->plist, &microcode_cache);
- }
-
- if (!p)
- return;
+ ucode_patch_va = mc;
+}

- if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
- return;
+static void save_microcode_patch(struct microcode_intel *patch)
+{
+ unsigned int size = get_totalsize(&patch->hdr);
+ struct microcode_intel *mc;

- /*
- * Save for early loading. On 32-bit, that needs to be a physical
- * address as the APs are running from physical addresses, before
- * paging has been enabled.
- */
- if (IS_ENABLED(CONFIG_X86_32))
- intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+ mc = kvmemdup(patch, size, GFP_KERNEL);
+ if (mc)
+ update_ucode_pointer(mc);
else
- intel_ucode_patch = p->data;
+ pr_err("Unable to allocate microcode memory size: %u\n", size);
}

-/*
- * Get microcode matching with BSP's model. Only CPUs with the same model as
- * BSP can stay in the platform.
- */
-static struct microcode_intel *
-scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
+/* Scan blob for microcode matching the boot CPUs family, model, stepping */
+static __init struct microcode_intel *scan_microcode(void *data, size_t size,
+ struct ucode_cpu_info *uci,
+ bool save)
{
struct microcode_header_intel *mc_header;
struct microcode_intel *patch = NULL;
+ u32 cur_rev = uci->cpu_sig.rev;
unsigned int mc_size;

- while (size) {
- if (size < sizeof(struct microcode_header_intel))
- break;
-
+ for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
mc_header = (struct microcode_header_intel *)data;

mc_size = get_totalsize(mc_header);
- if (!mc_size ||
- mc_size > size ||
+ if (!mc_size || mc_size > size ||
intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
break;

- size -= mc_size;
-
- if (!intel_find_matching_signature(data, uci->cpu_sig.sig,
- uci->cpu_sig.pf)) {
- data += mc_size;
+ if (!intel_find_matching_signature(data, &uci->cpu_sig))
continue;
- }

+ /*
+ * For saving the early microcode, find the matching revision which
+ * was loaded on the BSP.
+ *
+ * On the BSP during early boot, find a newer revision than
+ * actually loaded in the CPU.
+ */
if (save) {
- save_microcode_patch(uci, data, mc_size);
- goto next;
- }
-
-
- if (!patch) {
- if (!has_newer_microcode(data,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf,
- uci->cpu_sig.rev))
- goto next;
-
- } else {
- struct microcode_header_intel *phdr = &patch->hdr;
-
- if (!has_newer_microcode(data,
- phdr->sig,
- phdr->pf,
- phdr->rev))
- goto next;
+ if (cur_rev != mc_header->rev)
+ continue;
+ } else if (cur_rev >= mc_header->rev) {
+ continue;
}

- /* We have a newer patch, save it. */
patch = data;
-
-next:
- data += mc_size;
+ cur_rev = mc_header->rev;
}

- if (size)
- return NULL;
-
- return patch;
+ return size ? NULL : patch;
}

-static bool load_builtin_intel_microcode(struct cpio_data *cp)
+static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
+ struct microcode_intel *mc,
+ u32 *cur_rev)
{
- unsigned int eax = 1, ebx, ecx = 0, edx;
- struct firmware fw;
- char name[30];
-
- if (IS_ENABLED(CONFIG_X86_32))
- return false;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- sprintf(name, "intel-ucode/%02x-%02x-%02x",
- x86_family(eax), x86_model(eax), x86_stepping(eax));
-
- if (firmware_request_builtin(&fw, name)) {
- cp->size = fw.size;
- cp->data = (void *)fw.data;
- return true;
- }
-
- return false;
-}
-
-static void print_ucode_info(int old_rev, int new_rev, unsigned int date)
-{
- pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
- old_rev,
- new_rev,
- date & 0xffff,
- date >> 24,
- (date >> 16) & 0xff);
-}
-
-#ifdef CONFIG_X86_32
-
-static int delay_ucode_info;
-static int current_mc_date;
-static int early_old_rev;
-
-/*
- * Print early updated ucode info after printk works. This is delayed info dump.
- */
-void show_ucode_info_early(void)
-{
- struct ucode_cpu_info uci;
-
- if (delay_ucode_info) {
- intel_cpu_collect_info(&uci);
- print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date);
- delay_ucode_info = 0;
- }
-}
-
-/*
- * At this point, we can not call printk() yet. Delay printing microcode info in
- * show_ucode_info_early() until printk() works.
- */
-static void print_ucode(int old_rev, int new_rev, int date)
-{
- int *delay_ucode_info_p;
- int *current_mc_date_p;
- int *early_old_rev_p;
-
- delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
- current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
- early_old_rev_p = (int *)__pa_nodebug(&early_old_rev);
-
- *delay_ucode_info_p = 1;
- *current_mc_date_p = date;
- *early_old_rev_p = old_rev;
-}
-#else
-
-static inline void print_ucode(int old_rev, int new_rev, int date)
-{
- print_ucode_info(old_rev, new_rev, date);
-}
-#endif
-
-static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
-{
- struct microcode_intel *mc;
- u32 rev, old_rev;
+ u32 rev;

- mc = uci->mc;
if (!mc)
- return 0;
+ return UCODE_NFOUND;

/*
* Save us the MSR write below - which is a particular expensive
* operation - when the other hyperthread has updated the microcode
* already.
*/
- rev = intel_get_microcode_revision();
- if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
+ *cur_rev = intel_get_microcode_revision();
+ if (*cur_rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = *cur_rev;
return UCODE_OK;
}

- old_rev = rev;
-
- /*
- * Writeback and invalidate caches before updating microcode to avoid
- * internal issues depending on what the microcode is updating.
- */
- native_wbinvd();
-
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

rev = intel_get_microcode_revision();
if (rev != mc->hdr.rev)
- return -1;
+ return UCODE_ERROR;

uci->cpu_sig.rev = rev;
+ return UCODE_UPDATED;
+}

- if (early)
- print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date);
- else
- print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date);
+static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
+{
+ struct microcode_intel *mc = uci->mc;
+ u32 cur_rev;

- return 0;
+ return __apply_microcode(uci, mc, &cur_rev);
}

-int __init save_microcode_in_initrd_intel(void)
+static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
{
- struct ucode_cpu_info uci;
- struct cpio_data cp;
-
- /*
- * initrd is going away, clear patch ptr. We will scan the microcode one
- * last time before jettisoning and save a patch, if found. Then we will
- * update that pointer too, with a stable patch address to use when
- * resuming the cores.
- */
- intel_ucode_patch = NULL;
+ unsigned int eax = 1, ebx, ecx = 0, edx;
+ struct firmware fw;
+ char name[30];

- if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(ucode_path, false);
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;

- if (!(cp.data && cp.size))
- return 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);

- intel_cpu_collect_info(&uci);
+ sprintf(name, "intel-ucode/%02x-%02x-%02x",
+ x86_family(eax), x86_model(eax), x86_stepping(eax));

- scan_microcode(cp.data, cp.size, &uci, true);
- return 0;
+ if (firmware_request_builtin(&fw, name)) {
+ cp->size = fw.size;
+ cp->data = (void *)fw.data;
+ return true;
+ }
+ return false;
}

-/*
- * @res_patch, output: a pointer to the patch we found.
- */
-static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
+static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
{
- static const char *path;
struct cpio_data cp;
- bool use_pa;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- path = (const char *)__pa_nodebug(ucode_path);
- use_pa = true;
- } else {
- path = ucode_path;
- use_pa = false;
- }

- /* try built-in microcode first */
+ intel_collect_cpu_info(&uci->cpu_sig);
+
if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(path, use_pa);
+ cp = find_microcode_in_initrd(ucode_path);

if (!(cp.data && cp.size))
return NULL;

- intel_cpu_collect_info(uci);
-
- return scan_microcode(cp.data, cp.size, uci, false);
+ return scan_microcode(cp.data, cp.size, uci, save);
}

-void __init load_ucode_intel_bsp(void)
+/*
+ * Invoked from an early init call to save the microcode blob which was
+ * selected during early boot when mm was not usable. The microcode must be
+ * saved because initrd is going away. It's an early init call so the APs
+ * just can use the pointer and do not have to scan initrd/builtin firmware
+ * again.
+ */
+static int __init save_builtin_microcode(void)
{
- struct microcode_intel *patch;
struct ucode_cpu_info uci;

- patch = __load_ucode_intel(&uci);
- if (!patch)
- return;
+ if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
+ return 0;

- uci.mc = patch;
+ if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;

- apply_microcode_early(&uci, true);
+ uci.mc = get_microcode_blob(&uci, true);
+ if (uci.mc)
+ save_microcode_patch(uci.mc);
+ return 0;
}
+early_initcall(save_builtin_microcode);

-void load_ucode_intel_ap(void)
+/* Load microcode on BSP from initrd or builtin blobs */
|
|
+void __init load_ucode_intel_bsp(struct early_load_data *ed)
|
|
{
|
|
- struct microcode_intel *patch, **iup;
|
|
struct ucode_cpu_info uci;
|
|
|
|
- if (IS_ENABLED(CONFIG_X86_32))
|
|
- iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
|
|
- else
|
|
- iup = &intel_ucode_patch;
|
|
+ uci.mc = get_microcode_blob(&uci, false);
|
|
+ ed->old_rev = uci.cpu_sig.rev;
|
|
|
|
- if (!*iup) {
|
|
- patch = __load_ucode_intel(&uci);
|
|
- if (!patch)
|
|
- return;
|
|
-
|
|
- *iup = patch;
|
|
+ if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) {
|
|
+ ucode_patch_va = UCODE_BSP_LOADED;
|
|
+ ed->new_rev = uci.cpu_sig.rev;
|
|
}
|
|
-
|
|
- uci.mc = *iup;
|
|
-
|
|
- apply_microcode_early(&uci, true);
|
|
}
|
|
|
|
-static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
|
|
+void load_ucode_intel_ap(void)
|
|
{
|
|
- struct microcode_header_intel *phdr;
|
|
- struct ucode_patch *iter, *tmp;
|
|
-
|
|
- list_for_each_entry_safe(iter, tmp, µcode_cache, plist) {
|
|
-
|
|
- phdr = (struct microcode_header_intel *)iter->data;
|
|
-
|
|
- if (phdr->rev <= uci->cpu_sig.rev)
|
|
- continue;
|
|
-
|
|
- if (!intel_find_matching_signature(phdr,
|
|
- uci->cpu_sig.sig,
|
|
- uci->cpu_sig.pf))
|
|
- continue;
|
|
+ struct ucode_cpu_info uci;
|
|
|
|
- return iter->data;
|
|
- }
|
|
- return NULL;
|
|
+ uci.mc = ucode_patch_va;
|
|
+ if (uci.mc)
|
|
+ apply_microcode_early(&uci);
|
|
}
|
|
|
|
+/* Reload microcode on resume */
|
|
void reload_ucode_intel(void)
|
|
{
|
|
- struct microcode_intel *p;
|
|
- struct ucode_cpu_info uci;
|
|
-
|
|
- intel_cpu_collect_info(&uci);
|
|
-
|
|
- p = find_patch(&uci);
|
|
- if (!p)
|
|
- return;
|
|
-
|
|
- uci.mc = p;
|
|
+ struct ucode_cpu_info uci = { .mc = ucode_patch_va, };
|
|
|
|
- apply_microcode_early(&uci, false);
|
|
+ if (uci.mc)
|
|
+ apply_microcode_early(&uci);
|
|
}
|
|
|
|
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
|
|
{
|
|
- struct cpuinfo_x86 *c = &cpu_data(cpu_num);
|
|
- unsigned int val[2];
|
|
-
|
|
- memset(csig, 0, sizeof(*csig));
|
|
-
|
|
- csig->sig = cpuid_eax(0x00000001);
|
|
-
|
|
- if ((c->x86_model >= 5) || (c->x86 > 6)) {
|
|
- /* get processor flags from MSR 0x17 */
|
|
- rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
|
|
- csig->pf = 1 << ((val[1] >> 18) & 7);
|
|
- }
|
|
-
|
|
- csig->rev = c->microcode;
|
|
-
|
|
+ intel_collect_cpu_info(csig);
|
|
return 0;
|
|
}
|
|
|
|
-static enum ucode_state apply_microcode_intel(int cpu)
|
|
+static enum ucode_state apply_microcode_late(int cpu)
|
|
{
|
|
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
|
|
- struct cpuinfo_x86 *c = &cpu_data(cpu);
|
|
- bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
|
|
- struct microcode_intel *mc;
|
|
+ struct microcode_intel *mc = ucode_patch_late;
|
|
enum ucode_state ret;
|
|
- static int prev_rev;
|
|
- u32 rev;
|
|
+ u32 cur_rev;
|
|
|
|
- /* We should bind the task to the CPU */
|
|
- if (WARN_ON(raw_smp_processor_id() != cpu))
|
|
+ if (WARN_ON_ONCE(smp_processor_id() != cpu))
|
|
return UCODE_ERROR;
|
|
|
|
- /* Look for a newer patch in our cache: */
|
|
- mc = find_patch(uci);
|
|
- if (!mc) {
|
|
- mc = uci->mc;
|
|
- if (!mc)
|
|
- return UCODE_NFOUND;
|
|
- }
|
|
+ ret = __apply_microcode(uci, mc, &cur_rev);
|
|
+ if (ret != UCODE_UPDATED && ret != UCODE_OK)
|
|
+ return ret;
|
|
|
|
- /*
|
|
- * Save us the MSR write below - which is a particular expensive
|
|
- * operation - when the other hyperthread has updated the microcode
|
|
- * already.
|
|
- */
|
|
- rev = intel_get_microcode_revision();
|
|
- if (rev >= mc->hdr.rev) {
|
|
- ret = UCODE_OK;
|
|
- goto out;
|
|
- }
|
|
-
|
|
- /*
|
|
- * Writeback and invalidate caches before updating microcode to avoid
|
|
- * internal issues depending on what the microcode is updating.
|
|
- */
|
|
- native_wbinvd();
|
|
-
|
|
- /* write microcode via MSR 0x79 */
|
|
- wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
|
|
-
|
|
- rev = intel_get_microcode_revision();
|
|
-
|
|
- if (rev != mc->hdr.rev) {
|
|
- pr_err("CPU%d update to revision 0x%x failed\n",
|
|
- cpu, mc->hdr.rev);
|
|
- return UCODE_ERROR;
|
|
- }
|
|
-
|
|
- if (bsp && rev != prev_rev) {
|
|
- pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
|
|
- rev,
|
|
- mc->hdr.date & 0xffff,
|
|
- mc->hdr.date >> 24,
|
|
+ if (!cpu && uci->cpu_sig.rev != cur_rev) {
|
|
+ pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n",
|
|
+ uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24,
|
|
(mc->hdr.date >> 16) & 0xff);
|
|
- prev_rev = rev;
|
|
}
|
|
|
|
- ret = UCODE_UPDATED;
|
|
-
|
|
-out:
|
|
- uci->cpu_sig.rev = rev;
|
|
- c->microcode = rev;
|
|
-
|
|
- /* Update boot_cpu_data's revision too, if we're on the BSP: */
|
|
- if (bsp)
|
|
- boot_cpu_data.microcode = rev;
|
|
+ cpu_data(cpu).microcode = uci->cpu_sig.rev;
|
|
+ if (!cpu)
|
|
+ boot_cpu_data.microcode = uci->cpu_sig.rev;
|
|
|
|
return ret;
|
|
}
|
|
|
|
-static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
|
|
+static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
|
|
{
|
|
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
|
|
- unsigned int curr_mc_size = 0, new_mc_size = 0;
|
|
- enum ucode_state ret = UCODE_OK;
|
|
- int new_rev = uci->cpu_sig.rev;
|
|
+ int cur_rev = uci->cpu_sig.rev;
|
|
+ unsigned int curr_mc_size = 0;
|
|
u8 *new_mc = NULL, *mc = NULL;
|
|
- unsigned int csig, cpf;
|
|
+
|
|
+ if (force_minrev)
|
|
+ return UCODE_NFOUND;
|
|
|
|
while (iov_iter_count(iter)) {
|
|
struct microcode_header_intel mc_header;
|
|
@@ -758,68 +481,61 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
|
|
|
|
if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
|
|
pr_err("error! Truncated or inaccessible header in microcode data file\n");
|
|
- break;
|
|
+ goto fail;
|
|
}
|
|
|
|
mc_size = get_totalsize(&mc_header);
|
|
if (mc_size < sizeof(mc_header)) {
|
|
pr_err("error! Bad data in microcode data file (totalsize too small)\n");
|
|
- break;
|
|
+ goto fail;
|
|
}
|
|
data_size = mc_size - sizeof(mc_header);
|
|
if (data_size > iov_iter_count(iter)) {
|
|
pr_err("error! Bad data in microcode data file (truncated file?)\n");
|
|
- break;
|
|
+ goto fail;
|
|
}
|
|
|
|
/* For performance reasons, reuse mc area when possible */
|
|
if (!mc || mc_size > curr_mc_size) {
|
|
- vfree(mc);
|
|
- mc = vmalloc(mc_size);
|
|
+ kvfree(mc);
|
|
+ mc = kvmalloc(mc_size, GFP_KERNEL);
|
|
if (!mc)
|
|
- break;
|
|
+ goto fail;
|
|
curr_mc_size = mc_size;
|
|
}
|
|
|
|
memcpy(mc, &mc_header, sizeof(mc_header));
|
|
data = mc + sizeof(mc_header);
|
|
if (!copy_from_iter_full(data, data_size, iter) ||
|
|
- intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
|
|
- break;
|
|
- }
|
|
+ intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
|
|
+ goto fail;
|
|
|
|
- csig = uci->cpu_sig.sig;
|
|
- cpf = uci->cpu_sig.pf;
|
|
- if (has_newer_microcode(mc, csig, cpf, new_rev)) {
|
|
- vfree(new_mc);
|
|
- new_rev = mc_header.rev;
|
|
- new_mc = mc;
|
|
- new_mc_size = mc_size;
|
|
- mc = NULL; /* trigger new vmalloc */
|
|
- ret = UCODE_NEW;
|
|
- }
|
|
- }
|
|
+ if (cur_rev >= mc_header.rev)
|
|
+ continue;
|
|
|
|
- vfree(mc);
|
|
+ if (!intel_find_matching_signature(mc, &uci->cpu_sig))
|
|
+ continue;
|
|
|
|
- if (iov_iter_count(iter)) {
|
|
- vfree(new_mc);
|
|
- return UCODE_ERROR;
|
|
+ kvfree(new_mc);
|
|
+ cur_rev = mc_header.rev;
|
|
+ new_mc = mc;
|
|
+ mc = NULL;
|
|
}
|
|
|
|
+ if (iov_iter_count(iter))
|
|
+ goto fail;
|
|
+
|
|
+ kvfree(mc);
|
|
if (!new_mc)
|
|
return UCODE_NFOUND;
|
|
|
|
- vfree(uci->mc);
|
|
- uci->mc = (struct microcode_intel *)new_mc;
|
|
-
|
|
- /* Save for CPU hotplug */
|
|
- save_microcode_patch(uci, new_mc, new_mc_size);
|
|
+ ucode_patch_late = (struct microcode_intel *)new_mc;
|
|
+ return UCODE_NEW;
|
|
|
|
- pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
|
|
- cpu, new_rev, uci->cpu_sig.rev);
|
|
-
|
|
- return ret;
|
|
+fail:
|
|
+ kvfree(mc);
|
|
+ kvfree(new_mc);
|
|
+ return UCODE_ERROR;
|
|
}
|
|
|
|
static bool is_blacklisted(unsigned int cpu)
|
|
@@ -829,7 +545,7 @@ static bool is_blacklisted(unsigned int cpu)
|
|
/*
|
|
* Late loading on model 79 with microcode revision less than 0x0b000021
|
|
* and LLC size per core bigger than 2.5MB may result in a system hang.
|
|
- * This behavior is documented in item BDF90, #334165 (Intel Xeon
|
|
+ * This behavior is documented in item BDX90, #334165 (Intel Xeon
|
|
* Processor E7-8800/4800 v4 Product Family).
|
|
*/
|
|
if (c->x86 == 6 &&
|
|
@@ -837,7 +553,7 @@ static bool is_blacklisted(unsigned int cpu)
|
|
c->x86_stepping == 0x01 &&
|
|
llc_size_per_core > 2621440 &&
|
|
c->microcode < 0x0b000021) {
|
|
- pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
|
|
+ pr_err_once("Erratum BDX90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
|
|
pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
|
|
return true;
|
|
}
|
|
@@ -868,26 +584,36 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
|
|
kvec.iov_base = (void *)firmware->data;
|
|
kvec.iov_len = firmware->size;
|
|
iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
|
|
- ret = generic_load_microcode(cpu, &iter);
|
|
+ ret = parse_microcode_blobs(cpu, &iter);
|
|
|
|
release_firmware(firmware);
|
|
|
|
return ret;
|
|
}
|
|
|
|
+static void finalize_late_load(int result)
|
|
+{
|
|
+ if (!result)
|
|
+ update_ucode_pointer(ucode_patch_late);
|
|
+ else
|
|
+ kvfree(ucode_patch_late);
|
|
+ ucode_patch_late = NULL;
|
|
+}
|
|
+
|
|
static struct microcode_ops microcode_intel_ops = {
|
|
- .request_microcode_fw = request_microcode_fw,
|
|
- .collect_cpu_info = collect_cpu_info,
|
|
- .apply_microcode = apply_microcode_intel,
|
|
+ .request_microcode_fw = request_microcode_fw,
|
|
+ .collect_cpu_info = collect_cpu_info,
|
|
+ .apply_microcode = apply_microcode_late,
|
|
+ .finalize_late_load = finalize_late_load,
|
|
+ .use_nmi = IS_ENABLED(CONFIG_X86_64),
|
|
};
|
|
|
|
-static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
|
|
+static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
|
|
{
|
|
u64 llc_size = c->x86_cache_size * 1024ULL;
|
|
|
|
do_div(llc_size, c->x86_max_cores);
|
|
-
|
|
- return (int)llc_size;
|
|
+ llc_size_per_core = (unsigned int)llc_size;
|
|
}
|
|
|
|
struct microcode_ops * __init init_intel_microcode(void)
|
|
@@ -900,7 +626,7 @@ struct microcode_ops * __init init_intel_microcode(void)
|
|
return NULL;
|
|
}
|
|
|
|
- llc_size_per_core = calc_llc_size_per_core(c);
|
|
+ calc_llc_size_per_core(c);
|
|
|
|
return µcode_intel_ops;
|
|
}
|
|
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index bf883aa712330a..21776c529fa97a 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -8,43 +8,43 @@
#include <asm/cpu.h>
#include <asm/microcode.h>

-struct ucode_patch {
-	struct list_head plist;
-	void *data; /* Intel uses only this one */
-	unsigned int size;
-	u32 patch_id;
-	u16 equiv_cpu;
-};
-
-extern struct list_head microcode_cache;
-
struct device;

enum ucode_state {
	UCODE_OK = 0,
	UCODE_NEW,
+	UCODE_NEW_SAFE,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
+	UCODE_TIMEOUT,
+	UCODE_OFFLINE,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
-
	void (*microcode_fini_cpu)(int cpu);

	/*
-	 * The generic 'microcode_core' part guarantees that
-	 * the callbacks below run on a target cpu when they
-	 * are being called.
+	 * The generic 'microcode_core' part guarantees that the callbacks
+	 * below run on a target CPU when they are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
-	enum ucode_state (*apply_microcode)(int cpu);
-	int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+	enum ucode_state	(*apply_microcode)(int cpu);
+	int			(*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+	void			(*finalize_late_load)(int result);
+	unsigned int		nmi_safe	: 1,
+				use_nmi		: 1;
+};
+
+struct early_load_data {
+	u32 old_rev;
+	u32 new_rev;
};

+extern struct early_load_data early_data;
extern struct ucode_cpu_info ucode_cpu_info[];
-struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
+struct cpio_data find_microcode_in_initrd(const char *path);

#define MAX_UCODE_COUNT 128

@@ -94,20 +94,19 @@ static inline unsigned int x86_cpuid_family(void)
	return x86_family(eax);
}

-extern bool initrd_gone;
+extern bool dis_ucode_ldr;
+extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD
-void load_ucode_amd_bsp(unsigned int family);
+void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
-void load_ucode_amd_early(unsigned int cpuid_1_eax);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
-static inline void load_ucode_amd_bsp(unsigned int family) { }
+static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
-static inline void load_ucode_amd_early(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
@@ -115,15 +114,13 @@ static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL
-void load_ucode_intel_bsp(void);
+void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
-int save_microcode_in_initrd_intel(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
-static inline void load_ucode_intel_bsp(void) { }
+static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
-static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 246a609f889b20..bde27a35bf2e28 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -19,6 +19,7 @@
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
+#include <asm/microcode.h>
#include <asm/tlbflush.h>
#include <asm/bootparam_utils.h>

@@ -34,6 +35,8 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void)
	/* Make sure IDT is set up before any exception happens */
	idt_setup_early_handler();

+	load_ucode_bsp();
+
	cr4_init_shadow();

	sanitize_boot_params(&boot_params);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c9318993f95945..63f6ff4b28eb17 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -118,11 +118,6 @@ SYM_CODE_START(startup_32)
	movl %eax, pa(olpc_ofw_pgd)
#endif

-#ifdef CONFIG_MICROCODE
-	/* Early load ucode on BSP. */
-	call load_ucode_bsp
-#endif
-
	/* Create early pagetables. */
	call mk_early_pgtbl_32

@@ -157,11 +152,6 @@ SYM_FUNC_START(startup_32_smp)
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

-#ifdef CONFIG_MICROCODE
-	/* Early load ucode on AP. */
-	call load_ucode_ap
-#endif
-
.Ldefault_entry:
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 87aee638e1a5d8..6da2cfa23c2939 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
+#include <asm/microcode.h>
#include <asm/sev.h>

#define CREATE_TRACE_POINTS
@@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs)

	instrumentation_begin();

+	if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
+		goto out;
+
	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
@@ -498,8 +502,11 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
		raw_atomic_long_inc(&nsp->idt_calls);

-	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
+	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) {
+		if (microcode_nmi_handler_enabled())
+			microcode_offline_nmi_handler();
		return;
+	}

	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a8f2ab816d5ae2..77f0d9ccb2c004 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -259,12 +259,9 @@ static void notrace start_secondary(void *unused)
	cpu_init_exception_handling();

	/*
-	 * 32-bit systems load the microcode from the ASM startup code for
-	 * historical reasons.
-	 *
-	 * On 64-bit systems load it before reaching the AP alive
-	 * synchronization point below so it is not part of the full per
-	 * CPU serialized bringup part when "parallel" bringup is enabled.
+	 * Load the microcode before reaching the AP alive synchronization
+	 * point below so it is not part of the full per CPU serialized
+	 * bringup part when "parallel" bringup is enabled.
	 *
	 * That's even safe when hyperthreading is enabled in the CPU as
	 * the core code starts the primary threads first and leaves the
@@ -277,8 +274,7 @@ static void notrace start_secondary(void *unused)
	 * CPUID, MSRs etc. must be strictly serialized to maintain
	 * software state correctness.
	 */
-	if (IS_ENABLED(CONFIG_X86_64))
-		load_ucode_ap();
+	load_ucode_ap();

	/*
	 * Synchronization point with the hotplug core. Sets this CPUs
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index e62ffffe5fb8d4..4ce5681be18f05 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1562,8 +1562,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
			goto out_fw;
		}

-		ret = regmap_raw_write_async(regmap, reg, buf->buf,
-					     le32_to_cpu(region->len));
+		ret = regmap_raw_write(regmap, reg, buf->buf,
+				       le32_to_cpu(region->len));
		if (ret != 0) {
			cs_dsp_err(dsp,
				   "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -1578,12 +1578,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
		regions++;
	}

-	ret = regmap_async_complete(regmap);
-	if (ret != 0) {
-		cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
-		goto out_fw;
-	}
-
	if (pos > firmware->size)
		cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
			    file, regions, pos - firmware->size);
@@ -1591,7 +1585,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
	cs_dsp_debugfs_save_wmfwname(dsp, file);

out_fw:
-	regmap_async_complete(regmap);
	cs_dsp_buf_free(&buf_list);
	kfree(text);

@@ -2287,8 +2280,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
			cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
				   file, blocks, le32_to_cpu(blk->len),
				   reg);
-			ret = regmap_raw_write_async(regmap, reg, buf->buf,
-						     le32_to_cpu(blk->len));
+			ret = regmap_raw_write(regmap, reg, buf->buf,
+					       le32_to_cpu(blk->len));
			if (ret != 0) {
				cs_dsp_err(dsp,
					   "%s.%d: Failed to write to %x in %s: %d\n",
@@ -2300,10 +2293,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
		blocks++;
	}

-	ret = regmap_async_complete(regmap);
-	if (ret != 0)
-		cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
-
	if (pos > firmware->size)
		cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
			    file, blocks, pos - firmware->size);
@@ -2311,7 +2300,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
	cs_dsp_debugfs_save_binname(dsp, file);

out_fw:
-	regmap_async_complete(regmap);
	cs_dsp_buf_free(&buf_list);
	kfree(text);

@@ -2523,8 +2511,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
	int ret;

-	ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
-				       ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+	ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+				 ADSP2_SYS_ENA, ADSP2_SYS_ENA);
	if (ret != 0)
		return ret;

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 51467f132c2604..da47e68b10ce0d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -891,6 +891,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
+	int i;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
@@ -912,6 +913,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
		}
	}
	drm_connector_list_iter_end(&iter);
+
+	/* Update reference counts for HPDs */
+	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+		if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+			drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
+	}
}

/**
@@ -927,6 +934,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
+	int i;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
@@ -947,4 +955,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
		}
	}
	drm_connector_list_iter_end(&iter);
+
+	/* Update reference counts for HPDs */
+	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+		if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+			drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
+	}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 08ce3bb8f640d9..fe96bab7d05d7b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -51,7 +51,8 @@ static bool link_supports_psrsu(struct dc_link *link)
	    !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
		return false;

-	return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+	/* Temporarily disable PSR-SU to avoid glitches */
+	return false;
}

/*
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index c8586cb7d0fec5..4d193313a6d6e3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3043,6 +3043,7 @@ static int kv_dpm_hw_init(void *handle)
	if (!amdgpu_dpm)
		return 0;

+	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);
	if (ret)
@@ -3050,6 +3051,8 @@ static int kv_dpm_hw_init(void *handle)
	else
		adev->pm.dpm_enabled = true;
	amdgpu_legacy_dpm_compute_clocks(adev);
+	mutex_unlock(&adev->pm.mutex);
+
	return ret;
}

@@ -3067,32 +3070,42 @@ static int kv_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	cancel_work_sync(&adev->pm.dpm.thermal.work);
+
	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm_enabled = false;
		/* disable dpm */
		kv_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		mutex_unlock(&adev->pm.mutex);
	}
	return 0;
}

static int kv_dpm_resume(void *handle)
{
-	int ret;
+	int ret = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	if (adev->pm.dpm_enabled) {
+	if (!amdgpu_dpm)
+		return 0;
+
+	if (!adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
		/* asic init will reset to the boot state */
		kv_dpm_setup_asic(adev);
		ret = kv_dpm_enable(adev);
-		if (ret)
+		if (ret) {
			adev->pm.dpm_enabled = false;
-		else
+		} else {
			adev->pm.dpm_enabled = true;
-		if (adev->pm.dpm_enabled)
			amdgpu_legacy_dpm_compute_clocks(adev);
+		}
+		mutex_unlock(&adev->pm.mutex);
	}
-	return 0;
+	return ret;
}

static bool kv_dpm_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 60377747bab4fc..48ad413d72afe7 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1018,9 +1018,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

-	if (!adev->pm.dpm_enabled)
-		return;
+	mutex_lock(&adev->pm.mutex);

+	if (!adev->pm.dpm_enabled) {
+		mutex_unlock(&adev->pm.mutex);
+		return;
+	}
	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
				   AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp,
@@ -1042,4 +1045,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
	adev->pm.dpm.state = dpm_state;

	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+	mutex_unlock(&adev->pm.mutex);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 99dde52a429013..e7b1fa2feb9288 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7789,6 +7789,7 @@ static int si_dpm_hw_init(void *handle)
	if (!amdgpu_dpm)
		return 0;

+	mutex_lock(&adev->pm.mutex);
	si_dpm_setup_asic(adev);
	ret = si_dpm_enable(adev);
	if (ret)
@@ -7796,6 +7797,7 @@ static int si_dpm_hw_init(void *handle)
	else
		adev->pm.dpm_enabled = true;
	amdgpu_legacy_dpm_compute_clocks(adev);
+	mutex_unlock(&adev->pm.mutex);
	return ret;
}

@@ -7813,32 +7815,44 @@ static int si_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	cancel_work_sync(&adev->pm.dpm.thermal.work);
+
	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm_enabled = false;
		/* disable dpm */
		si_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		mutex_unlock(&adev->pm.mutex);
	}
+
	return 0;
}

static int si_dpm_resume(void *handle)
{
-	int ret;
+	int ret = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	if (adev->pm.dpm_enabled) {
+	if (!amdgpu_dpm)
+		return 0;
+
+	if (!adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
+		mutex_lock(&adev->pm.mutex);
		si_dpm_setup_asic(adev);
		ret = si_dpm_enable(adev);
-		if (ret)
+		if (ret) {
			adev->pm.dpm_enabled = false;
-		else
+		} else {
			adev->pm.dpm_enabled = true;
-		if (adev->pm.dpm_enabled)
			amdgpu_legacy_dpm_compute_clocks(adev);
+		}
+		mutex_unlock(&adev->pm.mutex);
	}
-	return 0;
+
+	return ret;
}

static bool si_dpm_is_idle(void *handle)
diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
index ebae6035701db7..d74625d72f4c52 100644
--- a/drivers/i2c/busses/i2c-ls2x.c
+++ b/drivers/i2c/busses/i2c-ls2x.c
@@ -10,6 +10,7 @@
 * Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
 */

+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -26,7 +27,8 @@
#include <linux/units.h>

/* I2C Registers */
-#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
+#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */
+#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */
#define I2C_LS2X_CTR 0x2 /* Control Register */
#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
 */
static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
{
+	u16 val;
	struct i2c_timings *t = &priv->i2c_t;
	struct device *dev = priv->adapter.dev.parent;
	u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
	else
		t->bus_freq_hz = LS2X_I2C_FREQ_STD;

-	/* Calculate and set i2c frequency. */
-	writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
-	       priv->base + I2C_LS2X_PRER);
+	/*
+	 * According to the chip manual, we can only access the registers as bytes,
+	 * otherwise the high bits will be truncated.
+	 * So set the I2C frequency with a sequential writeb() instead of writew().
+	 */
+	val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
+	writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
+	writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
}

static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index ae4bae63ad4f3c..91f508d50e7ab4 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2333,6 +2333,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
	if (irq < 0)
		return irq;

+	/*
+	 * Disable the interrupt to avoid the interrupt handler being triggered
+	 * incorrectly by the asynchronous interrupt status since the machine
+	 * might do a warm reset during the last smbus/i2c transfer session.
+	 */
+	npcm_i2c_int_enable(bus, false);
+
	ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
			       dev_name(bus->dev), bus);
	if (ret)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 45500d2d5b4bb5..44842f243f40b5 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -56,6 +56,7 @@
#include <asm/nospec-branch.h>
#include <asm/mwait.h>
#include <asm/msr.h>
+#include <asm/tsc.h>
#include <asm/fpu/api.h>

#define INTEL_IDLE_VERSION "0.5.1"
@@ -1573,6 +1574,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
		if (intel_idle_state_needs_timer_stop(state))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

+		if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+			mark_tsc_unstable("TSC halts in idle");
+
		state->enter = intel_idle;
		state->enter_s2idle = intel_idle_s2idle;
	}
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index ec5efdc1666013..9f97bef0214975 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -342,6 +342,10 @@ static ssize_t rate_show(struct ib_device *ibdev, u32 port_num,
		speed = " NDR";
		rate = 1000;
		break;
+	case IB_SPEED_XDR:
+		speed = " XDR";
+		rate = 2000;
+		break;
	case IB_SPEED_SDR:
	default: /* default to SDR for invalid rates */
		speed = " SDR";
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 049684880ae03d..fb0555647336f4 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -203,6 +203,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(

	copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);
	resp.port_cap_flags2 = attr.port_cap_flags2;
+	resp.active_speed_ex = attr.active_speed;

	return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP,
					     &resp, sizeof(resp));
@@ -461,7 +462,7 @@ DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_ATTR_PTR_OUT(
		UVERBS_ATTR_QUERY_PORT_RESP,
		UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex,
-				   reserved),
+				   active_speed_ex),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 186ed3c22ec9e3..ba05de0380e96e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -147,6 +147,7 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
	case IB_RATE_50_GBPS: return 20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
+	case IB_RATE_800_GBPS: return 320;
	default: return -1;
	}
}
@@ -176,6 +177,7 @@ __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
	case 20: return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
+	case 320: return IB_RATE_800_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
@@ -205,6 +207,7 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
	case IB_RATE_50_GBPS: return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
+	case IB_RATE_800_GBPS: return 850000;
	default: return -1;
	}
}
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 85717482a616e7..6fa9b125329970 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -180,7 +180,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
-	req.alignment = 1;
+	req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 505bc47fd575d5..99036afb3aef0b 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -67,7 +67,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
		ah->av.tclass = grh->traffic_class;
	}

-	ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
+	ah->av.stat_rate_sl =
+		(mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (init_attr->xmit_slave)
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 8300ce6228350d..b049bba2157905 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -542,6 +542,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
				   struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
+	bool new = false;
	int err;

	if (!counter->id) {
@@ -556,6 +557,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
			return err;
		counter->id =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+		new = true;
	}

	err = mlx5_ib_qp_set_counter(qp, counter);
@@ -565,8 +567,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
	return 0;

fail_set_counter:
-	mlx5_ib_counter_dealloc(counter);
-	counter->id = 0;
+	if (new) {
+		mlx5_ib_counter_dealloc(counter);
+		counter->id = 0;
+	}

	return err;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 71a856409cee2c..0a9ae84600b204 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3433,11 +3433,11 @@ static int ib_to_mlx5_rate_map(u8 rate)
	return 0;
}

-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
{
	u32 stat_rate_support;

-	if (rate == IB_RATE_PORT_CURRENT)
+	if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
		return 0;

	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
@@ -3582,7 +3582,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
		       sizeof(grh->dgid.raw));
	}

-	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
+	err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
	if (err < 0)
		return err;
	MLX5_SET(ads, path, stat_rate, err);
@@ -4555,6 +4555,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,

		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
		MLX5_SET(dctc, dctc, counter_set_id, set_id);
+
+		qp->port = attr->port_num;
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		struct mlx5_ib_modify_qp_resp resp = {};
		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
@@ -5045,7 +5047,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
	}

	if (qp_attr_mask & IB_QP_PORT)
-		qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+		qp_attr->port_num = mqp->port;
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
		qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
	if (qp_attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index b6ee7c3ee1ca1b..2530e7730635f3 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
int mlx5_ib_qp_event_init(void);
void mlx5_ib_qp_event_cleanup(void);
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 78c972bb1d9623..9fb5a18e056d41 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1270,6 +1270,8 @@ struct macb {
	struct clk *rx_clk;
	struct clk *tsu_clk;
	struct net_device *dev;
+	/* Protects hw_stats and ethtool_stats */
+	spinlock_t stats_lock;
	union {
		struct macb_stats macb;
		struct gem_stats gem;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8f61731e4554ba..4325d0ace1f268 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1992,10 +1992,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
+			spin_lock(&bp->stats_lock);
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;
+			spin_unlock(&bp->stats_lock);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -3084,6 +3086,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
	if (!netif_running(bp->dev))
		return nstat;

+	spin_lock_irq(&bp->stats_lock);
	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -3113,6 +3116,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;
+	spin_unlock_irq(&bp->stats_lock);

	return nstat;
}
@@ -3120,12 +3124,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
-	struct macb *bp;
+	struct macb *bp = netdev_priv(dev);

-	bp = netdev_priv(dev);
+	spin_lock_irq(&bp->stats_lock);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
	       * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+	spin_unlock_irq(&bp->stats_lock);
}

static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3175,6 +3180,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
		return gem_get_stats(bp);

	/* read stats from hardware */
+	spin_lock_irq(&bp->stats_lock);
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
@@ -3208,6 +3214,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */
+	spin_unlock_irq(&bp->stats_lock);

	return nstat;
}
@@ -5063,6 +5070,7 @@ static int macb_probe(struct platform_device *pdev)
		}
	}
	spin_lock_init(&bp->lock);
+	spin_lock_init(&bp->stats_lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 87b27bd7a13bb1..9aa57134f460cd 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -145,6 +145,24 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
	return 0;
}

+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+	while (count--) {
+		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+		enetc_free_tx_frame(tx_ring, tx_swbd);
+		if (i == 0)
+			i = tx_ring->bd_count;
+		i--;
+	}
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
	bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -235,9 +253,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
	}

	if (do_onestep_tstamp) {
-		u32 lo, hi, val;
-		u64 sec, nsec;
+		__be32 new_sec_l, new_nsec;
+		u32 lo, hi, nsec, val;
+		__be16 new_sec_h;
		u8 *data;
+		u64 sec;

		lo = enetc_rd_hot(hw, ENETC_SICTR0);
		hi = enetc_rd_hot(hw, ENETC_SICTR1);
@@ -251,13 +271,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
		/* Update originTimestamp field of Sync packet
		 * - 48 bits seconds field
		 * - 32 bits nanseconds field
+		 *
+		 * In addition, the UDP checksum needs to be updated
+		 * by software after updating originTimestamp field,
+		 * otherwise the hardware will calculate the wrong
+		 * checksum when updating the correction field and
+		 * update it to the packet.
		 */
		data = skb_mac_header(skb);
-		*(__be16 *)(data + offset2) =
-			htons((sec >> 32) & 0xffff);
-		*(__be32 *)(data + offset2 + 2) =
-			htonl(sec & 0xffffffff);
-		*(__be32 *)(data + offset2 + 6) = htonl(nsec);
+		new_sec_h = htons((sec >> 32) & 0xffff);
+		new_sec_l = htonl(sec & 0xffffffff);
+		new_nsec = htonl(nsec);
+		if (udp) {
+			struct udphdr *uh = udp_hdr(skb);
+			__be32 old_sec_l, old_nsec;
+			__be16 old_sec_h;
+
+			old_sec_h = *(__be16 *)(data + offset2);
+			inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+						 new_sec_h, false);
+
+			old_sec_l = *(__be32 *)(data + offset2 + 2);
+			inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+						 new_sec_l, false);
+
+			old_nsec = *(__be32 *)(data + offset2 + 6);
+			inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+						 new_nsec, false);
+		}
+
+		*(__be16 *)(data + offset2) = new_sec_h;
+		*(__be32 *)(data + offset2 + 2) = new_sec_l;
+		*(__be32 *)(data + offset2 + 6) = new_nsec;

		/* Configure single-step register */
		val = ENETC_PM0_SINGLE_STEP_EN;
@@ -328,25 +373,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
	dev_err(tx_ring->dev, "DMA map error");

-	do {
-		tx_swbd = &tx_ring->tx_swbd[i];
-		enetc_free_tx_frame(tx_ring, tx_swbd);
-		if (i == 0)
-			i = tx_ring->bd_count;
-		i--;
-	} while (count--);
+	enetc_unwind_tx_frame(tx_ring, count, i);

	return 0;
}

-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
-				 struct enetc_tx_swbd *tx_swbd,
-				 union enetc_tx_bd *txbd, int *i, int hdr_len,
-				 int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+				struct enetc_tx_swbd *tx_swbd,
+				union enetc_tx_bd *txbd, int *i, int hdr_len,
+				int data_len)
{
	union enetc_tx_bd txbd_tmp;
	u8 flags = 0, e_flags = 0;
	dma_addr_t addr;
+	int count = 1;

	enetc_clear_tx_bd(&txbd_tmp);
	addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -389,7 +429,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
		/* Write the BD */
		txbd_tmp.ext.e_flags = e_flags;
		*txbd = txbd_tmp;
+		count++;
	}
+
+	return count;
}

static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -521,9 +564,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb

		/* compute the csum over the L4 header */
		csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
-		enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+		count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+					      &i, hdr_len, data_len);
		bd_data_num = 0;
-		count++;

		while (data_len > 0) {
			int size;
@@ -547,8 +590,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
			err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
						    tso.data, size,
						    size == data_len);
-			if (err)
+			if (err) {
+				if (i == 0)
+					i = tx_ring->bd_count;
+				i--;
+
				goto err_map_data;
+			}

			data_len -= size;
			count++;
@@ -577,13 +625,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
	dev_err(tx_ring->dev, "DMA map error");

err_chained_bd:
-	do {
-		tx_swbd = &tx_ring->tx_swbd[i];
-		enetc_free_tx_frame(tx_ring, tx_swbd);
-		if (i == 0)
-			i = tx_ring->bd_count;
-		i--;
-	} while (count--);
+	enetc_unwind_tx_frame(tx_ring, count, i);

	return 0;
}
@@ -1626,7 +1668,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
			enetc_xdp_drop(rx_ring, orig_i, i);
			tx_ring->stats.xdp_tx_drops++;
		} else {
-			tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+			tx_ring->stats.xdp_tx++;
			rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
			xdp_tx_frm_cnt++;
			/* The XDP_TX enqueue was successful, so we
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 0d1e681be25070..d59e28c8677523 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1030,6 +1030,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
	return gve_xdp_tx_queue_id(priv, 0);
}

+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+	switch (priv->queue_format) {
+	case GVE_GQI_QPL_FORMAT:
+		return true;
+	default:
+		return false;
+	}
+}
+
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 90d433b36799fb..8cd098fe88ef26 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1753,6 +1753,8 @@ static void gve_turndown(struct gve_priv *priv)
	/* Stop tx queues */
	netif_tx_disable(priv->dev);

+	xdp_features_clear_redirect_target(priv->dev);
+
	gve_clear_napi_enabled(priv);
	gve_clear_report_stats(priv);

@@ -1793,6 +1795,9 @@ static void gve_turnup(struct gve_priv *priv)
		}
	}

+	if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+		xdp_features_set_redirect_target(priv->dev, false);
+
	gve_set_napi_enabled(priv);
}

@@ -2014,7 +2019,6 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
		xdp_features = NETDEV_XDP_ACT_BASIC;
		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
-		xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
	} else {
		xdp_features = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index f943964ec05ae7..e29a7ffd5f1437 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -202,6 +202,7 @@ enum ice_feature {
	ICE_F_GNSS,
	ICE_F_ROCE_LAG,
	ICE_F_SRIOV_LAG,
+	ICE_F_MBX_LIMIT,
	ICE_F_MAX
};

diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 80deca45ab599a..983332cbace21f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
@@ -153,6 +153,12 @@ static int ice_set_mac_type(struct ice_hw *hw)
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
+	case ICE_DEV_ID_E830_BACKPLANE:
+	case ICE_DEV_ID_E830_QSFP56:
+	case ICE_DEV_ID_E830_SFP:
+	case ICE_DEV_ID_E830_SFP_DD:
+		hw->mac_type = ICE_MAC_E830;
+		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
@@ -684,8 +690,7 @@ static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
-	u16 fc_thres_val, tx_timer_val;
-	u32 val;
+	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
@@ -694,19 +699,32 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
-#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
-
-	/* Retrieve the transmit timer */
-	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
-	tx_timer_val = val &
-		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
-	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
-
-	/* Retrieve the FC threshold */
-	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
-	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
-
-	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
+#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
+#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR
+
+	if (hw->mac_type == ICE_MAC_E830) {
+		/* Retrieve the transmit timer */
+		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
+		cmd->tx_tmr_value =
+			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);
+
+		/* Retrieve the fc threshold */
+		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
+		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
+	} else {
+		/* Retrieve the transmit timer */
+		val = rd32(hw,
+			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
+		cmd->tx_tmr_value =
+			le16_encode_bits(val,
+					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);
+
+		/* Retrieve the fc threshold */
+		val = rd32(hw,
+			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
+		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
+	}
+	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
@@ -2389,16 +2407,21 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
-	u32 reg_val, val;
+	u32 reg_val, gsize, bsize;

	reg_val = rd32(hw, GLQF_FD_SIZE);
-	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
-		GLQF_FD_SIZE_FD_GSIZE_S;
-	func_p->fd_fltr_guar =
-		ice_get_num_per_func(hw, val);
-	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
-		GLQF_FD_SIZE_FD_BSIZE_S;
-	func_p->fd_fltr_best_effort = val;
+	switch (hw->mac_type) {
+	case ICE_MAC_E830:
+		gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
+		bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
+		break;
+	case ICE_MAC_E810:
+	default:
+		gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
+		bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
+	}
+	func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
+	func_p->fd_fltr_best_effort = bsize;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 6d560d1c74a4a3..a2d384dbfc767b 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
 
 #ifndef _ICE_DEVIDS_H_
 #define _ICE_DEVIDS_H_
@@ -16,6 +16,14 @@
 #define ICE_DEV_ID_E823L_1GBE		0x124F
 /* Intel(R) Ethernet Connection E823-L for QSFP */
 #define ICE_DEV_ID_E823L_QSFP		0x151D
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE	0x12D1
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830_QSFP56		0x12D2
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#define ICE_DEV_ID_E830_SFP		0x12D3
+/* Intel(R) Ethernet Controller E830-C for SFP-DD */
+#define ICE_DEV_ID_E830_SFP_DD		0x12D4
 /* Intel(R) Ethernet Controller E810-C for backplane */
 #define ICE_DEV_ID_E810C_BACKPLANE	0x1591
 /* Intel(R) Ethernet Controller E810-C for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index b6bbf2376ef5c1..d43b642cbc01cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018-2020, Intel Corporation. */
+/* Copyright (C) 2018-2023, Intel Corporation. */
 
 /* flow director ethtool support for ice */
 
@@ -540,16 +540,24 @@ int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
 	/* total guaranteed filters assigned to this VSI */
 	num_guar = vsi->num_gfltr;
 
-	/* minus the guaranteed filters programed by this VSI */
-	num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
-		     VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
-
 	/* total global best effort filters */
 	num_be = hw->func_caps.fd_fltr_best_effort;
 
-	/* minus the global best effort filters programmed */
-	num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
-		  GLQF_FD_CNT_FD_BCNT_S;
+	/* Subtract the number of programmed filters from the global values */
+	switch (hw->mac_type) {
+	case ICE_MAC_E830:
+		num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
+				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
+		num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
+				    rd32(hw, GLQF_FD_CNT));
+		break;
+	case ICE_MAC_E810:
+	default:
+		num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
+				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
+		num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
+				    rd32(hw, GLQF_FD_CNT));
+	}
 
 	return num_guar + num_be;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 531cc2194741ed..96f70c0a965980 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
 
 /* Machine-generated file */
 
@@ -284,11 +284,11 @@
 #define VPLAN_TX_QBASE_VFNUMQ_M			ICE_M(0xFF, 16)
 #define VPLAN_TXQ_MAPENA(_VF)			(0x00073800 + ((_VF) * 4))
 #define VPLAN_TXQ_MAPENA_TX_ENA_M		BIT(0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i)	(0x001E36E0 + ((_i) * 32))
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX	8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i)	(0x001E3800 + ((_i) * 32))
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M	ICE_M(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT(_i)	(0x001E36E0 + ((_i) * 32))
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX	8
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M	GENMASK(15, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR(_i)	(0x001E3800 + ((_i) * 32))
+#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M	GENMASK(15, 0)
 #define GL_MDCK_TX_TDPU				0x00049348
 #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
 #define GL_MDET_RX				0x00294C00
@@ -311,7 +311,11 @@
 #define GL_MDET_TX_PQM_MAL_TYPE_S		26
 #define GL_MDET_TX_PQM_MAL_TYPE_M		ICE_M(0x1F, 26)
 #define GL_MDET_TX_PQM_VALID_M			BIT(31)
-#define GL_MDET_TX_TCLAN			0x000FC068
+#define GL_MDET_TX_TCLAN_BY_MAC(hw)		\
+	((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MDET_TX_TCLAN : \
+	 E800_GL_MDET_TX_TCLAN)
+#define E800_GL_MDET_TX_TCLAN			0x000FC068
+#define E830_GL_MDET_TX_TCLAN			0x000FCCC0
 #define GL_MDET_TX_TCLAN_QNUM_S			0
 #define GL_MDET_TX_TCLAN_QNUM_M			ICE_M(0x7FFF, 0)
 #define GL_MDET_TX_TCLAN_VF_NUM_S		15
@@ -325,7 +329,11 @@
 #define PF_MDET_RX_VALID_M			BIT(0)
 #define PF_MDET_TX_PQM				0x002D2C80
 #define PF_MDET_TX_PQM_VALID_M			BIT(0)
-#define PF_MDET_TX_TCLAN			0x000FC000
+#define PF_MDET_TX_TCLAN_BY_MAC(hw)		\
+	((hw)->mac_type == ICE_MAC_E830 ? E830_PF_MDET_TX_TCLAN : \
+	 E800_PF_MDET_TX_TCLAN)
+#define E800_PF_MDET_TX_TCLAN			0x000FC000
+#define E830_PF_MDET_TX_TCLAN			0x000FCC00
 #define PF_MDET_TX_TCLAN_VALID_M		BIT(0)
 #define VP_MDET_RX(_VF)				(0x00294400 + ((_VF) * 4))
 #define VP_MDET_RX_VALID_M			BIT(0)
@@ -335,6 +343,8 @@
 #define VP_MDET_TX_TCLAN_VALID_M		BIT(0)
 #define VP_MDET_TX_TDPU(_VF)			(0x00040000 + ((_VF) * 4))
 #define VP_MDET_TX_TDPU_VALID_M			BIT(0)
+#define E800_GL_MNG_FWSM_FW_MODES_M		GENMASK(2, 0)
+#define E830_GL_MNG_FWSM_FW_MODES_M		GENMASK(1, 0)
 #define GL_MNG_FWSM				0x000B6134
 #define GL_MNG_FWSM_FW_LOADING_M		BIT(30)
 #define GLNVM_FLA				0x000B6108
@@ -363,13 +373,18 @@
 #define GL_PWR_MODE_CTL_CAR_MAX_BW_S		30
 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M		ICE_M(0x3, 30)
 #define GLQF_FD_CNT				0x00460018
+#define E800_GLQF_FD_CNT_FD_GCNT_M		GENMASK(14, 0)
+#define E830_GLQF_FD_CNT_FD_GCNT_M		GENMASK(15, 0)
 #define GLQF_FD_CNT_FD_BCNT_S			16
-#define GLQF_FD_CNT_FD_BCNT_M			ICE_M(0x7FFF, 16)
+#define E800_GLQF_FD_CNT_FD_BCNT_M		GENMASK(30, 16)
+#define E830_GLQF_FD_CNT_FD_BCNT_M		GENMASK(31, 16)
 #define GLQF_FD_SIZE				0x00460010
 #define GLQF_FD_SIZE_FD_GSIZE_S			0
-#define GLQF_FD_SIZE_FD_GSIZE_M			ICE_M(0x7FFF, 0)
+#define E800_GLQF_FD_SIZE_FD_GSIZE_M		GENMASK(14, 0)
+#define E830_GLQF_FD_SIZE_FD_GSIZE_M		GENMASK(15, 0)
 #define GLQF_FD_SIZE_FD_BSIZE_S			16
-#define GLQF_FD_SIZE_FD_BSIZE_M			ICE_M(0x7FFF, 16)
+#define E800_GLQF_FD_SIZE_FD_BSIZE_M		GENMASK(30, 16)
+#define E830_GLQF_FD_SIZE_FD_BSIZE_M		GENMASK(31, 16)
 #define GLQF_FDINSET(_i, _j)			(0x00412000 + ((_i) * 4 + (_j) * 512))
 #define GLQF_FDMASK(_i)				(0x00410800 + ((_i) * 4))
 #define GLQF_FDMASK_MAX_INDEX			31
@@ -388,6 +403,10 @@
 #define GLQF_HMASK_SEL(_i)			(0x00410000 + ((_i) * 4))
 #define GLQF_HMASK_SEL_MAX_INDEX		127
 #define GLQF_HMASK_SEL_MASK_SEL_S		0
+#define E800_PFQF_FD_CNT_FD_GCNT_M		GENMASK(14, 0)
+#define E830_PFQF_FD_CNT_FD_GCNT_M		GENMASK(15, 0)
+#define E800_PFQF_FD_CNT_FD_BCNT_M		GENMASK(30, 16)
+#define E830_PFQF_FD_CNT_FD_BCNT_M		GENMASK(31, 16)
 #define PFQF_FD_ENA				0x0043A000
 #define PFQF_FD_ENA_FD_ENA_M			BIT(0)
 #define PFQF_FD_SIZE				0x00460100
@@ -478,6 +497,7 @@
 #define GLTSYN_SYNC_DLAY			0x00088818
 #define GLTSYN_TGT_H_0(_i)			(0x00088930 + ((_i) * 4))
 #define GLTSYN_TGT_L_0(_i)			(0x00088928 + ((_i) * 4))
+#define GLTSYN_TIME_0(_i)			(0x000888C8 + ((_i) * 4))
 #define GLTSYN_TIME_H(_i)			(0x000888D8 + ((_i) * 4))
 #define GLTSYN_TIME_L(_i)			(0x000888D0 + ((_i) * 4))
 #define PFHH_SEM				0x000A4200 /* Reset Source: PFR */
@@ -486,9 +506,11 @@
 #define PFTSYN_SEM_BUSY_M			BIT(0)
 #define VSIQF_FD_CNT(_VSI)			(0x00464000 + ((_VSI) * 4))
 #define VSIQF_FD_CNT_FD_GCNT_S			0
-#define VSIQF_FD_CNT_FD_GCNT_M			ICE_M(0x3FFF, 0)
+#define E800_VSIQF_FD_CNT_FD_GCNT_M		GENMASK(13, 0)
+#define E830_VSIQF_FD_CNT_FD_GCNT_M		GENMASK(15, 0)
 #define VSIQF_FD_CNT_FD_BCNT_S			16
-#define VSIQF_FD_CNT_FD_BCNT_M			ICE_M(0x3FFF, 16)
+#define E800_VSIQF_FD_CNT_FD_BCNT_M		GENMASK(29, 16)
+#define E830_VSIQF_FD_CNT_FD_BCNT_M		GENMASK(31, 16)
 #define VSIQF_FD_SIZE(_VSI)			(0x00462000 + ((_VSI) * 4))
 #define VSIQF_HKEY_MAX_INDEX			12
 #define PFPM_APM				0x000B8080
@@ -500,7 +522,14 @@
 #define PFPM_WUS_MAG_M				BIT(1)
 #define PFPM_WUS_MNG_M				BIT(3)
 #define PFPM_WUS_FW_RST_WK_M			BIT(31)
+#define E830_PRTMAC_CL01_PS_QNT			0x001E32A0
+#define E830_PRTMAC_CL01_PS_QNT_CL0_M		GENMASK(15, 0)
+#define E830_PRTMAC_CL01_QNT_THR		0x001E3320
+#define E830_PRTMAC_CL01_QNT_THR_CL0_M		GENMASK(15, 0)
 #define VFINT_DYN_CTLN(_i)			(0x00003800 + ((_i) * 4))
 #define VFINT_DYN_CTLN_CLEARPBA_M		BIT(1)
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH	0x00234000
+#define E830_MBX_VF_DEC_TRIG(_VF)		(0x00233800 + (_VF) * 4)
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF)	(0x00233000 + (_VF) * 4)
 
 #endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 3a0ef56d3edcac..1fc4805353eb58 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -4023,6 +4023,9 @@ void ice_init_feature_support(struct ice_pf *pf)
 	default:
 		break;
 	}
+
+	if (pf->hw.mac_type == ICE_MAC_E830)
+		ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 9f12c9a0fe2968..0ae7bdfff83fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
 
 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
 
@@ -1514,12 +1514,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 			ice_vf_lan_overflow_event(pf, &event);
 			break;
 		case ice_mbx_opc_send_msg_to_pf:
-			data.num_msg_proc = i;
-			data.num_pending_arq = pending;
-			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
-			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
+				ice_vc_process_vf_msg(pf, &event, NULL);
+				ice_mbx_vf_dec_trig_e830(hw, &event);
+			} else {
+				u16 val = hw->mailboxq.num_rq_entries;
+
+				data.max_num_msgs_mbx = val;
+				val = ICE_MBX_OVERFLOW_WATERMARK;
+				data.async_watermark_val = val;
+				data.num_msg_proc = i;
+				data.num_pending_arq = pending;
 
-			ice_vc_process_vf_msg(pf, &event, &data);
+				ice_vc_process_vf_msg(pf, &event, &data);
+			}
 			break;
 		case ice_aqc_opc_fw_logging:
 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
@@ -1748,7 +1756,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
 	}
 
-	reg = rd32(hw, GL_MDET_TX_TCLAN);
+	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
 				GL_MDET_TX_TCLAN_PF_NUM_S;
@@ -1762,7 +1770,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 		if (netif_msg_tx_err(pf))
 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
 				 event, queue, pf_num, vf_num);
-		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
 	}
 
 	reg = rd32(hw, GL_MDET_RX);
@@ -1790,9 +1798,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
 	}
 
-	reg = rd32(hw, PF_MDET_TX_TCLAN);
+	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
-		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
 		if (netif_msg_tx_err(pf))
 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
 	}
@@ -3873,7 +3881,8 @@ static void ice_set_pf_caps(struct ice_pf *pf)
 	}
 
 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
-	if (func_caps->common_cap.ieee_1588)
+	if (func_caps->common_cap.ieee_1588 &&
+	    !(pf->hw.mac_type == ICE_MAC_E830))
 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
 
 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
@@ -3919,7 +3928,11 @@ static int ice_init_pf(struct ice_pf *pf)
 
 	mutex_init(&pf->vfs.table_lock);
 	hash_init(pf->vfs.table);
-	ice_mbx_init_snapshot(&pf->hw);
+	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+		wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
+		     ICE_MBX_OVERFLOW_WATERMARK);
+	else
+		ice_mbx_init_snapshot(&pf->hw);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 31314e7540f8cf..56345fe6537079 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
 
 	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
 		hash_del_rcu(&vf->entry);
+		ice_deinitialize_vf_entry(vf);
 		ice_put_vf(vf);
 	}
 }
@@ -194,9 +195,6 @@ void ice_free_vfs(struct ice_pf *pf)
 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 		}
 
-		/* clear malicious info since the VF is getting released */
-		list_del(&vf->mbx_info.list_entry);
-
 		mutex_unlock(&vf->cfg_lock);
 	}
 
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 5e353b0cbe6f73..35ee5b29ea34e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
 
 #ifndef _ICE_TYPE_H_
 #define _ICE_TYPE_H_
@@ -129,6 +129,7 @@ enum ice_set_fc_aq_failures {
 enum ice_mac_type {
 	ICE_MAC_UNKNOWN = 0,
 	ICE_MAC_E810,
+	ICE_MAC_E830,
 	ICE_MAC_GENERIC,
 };
 
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 03b9d7d748518c..58f9ac81dfbb2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -701,6 +701,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
 	return 0;
 }
 
+/**
+ * ice_reset_vf_mbx_cnt - reset VF mailbox message count
+ * @vf: pointer to the VF structure
+ *
+ * This function clears the VF mailbox message count, and should be called on
+ * VF reset.
+ */
+static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
+{
+	struct ice_pf *pf = vf->pf;
+
+	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
+	else
+		ice_mbx_clear_malvf(&vf->mbx_info);
+}
+
 /**
  * ice_reset_all_vfs - reset all allocated VFs in one go
  * @pf: pointer to the PF structure
@@ -727,7 +744,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
 
 	/* clear all malicious info if the VFs are getting reset */
 	ice_for_each_vf(pf, bkt, vf)
-		ice_mbx_clear_malvf(&vf->mbx_info);
+		ice_reset_vf_mbx_cnt(vf);
 
 	/* If VFs have been disabled, there is no need to reset */
 	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
@@ -944,7 +961,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 	ice_eswitch_update_repr(vsi);
 
 	/* if the VF has been reset allow it to come up again */
-	ice_mbx_clear_malvf(&vf->mbx_info);
+	ice_reset_vf_mbx_cnt(vf);
 
 out_unlock:
 	if (lag && lag->bonded && lag->primary &&
@@ -994,11 +1011,22 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
 	ice_vf_fdir_init(vf);
 
 	/* Initialize mailbox info for this VF */
-	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
+	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
+	else
+		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
 
 	mutex_init(&vf->cfg_lock);
 }
 
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+	struct ice_pf *pf = vf->pf;
+
+	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+		list_del(&vf->mbx_info.list_entry);
+}
+
 /**
  * ice_dis_vf_qs - Disable the VF queues
  * @vf: pointer to the VF structure
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09fa6..5392b040498621 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
 #endif
 
 void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
 void ice_dis_vf_qs(struct ice_vf *vf);
 int ice_check_vf_init(struct ice_vf *vf);
 enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index 40cb4ba0789ced..75c8113e58ee92 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -210,6 +210,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
 	return 0;
 }
 
+/**
+ * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter
+ * @hw: pointer to the HW struct
+ * @event: pointer to the control queue receive event
+ *
+ * This function triggers to decrement the counter
+ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
+ * the buffers at the PF mailbox queue.
+ */
+void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+			      const struct ice_rq_event_info *event)
+{
+	u16 vfid = le16_to_cpu(event->desc.retval);
+
+	wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
+}
+
+/**
+ * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
+ * @hw: pointer to the HW struct
+ * @vf_id: VF ID in the PF space
+ *
+ * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
+ * be called when a VF is created and on VF reset.
+ */
+void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id)
+{
+	u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
+
+	wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
+}
+
 /**
  * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
  * @hw: pointer to the HW struct
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
index 44bc030d17e07a..684de89e5c5ed7 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
 		      u8 *msg, u16 msglen, struct ice_sq_cd *cd);
 
 u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+			      const struct ice_rq_event_info *event);
+void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id);
 int
 ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
 			 struct ice_mbx_vf_info *vf_info, bool *report_malvf);
@@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
 {
 }
 
+static inline void
+ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+			 const struct ice_rq_event_info *event)
+{
+}
+
 #endif /* CONFIG_PCI_IOV */
 #endif /* _ICE_VF_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 9f7268bb2ee3b4..e709b10a29761b 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3899,8 +3899,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
  * @event: pointer to the AQ event
  * @mbxdata: information used to detect VF attempting mailbox overflow
  *
- * called from the common asq/arq handler to
- * process request from VF
+ * Called from the common asq/arq handler to process request from VF. When this
+ * flow is used for devices with hardware VF to PF message queue overflow
+ * support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf
+ * check is skipped.
 */
 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
 			   struct ice_mbx_data *mbxdata)
@@ -3926,7 +3928,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
 	mutex_lock(&vf->cfg_lock);
 
 	/* Check if the VF is trying to overflow the mailbox */
-	if (ice_is_malicious_vf(vf, mbxdata))
+	if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
 		goto finish;
 
 	/* Check if VF is disabled. */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 974c71490d97c0..3ca5f44dea26eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021, Intel Corporation. */
+/* Copyright (C) 2021-2023, Intel Corporation. */
 
 #include "ice.h"
 #include "ice_base.h"
@@ -1421,8 +1421,8 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
 */
 static void ice_vf_fdir_dump_info(struct ice_vf *vf)
 {
+	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
 	struct ice_vsi *vf_vsi;
-	u32 fd_size, fd_cnt;
 	struct device *dev;
 	struct ice_pf *pf;
 	struct ice_hw *hw;
@@ -1441,12 +1441,25 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf)
 
 	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
 	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
-	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
-		vf->vf_id,
-		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
-		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
-		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
-		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
+	switch (hw->mac_type) {
+	case ICE_MAC_E830:
+		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
+		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
+		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
+		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
+		break;
+	case ICE_MAC_E810:
+	default:
+		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
+		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
+		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
+		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
+	}
+
+	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
+		vf->vf_id, fd_size_g, fd_size_b);
+	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
+		vf->vf_id, fd_cnt_g, fd_cnt_b);
 }
 
 /**
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 40aeaa7bd739fa..d2757cc1161391 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
 		       MVPP2_PRS_RI_VLAN_MASK),
 	/* Non IP flow, with vlan tag */
 	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
-		       MVPP22_CLS_HEK_OPT_VLAN,
+		       MVPP22_CLS_HEK_TAGGED,
 		       0, 0),
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 6bac8ad70ba60b..a8d6fd18c0f557 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -617,7 +617,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
 	pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
 	pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
 	mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
-		      name, size, start);
+		      name ? name : "mlx5_pcif_pool", size, start);
 	return pool;
 }
 
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index f06cdec14ed7a1..3f9a030471fe2f 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -110,7 +110,6 @@ struct icss_iep {
 	struct ptp_clock_info ptp_info;
 	struct ptp_clock *ptp_clock;
 	struct mutex ptp_clk_mutex;	/* PHC access serializer */
-	spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
 	u32 def_inc;
 	s16 slow_cmp_inc;
 	u32 slow_cmp_count;
@@ -192,14 +191,11 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
 */
 static void icss_iep_settime(struct icss_iep *iep, u64 ns)
 {
-	unsigned long flags;
-
 	if (iep->ops && iep->ops->settime) {
 		iep->ops->settime(iep->clockops_data, ns);
 		return;
 	}
 
-	spin_lock_irqsave(&iep->irq_lock, flags);
 	if (iep->pps_enabled || iep->perout_enabled)
 		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
 
@@ -210,7 +206,6 @@ static void icss_iep_settime(struct icss_iep *iep, u64 ns)
 		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
 		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
 	}
-	spin_unlock_irqrestore(&iep->irq_lock, flags);
 }
 
 /**
@@ -549,36 +544,13 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
 static int icss_iep_perout_enable(struct icss_iep *iep,
 				  struct ptp_perout_request *req, int on)
 {
-	unsigned long flags;
-	int ret = 0;
-
-	mutex_lock(&iep->ptp_clk_mutex);
-
-	if (iep->pps_enabled) {
-		ret = -EBUSY;
-		goto exit;
-	}
-
-	if (iep->perout_enabled == !!on)
-		goto exit;
-
-	spin_lock_irqsave(&iep->irq_lock, flags);
-	ret = icss_iep_perout_enable_hw(iep, req, on);
-	if (!ret)
-		iep->perout_enabled = !!on;
-	spin_unlock_irqrestore(&iep->irq_lock, flags);
-
-exit:
-	mutex_unlock(&iep->ptp_clk_mutex);
-
-	return ret;
+	return -EOPNOTSUPP;
 }
 
 static int icss_iep_pps_enable(struct icss_iep *iep, int on)
 {
 	struct ptp_clock_request rq;
 	struct timespec64 ts;
-	unsigned long flags;
 	int ret = 0;
 	u64 ns;
 
@@ -592,8 +564,6 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
 	if (iep->pps_enabled == !!on)
 		goto exit;
 
-	spin_lock_irqsave(&iep->irq_lock, flags);
-
 	rq.perout.index = 0;
 	if (on) {
 		ns = icss_iep_gettime(iep, NULL);
@@ -610,8 +580,6 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
 	if (!ret)
 		iep->pps_enabled = !!on;
 
-	spin_unlock_irqrestore(&iep->irq_lock, flags);
-
 exit:
 	mutex_unlock(&iep->ptp_clk_mutex);
 
@@ -861,7 +829,6 @@ static int icss_iep_probe(struct platform_device *pdev)
 
 	iep->ptp_info = icss_iep_ptp_info;
 	mutex_init(&iep->ptp_clk_mutex);
-	spin_lock_init(&iep->irq_lock);
 	dev_set_drvdata(dev, iep);
 	icss_iep_disable(iep);
 
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fef4eff7753a7a..ca62188a317ad4 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -2,6 +2,9 @@
 /* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */
 
+#include <net/inet_dscp.h>
+#include <net/ip.h>
+
 #include "ipvlan.h"
 
 static u32 ipvlan_jhash_secret __read_mostly;
@@ -413,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
 
 static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
 {
-	const struct iphdr *ip4h = ip_hdr(skb);
 	struct net_device *dev = skb->dev;
 	struct net *net = dev_net(dev);
-	struct rtable *rt;
 	int err, ret = NET_XMIT_DROP;
+	const struct iphdr *ip4h;
+	struct rtable *rt;
 	struct flowi4 fl4 = {
 		.flowi4_oif = dev->ifindex,
-		.flowi4_tos = RT_TOS(ip4h->tos),
 		.flowi4_flags = FLOWI_FLAG_ANYSRC,
 		.flowi4_mark = skb->mark,
-		.daddr = ip4h->daddr,
-		.saddr = ip4h->saddr,
 	};
 
+	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+		goto err;
+
+	ip4h = ip_hdr(skb);
+	fl4.daddr = ip4h->daddr;
+	fl4.saddr = ip4h->saddr;
+	fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+
 	rt = ip_route_output_flow(net, &fl4, NULL);
 	if (IS_ERR(rt))
 		goto err;
@@ -485,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 	int err, ret = NET_XMIT_DROP;
 
+	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
+		DEV_STATS_INC(dev, tx_errors);
+		kfree_skb(skb);
+		return ret;
+	}
+
 	err = ipvlan_route_v6_outbound(dev, skb);
 	if (unlikely(err)) {
 		DEV_STATS_INC(dev, tx_errors);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6eab66c266081..6aa00f62b1f902 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -247,8 +247,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+	kfree_skb(skb);
+	return 0;
+}
+
+static int blackhole_neigh_construct(struct net_device *dev,
+				     struct neighbour *n)
+{
+	n->output = blackhole_neigh_output;
+	return 0;
+}
+
 static const struct net_device_ops blackhole_netdev_ops = {
 	.ndo_start_xmit = blackhole_netdev_xmit,
+	.ndo_neigh_construct = blackhole_neigh_construct,
 };
 
 /* This is a dst-dummy device used specifically for invalidated
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 46af78caf457a6..0bfa37c1405918 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	dev->hard_mtu = GL_RCV_BUF_SIZE;
 	dev->net->hard_header_len += 4;
-	dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
-	dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
-	return 0;
+	return usbnet_get_endpoints(dev, intf);
 }
 
 static const struct driver_info	genelink_info = {
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 9c231094ba3594..2354ce8b215943 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -309,7 +309,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
 
 	priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
 
-	priv->phy_rst = devm_reset_control_get(dev, "phy");
+	priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
+	/* fallback to old behaviour */
+	if (PTR_ERR(priv->phy_rst) == -ENOENT)
+		priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
 	if (IS_ERR(priv->phy_rst))
 		return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
 
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 3f310b28bfff79..fea76f2ce6fff6 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -319,9 +319,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
 	reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
 
 	/* FSEL settings corresponding to reference clock */
-	reg &= ~PHYCLKRST_FSEL_PIPE_MASK |
-		PHYCLKRST_MPLL_MULTIPLIER_MASK |
-		PHYCLKRST_SSC_REFCLKSEL_MASK;
+	reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
+		 PHYCLKRST_MPLL_MULTIPLIER_MASK |
+		 PHYCLKRST_SSC_REFCLKSEL_MASK);
 	switch (phy_drd->extrefclk) {
 	case EXYNOS5_FSEL_50MHZ:
 		reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
@@ -363,9 +363,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
 	reg &= ~PHYCLKRST_REFCLKSEL_MASK;
 	reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
 
-	reg &= ~PHYCLKRST_FSEL_UTMI_MASK |
-		PHYCLKRST_MPLL_MULTIPLIER_MASK |
-		PHYCLKRST_SSC_REFCLKSEL_MASK;
+	reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
+		 PHYCLKRST_MPLL_MULTIPLIER_MASK |
+		 PHYCLKRST_SSC_REFCLKSEL_MASK);
 	reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
 
 	return reg;
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 0f60d5d1c1678d..fae6242aa730e0 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy)
 	unsigned int index = lane->index;
 	struct device *dev = padctl->dev;
 	int err;
+	u32 reg;
 
 	port = tegra_xusb_find_usb2_port(padctl, index);
 	if (!port) {
@@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy)
 		return -ENODEV;
 	}
 
+	if (port->mode == USB_DR_MODE_OTG ||
+	    port->mode == USB_DR_MODE_PERIPHERAL) {
+		/* reset VBUS&ID OVERRIDE */
+		reg = padctl_readl(padctl, USB2_VBUS_ID);
+		reg &= ~VBUS_OVERRIDE;
+		reg &= ~ID_OVERRIDE(~0);
+		reg |= ID_OVERRIDE_FLOATING;
+		padctl_writel(padctl, reg, USB2_VBUS_ID);
+	}
+
 	if (port->supply && port->mode == USB_DR_MODE_HOST) {
 		err = regulator_enable(port->supply);
 		if (err) {
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 53d957d4eea4d1..1c50ecd8a03299 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -227,7 +227,7 @@ static int scan_chunks_sanity_check(struct device *dev)
 
 static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data)
 {
-	struct ucode_cpu_info uci;
+	struct cpu_signature sig;
 
 	/* Provide a specific error message when loading an older/unsupported image */
 	if (data->hdrver != MC_HEADER_TYPE_IFS) {
@@ -240,11 +240,9 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_
 		return -EINVAL;
 	}
 
-	intel_cpu_collect_info(&uci);
+	intel_collect_cpu_info(&sig);
 
-	if (!intel_find_matching_signature((void *)data,
-					   uci.cpu_sig.sig,
-					   uci.cpu_sig.pf)) {
+	if (!intel_find_matching_signature((void *)data, &sig)) {
 		dev_err(dev, "cpu signature, processor flags not matching\n");
 		return -EINVAL;
 	}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f026377f1cf1c4..e6dc2c556fde9e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1570,13 +1570,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
 	if (in_flight)
 		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
 
-	/*
-	 * Only clear the driver-private command data if the LLD does not supply
-	 * a function to initialize that data.
-	 */
-	if (!shost->hostt->init_cmd_priv)
-		memset(cmd + 1, 0, shost->hostt->cmd_size);
-
 	cmd->prot_op = SCSI_PROT_NORMAL;
 	if (blk_rq_bytes(req))
 		cmd->sc_data_direction = rq_dma_dir(req);
@@ -1743,6 +1736,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
 		goto out_dec_target_busy;
 
+	/*
+	 * Only clear the driver-private command data if the LLD does not supply
+	 * a function to initialize that data.
+	 */
+	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+		memset(cmd + 1, 0, shost->hostt->cmd_size);
+
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		ret = scsi_prepare_cmd(req);
 		if (ret != BLK_STS_OK)
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 8fbd46cd8c2b8e..0043d69077b646 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -194,10 +194,12 @@ static int ufs_bsg_request(struct bsg_job *job)
 	ufshcd_rpm_put_sync(hba);
 	kfree(buff);
 	bsg_reply->result = ret;
-	job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
 	/* complete the job here only if no error */
-	if (ret == 0)
+	if (ret == 0) {
+		job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+					sizeof(struct ufs_bsg_reply);
 		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+	}
 
 	return ret;
 }
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 524863b157e8aa..dffc932285ac50 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -324,6 +324,11 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
 	return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev);
 }
 
+static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba)
+{
+	return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev, true);
+}
+
 static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
 {
 	return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0ac0b6aaf9c62c..6d53dd7d411a85 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -98,6 +98,9 @@
 /* Polling time to wait for fDeviceInit */
 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
 
+/* Default RTC update every 10 seconds */
+#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
+
 /* UFSHC 4.0 compliant HC support this mode. */
 static bool use_mcq_mode = true;
 
@@ -234,6 +237,17 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 	return UFS_PM_LVL_0;
 }
 
+static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+{
+	return hba->outstanding_tasks || hba->active_uic_cmd ||
+	       hba->uic_async_done;
+}
+
+static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
+{
+	return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
+}
+
 static const struct ufs_dev_quirk ufs_fixups[] = {
 	/* UFS cards deviations table */
 	{ .wmanufacturerid = UFS_VENDOR_MICRON,
@@ -602,8 +616,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
 	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
 
 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
-	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
-		hba->outstanding_reqs, hba->outstanding_tasks);
+	dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+		scsi_host_busy(hba->host), hba->outstanding_tasks);
 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 		hba->saved_err, hba->saved_uic_err);
 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -676,6 +690,8 @@ static void ufshcd_device_reset(struct ufs_hba *hba)
 			hba->dev_info.wb_enabled = false;
 			hba->dev_info.wb_buf_flush_enabled = false;
 		}
+		if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
+			hba->dev_info.rtc_time_baseline = 0;
 	}
 	if (err != -EOPNOTSUPP)
 		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
@@ -1816,10 +1832,9 @@ static void ufshcd_gate_work(struct work_struct *work)
 		goto rel_lock;
 	}
 
-	if (hba->clk_gating.active_reqs
-	    || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-	    || hba->outstanding_reqs || hba->outstanding_tasks
-	    || hba->active_uic_cmd || hba->uic_async_done)
+	if (ufshcd_is_ufs_dev_busy(hba) ||
+	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+	    hba->clk_gating.active_reqs)
 		goto rel_lock;
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1875,8 +1890,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
 	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
-	    hba->active_uic_cmd || hba->uic_async_done ||
+	    ufshcd_has_pending_tasks(hba) || !hba->clk_gating.is_initialized ||
 	    hba->clk_gating.state == CLKS_OFF)
 		return;
 
@@ -8146,6 +8160,77 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba)
 	ufshcd_vops_fixup_dev_quirks(hba);
 }
 
+static void ufshcd_update_rtc(struct ufs_hba *hba)
+{
+	struct timespec64 ts64;
+	int err;
+	u32 val;
+
+	ktime_get_real_ts64(&ts64);
+
+	if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
+		dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
+		return;
+	}
+
+	/*
+	 * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
+	 * 2146 is required, it is recommended to choose the relative RTC mode.
+	 */
+	val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
+
+	/* Skip update RTC if RPM state is not RPM_ACTIVE */
+	if (ufshcd_rpm_get_if_active(hba) <= 0)
+		return;
+
+	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
+				0, 0, &val);
+	ufshcd_rpm_put(hba);
+
+	if (err)
+		dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
+	else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
+		hba->dev_info.rtc_time_baseline = ts64.tv_sec;
+}
+
+static void ufshcd_rtc_work(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+
+	hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
+
+	/* Update RTC only when there are no requests in progress and UFSHCI is operational */
+	if (!ufshcd_is_ufs_dev_busy(hba) &&
+	    hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+	    !hba->clk_gating.active_reqs)
+		ufshcd_update_rtc(hba);
+
+	if (ufshcd_is_ufs_dev_active(hba))
+		schedule_delayed_work(&hba->ufs_rtc_update_work,
+				      msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+}
+
+static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
+{
+	u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
+	struct ufs_dev_info *dev_info = &hba->dev_info;
+
+	if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
+		dev_info->rtc_type = UFS_RTC_ABSOLUTE;
+
+		/*
+		 * The concept of measuring time in Linux as the number of seconds elapsed since
+		 * 00:00:00 UTC on January 1, 1970, and UFS ABS RTC is elapsed from January 1st
+		 * 2010 00:00, here we need to adjust ABS baseline.
+		 */
+		dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
+							mktime64(1970, 1, 1, 0, 0, 0);
+	} else {
+		dev_info->rtc_type = UFS_RTC_RELATIVE;
+		dev_info->rtc_time_baseline = 0;
+	}
+}
+
 static int ufs_get_device_desc(struct ufs_hba *hba)
 {
 	int err;
@@ -8198,6 +8283,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
 
 	ufshcd_temp_notif_probe(hba, desc_buf);
 
+	ufs_init_rtc(hba, desc_buf);
+
 	if (hba->ext_iid_sup)
 		ufshcd_ext_iid_probe(hba, desc_buf);
 
@@ -8591,6 +8678,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 		ufshcd_init_clk_scaling_sysfs(hba);
 	}
 
+	/*
+	 * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
+	 * pointer and hence must only be started after the WLUN pointer has
+	 * been initialized by ufshcd_scsi_add_wlus().
+	 */
+	schedule_delayed_work(&hba->ufs_rtc_update_work,
+			      msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+
 	ufs_bsg_probe(hba);
 	scsi_scan_host(hba->host);
 
@@ -8887,7 +8982,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
 	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
 		 __func__, hba->outstanding_tasks);
 
-	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+	return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
 }
 
 static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -9695,6 +9790,8 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
 	if (ret)
 		goto set_link_active;
+
+	cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
 	goto out;
 
 set_link_active:
@@ -9789,6 +9886,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		if (ret)
 			goto set_old_link_state;
 		ufshcd_set_timestamp_attr(hba);
+		schedule_delayed_work(&hba->ufs_rtc_update_work,
+				      msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
 	}
 
 	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
@@ -10160,6 +10259,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	ufs_hwmon_remove(hba);
 	ufs_bsg_remove(hba);
 	ufs_sysfs_remove_nodes(hba->dev);
+	cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
 	blk_mq_destroy_queue(hba->tmf_queue);
 	blk_put_queue(hba->tmf_queue);
 	blk_mq_free_tag_set(&hba->tmf_tag_set);
@@ -10497,8 +10597,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 					UFS_SLEEP_PWR_MODE,
 					UIC_LINK_HIBERN8_STATE);
 
-	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
-			  ufshcd_rpm_dev_flush_recheck_work);
+	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
+	INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
 
 	/* Set the default auto-hiberate idle timer value to 150 ms */
 	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 926cb1188eba6c..7c0dce8eecadd1 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -161,6 +161,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
 	refcount_set(&cell->ref, 1);
 	atomic_set(&cell->active, 0);
 	INIT_WORK(&cell->manager, afs_manage_cell_work);
+	spin_lock_init(&cell->vs_lock);
 	cell->volumes = RB_ROOT;
 	INIT_HLIST_HEAD(&cell->proc_volumes);
 	seqlock_init(&cell->volume_lock);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 2f135d19545b19..0973cd0a396959 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -379,6 +379,7 @@ struct afs_cell {
 	unsigned int		debug_id;
 
 	/* The volumes belonging to this cell */
+	spinlock_t		vs_lock;	/* Lock for server->volumes */
	struct rb_root		volumes;	/* Tree of volumes on this server */
 	struct hlist_head	proc_volumes;	/* procfs volume list */
 	seqlock_t		volume_lock;	/* For volumes */
@@ -502,6 +503,7 @@ struct afs_server {
 	struct hlist_node	addr4_link;	/* Link in net->fs_addresses4 */
 	struct hlist_node	addr6_link;	/* Link in net->fs_addresses6 */
 	struct hlist_node	proc_link;	/* Link in net->fs_proc */
+	struct list_head	volumes;	/* RCU list of afs_server_entry objects */
 	struct work_struct	initcb_work;	/* Work for CB.InitCallBackState* */
 	struct afs_server	*gc_next;	/* Next server in manager's list */
 	time64_t		unuse_time;	/* Time at which last unused */
@@ -550,12 +552,14 @@ struct afs_server {
 */
 struct afs_server_entry {
 	struct afs_server	*server;
+	struct afs_volume	*volume;
+	struct list_head	slink;		/* Link in server->volumes */
 };
 
 struct afs_server_list {
 	struct rcu_head		rcu;
-	afs_volid_t		vids[AFS_MAXTYPES]; /* Volume IDs */
 	refcount_t		usage;
+	bool			attached;	/* T if attached to servers */
 	unsigned char		nr_servers;
 	unsigned char		preferred;	/* Preferred server */
 	unsigned short		vnovol_mask;	/* Servers to be skipped due to VNOVOL */
@@ -568,10 +572,9 @@ struct afs_server_list {
 * Live AFS volume management.
 */
 struct afs_volume {
-	union {
-		struct rcu_head	rcu;
-		afs_volid_t	vid;		/* volume ID */
-	};
+	struct rcu_head	rcu;
+	afs_volid_t		vid;		/* The volume ID of this volume */
+	afs_volid_t		vids[AFS_MAXTYPES]; /* All associated volume IDs */
 	refcount_t		ref;
 	time64_t		update_at;	/* Time at which to next update */
 	struct afs_cell		*cell;		/* Cell to which belongs (pins ref) */
@@ -1453,10 +1456,14 @@ static inline struct afs_server_list *afs_get_serverlist(struct afs_server_list
 }
 
 extern void afs_put_serverlist(struct afs_net *, struct afs_server_list *);
-extern struct afs_server_list *afs_alloc_server_list(struct afs_cell *, struct key *,
-						     struct afs_vldb_entry *,
-						     u8);
+struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
					      struct key *key,
+					      struct afs_vldb_entry *vldb);
 extern bool afs_annotate_server_list(struct afs_server_list *, struct afs_server_list *);
+void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist);
+void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist,
+				    struct afs_server_list *old);
+void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist);
 
 /*
 * super.c
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 0bd2f5ba6900c1..87381c2ffe374c 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -236,6 +236,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
 	server->addr_version = alist->version;
 	server->uuid = *uuid;
 	rwlock_init(&server->fs_lock);
+	INIT_LIST_HEAD(&server->volumes);
 	INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
 	init_waitqueue_head(&server->probe_wq);
 	INIT_LIST_HEAD(&server->probe_link);
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index b59896b1de0af2..89c75d934f79e1 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -24,13 +24,13 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
 /*
 * Build a server list from a VLDB record.
 */
-struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
+struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
					      struct key *key,
-					      struct afs_vldb_entry *vldb,
-					      u8 type_mask)
+					      struct afs_vldb_entry *vldb)
 {
 	struct afs_server_list *slist;
 	struct afs_server *server;
+	unsigned int type_mask = 1 << volume->type;
 	int ret = -ENOMEM, nr_servers = 0, i, j;
 
 	for (i = 0; i < vldb->nr_servers; i++)
@@ -44,15 +44,12 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
 	refcount_set(&slist->usage, 1);
 	rwlock_init(&slist->lock);
 
-	for (i = 0; i < AFS_MAXTYPES; i++)
-		slist->vids[i] = vldb->vid[i];
-
 	/* Make sure a records exists for each server in the list. */
 	for (i = 0; i < vldb->nr_servers; i++) {
 		if (!(vldb->fs_mask[i] & type_mask))
 			continue;
 
-		server = afs_lookup_server(cell, key, &vldb->fs_server[i],
+		server = afs_lookup_server(volume->cell, key, &vldb->fs_server[i],
					   vldb->addr_version[i]);
 		if (IS_ERR(server)) {
 			ret = PTR_ERR(server);
@@ -70,8 +67,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
				break;
 		if (j < slist->nr_servers) {
 			if (slist->servers[j].server == server) {
-				afs_put_server(cell->net, server,
-					       afs_server_trace_put_slist_isort);
+				afs_unuse_server(volume->cell->net, server,
+						 afs_server_trace_put_slist_isort);
				continue;
			}
 
@@ -81,6 +78,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
		}
 
		slist->servers[j].server = server;
+		slist->servers[j].volume = volume;
		slist->nr_servers++;
	}
 
@@ -92,7 +90,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
	return slist;
 
 error_2:
-	afs_put_serverlist(cell->net, slist);
+	afs_put_serverlist(volume->cell->net, slist);
 error:
	return ERR_PTR(ret);
 }
@@ -127,3 +125,99 @@ bool afs_annotate_server_list(struct afs_server_list *new,
 
	return true;
 }
+
+/*
+ * Attach a volume to the servers it is going to use.
+ */
+void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist)
+{
+	struct afs_server_entry *se, *pe;
+	struct afs_server *server;
+	struct list_head *p;
+	unsigned int i;
+
+	spin_lock(&volume->cell->vs_lock);
+
+	for (i = 0; i < slist->nr_servers; i++) {
+		se = &slist->servers[i];
+		server = se->server;
+
+		list_for_each(p, &server->volumes) {
+			pe = list_entry(p, struct afs_server_entry, slink);
+			if (volume->vid <= pe->volume->vid)
+				break;
+		}
+		list_add_tail_rcu(&se->slink, p);
+	}
+
+	slist->attached = true;
+	spin_unlock(&volume->cell->vs_lock);
+}
+
+/*
+ * Reattach a volume to the servers it is going to use when server list is
+ * replaced. We try to switch the attachment points to avoid rewalking the
+ * lists.
+ */
+void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *new,
+				    struct afs_server_list *old)
+{
+	unsigned int n = 0, o = 0;
+
+	spin_lock(&volume->cell->vs_lock);
+
+	while (n < new->nr_servers || o < old->nr_servers) {
+		struct afs_server_entry *pn = n < new->nr_servers ? &new->servers[n] : NULL;
+		struct afs_server_entry *po = o < old->nr_servers ? &old->servers[o] : NULL;
+		struct afs_server_entry *s;
+		struct list_head *p;
+		int diff;
+
+		if (pn && po && pn->server == po->server) {
+			list_replace_rcu(&po->slink, &pn->slink);
+			n++;
+			o++;
+			continue;
+		}
+
+		if (pn && po)
+			diff = memcmp(&pn->server->uuid, &po->server->uuid,
+				      sizeof(pn->server->uuid));
+		else
+			diff = pn ? -1 : 1;
+
+		if (diff < 0) {
+			list_for_each(p, &pn->server->volumes) {
+				s = list_entry(p, struct afs_server_entry, slink);
+				if (volume->vid <= s->volume->vid)
+					break;
+			}
+			list_add_tail_rcu(&pn->slink, p);
+			n++;
+		} else {
+			list_del_rcu(&po->slink);
+			o++;
+		}
+	}
+
+	spin_unlock(&volume->cell->vs_lock);
+}
+
+/*
+ * Detach a volume from the servers it has been using.
+ */
+void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist)
+{
+	unsigned int i;
+
+	if (!slist->attached)
+		return;
+
+	spin_lock(&volume->cell->vs_lock);
+
+	for (i = 0; i < slist->nr_servers; i++)
+		list_del_rcu(&slist->servers[i].slink);
+
+	slist->attached = false;
+	spin_unlock(&volume->cell->vs_lock);
+}
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index 83cf1bfbe343ae..b2cc10df95308c 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -126,7 +126,7 @@ static int afs_compare_volume_slists(const struct afs_volume *vol_a,
lb = rcu_dereference(vol_b->servers);

for (i = 0; i < AFS_MAXTYPES; i++)
- if (la->vids[i] != lb->vids[i])
+ if (vol_a->vids[i] != vol_b->vids[i])
return 0;

while (a < la->nr_servers && b < lb->nr_servers) {
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index c028598a903c9c..0f64b97581272e 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -72,11 +72,11 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
*/
static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
struct afs_vldb_entry *vldb,
- unsigned long type_mask)
+ struct afs_server_list **_slist)
{
struct afs_server_list *slist;
struct afs_volume *volume;
- int ret = -ENOMEM;
+ int ret = -ENOMEM, i;

volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
if (!volume)
@@ -95,13 +95,16 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
rwlock_init(&volume->cb_v_break_lock);
memcpy(volume->name, vldb->name, vldb->name_len + 1);

- slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask);
+ for (i = 0; i < AFS_MAXTYPES; i++)
+ volume->vids[i] = vldb->vid[i];
+
+ slist = afs_alloc_server_list(volume, params->key, vldb);
if (IS_ERR(slist)) {
ret = PTR_ERR(slist);
goto error_1;
}

- refcount_set(&slist->usage, 1);
+ *_slist = slist;
rcu_assign_pointer(volume->servers, slist);
trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc);
return volume;
@@ -117,17 +120,19 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
* Look up or allocate a volume record.
*/
static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params,
- struct afs_vldb_entry *vldb,
- unsigned long type_mask)
+ struct afs_vldb_entry *vldb)
{
+ struct afs_server_list *slist;
struct afs_volume *candidate, *volume;

- candidate = afs_alloc_volume(params, vldb, type_mask);
+ candidate = afs_alloc_volume(params, vldb, &slist);
if (IS_ERR(candidate))
return candidate;

volume = afs_insert_volume_into_cell(params->cell, candidate);
- if (volume != candidate)
+ if (volume == candidate)
+ afs_attach_volume_to_servers(volume, slist);
+ else
afs_put_volume(params->net, candidate, afs_volume_trace_put_cell_dup);
return volume;
}
@@ -208,8 +213,7 @@ struct afs_volume *afs_create_volume(struct afs_fs_context *params)
goto error;
}

- type_mask = 1UL << params->type;
- volume = afs_lookup_volume(params, vldb, type_mask);
+ volume = afs_lookup_volume(params, vldb);

error:
kfree(vldb);
@@ -221,14 +225,17 @@ struct afs_volume *afs_create_volume(struct afs_fs_context *params)
*/
static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
{
+ struct afs_server_list *slist = rcu_access_pointer(volume->servers);
+
_enter("%p", volume);

#ifdef CONFIG_AFS_FSCACHE
ASSERTCMP(volume->cache, ==, NULL);
#endif

+ afs_detach_volume_from_servers(volume, slist);
afs_remove_volume_from_cell(volume);
- afs_put_serverlist(net, rcu_access_pointer(volume->servers));
+ afs_put_serverlist(net, slist);
afs_put_cell(volume->cell, afs_cell_trace_put_vol);
trace_afs_volume(volume->vid, refcount_read(&volume->ref),
afs_volume_trace_free);
@@ -362,8 +369,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
}

/* See if the volume's server list got updated. */
- new = afs_alloc_server_list(volume->cell, key,
- vldb, (1 << volume->type));
+ new = afs_alloc_server_list(volume, key, vldb);
if (IS_ERR(new)) {
ret = PTR_ERR(new);
goto error_vldb;
@@ -384,9 +390,11 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)

volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
write_unlock(&volume->servers_lock);
- ret = 0;

+ if (discard == old)
+ afs_reattach_volume_to_servers(volume, new, old);
afs_put_serverlist(volume->cell->net, discard);
+ ret = 0;
error_vldb:
kfree(vldb);
error:
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 18e018cb181179..dbf7b3cd70ca5e 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -570,7 +570,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
err = PTR_ERR(upper);
if (!IS_ERR(upper)) {
err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
- dput(upper);

if (!err) {
/* Restore timestamps on parent (best effort) */
@@ -578,6 +577,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
ovl_dentry_set_upper_alias(c->dentry);
ovl_dentry_update_reval(c->dentry, upper);
}
+ dput(upper);
}
inode_unlock(udir);
if (err)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7e11ca6f86dcda..cf3f8b9bf43f08 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -445,7 +445,7 @@
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
- *(.rodata) *(.rodata.*) \
+ *(.rodata) *(.rodata.*) *(.data.rel.ro*) \
SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
index 2c8bfd0f1b6b3a..6322d8c1c6b429 100644
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
return rcuref_get_slowpath(ref);
}

-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);

/*
* Internal helper. Do not invoke directly.
*/
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
+ int cnt;
+
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
"suspicious rcuref_put_rcusafe() usage");
/*
* Unconditionally decrease the reference count. The saturation and
* dead zones provide enough tolerance for this.
*/
- if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+ cnt = atomic_sub_return_release(1, &ref->refcnt);
+ if (likely(cnt >= 0))
return false;

/*
* Handle the last reference drop and cases inside the saturation
* and dead zones.
*/
- return rcuref_put_slowpath(ref);
+ return rcuref_put_slowpath(ref, cnt);
}

/**
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 8f9bee0e21c3bc..a220b28904ca52 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -140,13 +140,14 @@ struct rpc_task_setup {
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)

-#define RPC_TASK_RUNNING 0
-#define RPC_TASK_QUEUED 1
-#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_NEED_XMIT 3
-#define RPC_TASK_NEED_RECV 4
-#define RPC_TASK_MSG_PIN_WAIT 5
-#define RPC_TASK_SIGNALLED 6
+enum {
+ RPC_TASK_RUNNING,
+ RPC_TASK_QUEUED,
+ RPC_TASK_ACTIVE,
+ RPC_TASK_NEED_XMIT,
+ RPC_TASK_NEED_RECV,
+ RPC_TASK_MSG_PIN_WAIT,
+};

#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
@@ -158,7 +159,7 @@ struct rpc_task_setup {

#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)

-#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+#define RPC_SIGNALLED(t) (READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)

/*
* Task priorities.
diff --git a/include/net/dst.h b/include/net/dst.h
index 78884429deed82..16b7b99b5f309c 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -448,6 +448,15 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
dst->expires = expires;
}

+static inline unsigned int dst_dev_overhead(struct dst_entry *dst,
+ struct sk_buff *skb)
+{
+ if (likely(dst))
+ return LL_RESERVED_SPACE(dst->dev);
+
+ return skb->mac_len;
+}
+
INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
diff --git a/include/net/ip.h b/include/net/ip.h
index 7db5912e0c5f63..d8bf1f0a6919c4 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -415,6 +415,11 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}

+static inline dscp_t ip4h_dscp(const struct iphdr *ip4h)
+{
+ return inet_dsfield_to_dscp(ip4h->tos);
+}
+
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
const struct rtable *rt = (const struct rtable *)dst;
diff --git a/include/net/route.h b/include/net/route.h
index 0171e9e1bbea3d..27c17aff0bbe14 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -200,12 +200,13 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
const struct sk_buff *hint);

static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
+ dscp_t dscp, struct net_device *devin)
{
int err;

rcu_read_lock();
- err = ip_route_input_noref(skb, dst, src, tos, devin);
+ err = ip_route_input_noref(skb, dst, src, inet_dscp_to_dsfield(dscp),
+ devin);
if (!err) {
skb_dst_force(skb);
if (!skb_dst(skb))
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 62f9d126a71ad1..bc459d06162971 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -561,6 +561,7 @@ enum ib_port_speed {
IB_SPEED_EDR = 32,
IB_SPEED_HDR = 64,
IB_SPEED_NDR = 128,
+ IB_SPEED_XDR = 256,
};

enum ib_stat_flag {
@@ -840,6 +841,7 @@ enum ib_rate {
IB_RATE_50_GBPS = 20,
IB_RATE_400_GBPS = 21,
IB_RATE_600_GBPS = 22,
+ IB_RATE_800_GBPS = 23,
};

/**
diff --git a/include/trace/events/icmp.h b/include/trace/events/icmp.h
new file mode 100644
index 00000000000000..31559796949a78
--- /dev/null
+++ b/include/trace/events/icmp.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM icmp
+
+#if !defined(_TRACE_ICMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ICMP_H
+
+#include <linux/icmp.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(icmp_send,
+
+ TP_PROTO(const struct sk_buff *skb, int type, int code),
+
+ TP_ARGS(skb, type, code),
+
+ TP_STRUCT__entry(
+ __field(const void *, skbaddr)
+ __field(int, type)
+ __field(int, code)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(unsigned short, ulen)
+ ),
+
+ TP_fast_assign(
+ struct iphdr *iph = ip_hdr(skb);
+ struct udphdr *uh = udp_hdr(skb);
+ int proto_4 = iph->protocol;
+ __be32 *p32;
+
+ __entry->skbaddr = skb;
+ __entry->type = type;
+ __entry->code = code;
+
+ if (proto_4 != IPPROTO_UDP || (u8 *)uh < skb->head ||
+ (u8 *)uh + sizeof(struct udphdr)
+ > skb_tail_pointer(skb)) {
+ __entry->sport = 0;
+ __entry->dport = 0;
+ __entry->ulen = 0;
+ } else {
+ __entry->sport = ntohs(uh->source);
+ __entry->dport = ntohs(uh->dest);
+ __entry->ulen = ntohs(uh->len);
+ }
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = iph->saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = iph->daddr;
+ ),
+
+ TP_printk("icmp_send: type=%d, code=%d. From %pI4:%u to %pI4:%u ulen=%d skbaddr=%p",
+ __entry->type, __entry->code,
+ __entry->saddr, __entry->sport, __entry->daddr,
+ __entry->dport, __entry->ulen, __entry->skbaddr)
+);
+
+#endif /* _TRACE_ICMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 6beb38c1dcb5eb..9eba2ca0a6ff80 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request,
{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
- { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \
- { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
+ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })

DECLARE_EVENT_CLASS(rpc_task_running,

diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index d7c5aaa3274453..fe15bc7e9f707b 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -220,7 +220,8 @@ enum ib_uverbs_advise_mr_flag {
struct ib_uverbs_query_port_resp_ex {
struct ib_uverbs_query_port_resp legacy_resp;
__u16 port_cap_flags2;
- __u8 reserved[6];
+ __u8 reserved[2];
+ __u32 active_speed_ex;
};

struct ib_uverbs_qp_cap {
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 49c90795a2a677..571a08ce912429 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
+#include <linux/time64.h>

/*
* Using static_assert() is not allowed in UAPI header files. Hence the check
@@ -550,6 +551,14 @@ struct ufs_vreg_info {
struct ufs_vreg *vdd_hba;
};

+/* UFS device descriptor wPeriodicRTCUpdate bit9 defines RTC time baseline */
+#define UFS_RTC_TIME_BASELINE BIT(9)
+
+enum ufs_rtc_time {
+ UFS_RTC_RELATIVE,
+ UFS_RTC_ABSOLUTE
+};
+
struct ufs_dev_info {
bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
@@ -577,6 +586,10 @@ struct ufs_dev_info {

/* UFS EXT_IID Enable */
bool b_ext_iid_en;
+
+ /* UFS RTC */
+ enum ufs_rtc_time rtc_type;
+ time64_t rtc_time_baseline;
};

/*
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 20d129914121d5..d5aa832f8dba3c 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -908,6 +908,8 @@ enum ufshcd_mcq_opr {
* @mcq_base: Multi circular queue registers base address
* @uhq: array of supported hardware queues
* @dev_cmd_queue: Queue for issuing device management commands
+ * @mcq_opr: MCQ operation and runtime registers
+ * @ufs_rtc_update_work: A work for UFS RTC periodic update
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -1068,6 +1070,8 @@ struct ufs_hba {
struct ufs_hw_queue *uhq;
struct ufs_hw_queue *dev_cmd_queue;
struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
+
+ struct delayed_work ufs_rtc_update_work;
};

/**
diff --git a/io_uring/net.c b/io_uring/net.c
index 56091292950fd6..1a0e98e19dc0ed 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -303,7 +303,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
if (unlikely(ret))
return ret;

- return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ sr->msg_control = iomsg->msg.msg_control_user;
+ return ret;
}
#endif

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5d6458ea675e9d..4f6b18ecfdb219 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4842,7 +4842,7 @@ static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
struct perf_event *event)
{
- struct perf_event_pmu_context *new = NULL, *epc;
+ struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
void *task_ctx_data = NULL;

if (!ctx->task) {
@@ -4899,12 +4899,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
atomic_inc(&epc->refcount);
goto found_epc;
}
+ /* Make sure the pmu_ctx_list is sorted by PMU type: */
+ if (!pos && epc->pmu->type > pmu->type)
+ pos = epc;
}

epc = new;
new = NULL;

- list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ if (!pos)
+ list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ else
+ list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
+
epc->ctx = ctx;

found_epc:
@@ -5854,14 +5861,15 @@ static int _perf_event_period(struct perf_event *event, u64 value)
if (!value)
return -EINVAL;

- if (event->attr.freq && value > sysctl_perf_event_sample_rate)
- return -EINVAL;
-
- if (perf_event_check_period(event, value))
- return -EINVAL;
-
- if (!event->attr.freq && (value & (1ULL << 63)))
- return -EINVAL;
+ if (event->attr.freq) {
+ if (value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+ } else {
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+ if (value & (1ULL << 63))
+ return -EINVAL;
+ }

event_function_call(event, __perf_event_period, &value);

@@ -8106,7 +8114,8 @@ void perf_event_exec(void)

perf_event_enable_on_exec(ctx);
perf_event_remove_on_exec(ctx);
- perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
+ scoped_guard(rcu)
+ perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);

perf_unpin_context(ctx);
put_ctx(ctx);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6dac0b5798213b..7e2edd1b069397 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -481,6 +481,11 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
if (ret <= 0)
goto put_old;

+ if (is_zero_page(old_page)) {
+ ret = -EINVAL;
+ goto put_old;
+ }
+
if (WARN(!is_register && PageCompound(old_page),
"uprobe unregister should never work on compound page\n")) {
ret = -EINVAL;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c686d826a91cf5..784a4f8409453d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8561,7 +8561,7 @@ SYSCALL_DEFINE0(sched_yield)
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
- if (should_resched(0)) {
+ if (should_resched(0) && !irqs_disabled()) {
preempt_schedule_common();
return 1;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6e6b2a5aa1402b..99fdeee3bcd87a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -538,6 +538,7 @@ static int function_stat_show(struct seq_file *m, void *v)
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
+ unsigned long long stddev_denom;
#endif
mutex_lock(&ftrace_profile_lock);

@@ -559,23 +560,19 @@ static int function_stat_show(struct seq_file *m, void *v)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
seq_puts(m, " ");

- /* Sample standard deviation (s^2) */
- if (rec->counter <= 1)
- stddev = 0;
- else {
- /*
- * Apply Welford's method:
- * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
- */
+ /*
+ * Variance formula:
+ * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+ * Maybe Welford's method is better here?
+ * Divide only by 1000 for ns^2 -> us^2 conversion.
+ * trace_print_graph_duration will divide by 1000 again.
+ */
+ stddev = 0;
+ stddev_denom = rec->counter * (rec->counter - 1) * 1000;
+ if (stddev_denom) {
stddev = rec->counter * rec->time_squared -
rec->time * rec->time;
-
- /*
- * Divide only 1000 for ns^2 -> us^2 conversion.
- * trace_print_graph_duration will divide 1000 again.
- */
- stddev = div64_ul(stddev,
- rec->counter * (rec->counter - 1) * 1000);
+ stddev = div64_ul(stddev, stddev_denom);
}

trace_seq_init(&s);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index dd16faf0d1500c..604d63380a90b1 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -6660,27 +6660,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
if (existing_hist_update_only(glob, trigger_data, file))
goto out_free;

- ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
- if (ret < 0)
- goto out_free;
+ if (!get_named_trigger_data(trigger_data)) {

- if (get_named_trigger_data(trigger_data))
- goto enable;
+ ret = create_actions(hist_data);
+ if (ret)
+ goto out_free;

- ret = create_actions(hist_data);
- if (ret)
- goto out_unreg;
+ if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+ ret = save_hist_vars(hist_data);
+ if (ret)
+ goto out_free;
+ }

- if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
- ret = save_hist_vars(hist_data);
+ ret = tracing_map_init(hist_data->map);
if (ret)
- goto out_unreg;
+ goto out_free;
}

- ret = tracing_map_init(hist_data->map);
- if (ret)
- goto out_unreg;
-enable:
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret < 0)
+ goto out_free;
+
ret = hist_trigger_enable(trigger_data, file);
if (ret)
goto out_unreg;
diff --git a/lib/rcuref.c b/lib/rcuref.c
index 5ec00a4a64d11c..185967b8508e86 100644
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
/**
* rcuref_put_slowpath - Slowpath of __rcuref_put()
* @ref: Pointer to the reference count
+ * @cnt: The resulting value of the fastpath decrement
*
* Invoked when the reference count is outside of the valid zone.
*
@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
-bool rcuref_put_slowpath(rcuref_t *ref)
+bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
{
- unsigned int cnt = atomic_read(&ref->refcnt);
-
/* Did this drop the last reference? */
if (likely(cnt == RCUREF_NOREF)) {
/*
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index acb148759bd049..304ebb31cebba6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -636,7 +636,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_hold(conn->hcon);

- list_add(&chan->list, &conn->chan_l);
+ /* Append to the list since the order matters for ECRED */
+ list_add_tail(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
@@ -3774,7 +3775,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
struct l2cap_ecred_rsp_data *rsp = data;

- if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ /* Check if channel for outgoing connection or if it wasn't deferred
+ * since in those cases it must be skipped.
+ */
+ if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
+ !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
return;

/* Reset ident so only one response is sent */
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index a1cfa75bbadb97..2a4958e995f2d9 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -366,9 +366,9 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
*/
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb->dev, *br_indev;
- struct iphdr *iph = ip_hdr(skb);
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ struct net_device *dev = skb->dev, *br_indev;
+ const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
int err;

@@ -386,7 +386,9 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
}
nf_bridge->in_prerouting = 0;
if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
- if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
+ err = ip_route_input(skb, iph->daddr, iph->saddr,
+ ip4h_dscp(iph), dev);
+ if (err) {
struct in_device *in_dev = __in_dev_get_rcu(dev);

/* If err equals -EHOSTUNREACH the error is due to a
diff --git a/net/core/gro.c b/net/core/gro.c
index 85d3f686ba539b..397cf598425034 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -627,6 +627,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;

skb->encapsulation = 0;
+ skb->ip_summed = CHECKSUM_NONE;
skb_shinfo(skb)->gso_type = 0;
skb_shinfo(skb)->gso_size = 0;
if (unlikely(skb->slow_gro)) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f0a9ef1aeaa298..21a83e26f004bb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5867,11 +5867,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->offload_fwd_mark = 0;
skb->offload_l3_fwd_mark = 0;
#endif
+ ipvs_reset(skb);

if (!xnet)
return;

- ipvs_reset(skb);
skb->mark = 0;
skb_clear_tstamp(skb);
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0b15272dd2d35b..a7fa17b6a12978 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -31,6 +31,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
+static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;

static int net_msg_warn; /* Unused, but still a sysctl */

@@ -613,7 +614,7 @@ static struct ctl_table net_core_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .extra1 = &netdev_budget_usecs_min,
},
{
.procname = "fb_tunnels_only_for_init_net",
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a21d32b3ae6c36..94501bb30c431b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -93,6 +93,9 @@
#include <net/ip_fib.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
+#include <net/inet_dscp.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/icmp.h>

/*
* Build xmit assembly blocks
@@ -481,13 +484,11 @@ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
return route_lookup_dev;
}

-static struct rtable *icmp_route_lookup(struct net *net,
- struct flowi4 *fl4,
+static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
struct sk_buff *skb_in,
- const struct iphdr *iph,
- __be32 saddr, u8 tos, u32 mark,
- int type, int code,
- struct icmp_bxm *param)
+ const struct iphdr *iph, __be32 saddr,
+ dscp_t dscp, u32 mark, int type,
+ int code, struct icmp_bxm *param)
{
struct net_device *route_lookup_dev;
struct rtable *rt, *rt2;
@@ -500,7 +501,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
fl4->flowi4_uid = sock_net_uid(net, NULL);
- fl4->flowi4_tos = RT_TOS(tos);
+ fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
@@ -548,7 +549,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
orefdst = skb_in->_skb_refdst; /* save old refdst */
skb_dst_set(skb_in, NULL);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
- RT_TOS(tos), rt2->dst.dev);
+ dscp, rt2->dst.dev);

dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
@@ -744,8 +745,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
ipc.opt = &icmp_param.replyopts.opt;
ipc.sockc.mark = mark;

- rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
- type, code, &icmp_param);
+ rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr,
+ inet_dsfield_to_dscp(tos), mark, type, code,
+ &icmp_param);
if (IS_ERR(rt))
goto out_unlock;

@@ -778,6 +780,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
if (!fl4.saddr)
fl4.saddr = htonl(INADDR_DUMMY);

+ trace_icmp_send(skb_in, type, code);
+
icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt);
ende:
ip_rt_put(rt);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index a9e22a098872fa..b4c59708fc0956 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -617,7 +617,8 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)

orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
+ err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph),
+ dev);
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index cc2b608b1a8e78..ddb90b9057e756 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -754,12 +754,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,

/* In sequence, PAWS is OK. */

- /* TODO: We probably should defer ts_recent change once
- * we take ownership of @req.
- */
- if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
- WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
-
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
/* Truncate SYN, it is out of window starting
at tcp_rsk(req)->rcv_isn + 1. */
@@ -808,6 +802,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!child)
goto listen_overflow;

+ if (own_req && tmp_opt.saw_tstamp &&
+ !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+ tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;
+
if (own_req && rsk_drop_req(req)) {
reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 97905d4174eca5..d645d022ce7745 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -628,8 +628,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
skb_dst_set(skb2, &rt->dst);
} else {
- if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
- skb2->dev) ||
+ if (ip_route_input(skb2, eiph->daddr, eiph->saddr,
+ ip4h_dscp(eiph), skb2->dev) ||
skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
goto out;
}
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index db3c19a42e1ca7..28fc7fae579723 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -125,7 +125,8 @@ static void rpl_destroy_state(struct lwtunnel_state *lwt)
}

static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
- const struct ipv6_rpl_sr_hdr *srh)
+ const struct ipv6_rpl_sr_hdr *srh,
+ struct dst_entry *cache_dst)
{
struct ipv6_rpl_sr_hdr *isrh, *csrh;
const struct ipv6hdr *oldhdr;
@@ -153,7 +154,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,

hdrlen = ((csrh->hdrlen + 1) << 3);

- err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
if (unlikely(err)) {
kfree(buf);
return err;
@@ -186,7 +187,8 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
return 0;
}

-static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
+static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
struct rpl_iptunnel_encap *tinfo;
@@ -196,7 +198,7 @@ static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)

tinfo = rpl_encap_lwtunnel(dst->lwtstate);

- return rpl_do_srh_inline(skb, rlwt, tinfo->srh);
+ return rpl_do_srh_inline(skb, rlwt, tinfo->srh, cache_dst);
}

static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -208,14 +210,14 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)

rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);

- err = rpl_do_srh(skb, rlwt);
- if (unlikely(err))
- goto drop;
-
local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
local_bh_enable();

+ err = rpl_do_srh(skb, rlwt, dst);
+ if (unlikely(err))
+ goto drop;
+
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct flowi6 fl6;
@@ -237,15 +239,15 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
local_bh_disable();
dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
local_bh_enable();
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
}

skb_dst_drop(skb);
skb_dst_set(skb, dst);

- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- goto drop;
-
return dst_output(net, sk, skb);

drop:
@@ -257,34 +259,46 @@ static int rpl_input(struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct rpl_lwt *rlwt;
int err;

- rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;

- err = rpl_do_srh(skb, rlwt);
- if (unlikely(err))
- goto drop;
+ rlwt = rpl_lwt_lwtunnel(lwtst);

local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
+ local_bh_enable();
+
+ err = rpl_do_srh(skb, rlwt, dst);
+ if (unlikely(err))
+ goto drop;

if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
+ local_bh_disable();
dst_cache_set_ip6(&rlwt->cache, dst,
&ipv6_hdr(skb)->saddr);
+ local_bh_enable();
}
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
- local_bh_enable();
-
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- goto drop;

return dst_input(skb);

diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 098632adc9b5af..c44e4c0824e0d8 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -124,8 +124,8 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
return flowlabel;
}

-/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
-int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
+ int proto, struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
struct net *net = dev_net(dst->dev);
@@ -137,7 +137,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
hdrlen = (osrh->hdrlen + 1) << 3;
tot_len = hdrlen + sizeof(*hdr);

- err = skb_cow_head(skb, tot_len + skb->mac_len);
+ err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;

@@ -197,11 +197,18 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)

return 0;
}
+
+/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
+int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+{
+ return __seg6_do_srh_encap(skb, osrh, proto, NULL);
+}
EXPORT_SYMBOL_GPL(seg6_do_srh_encap);

/* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */
static int seg6_do_srh_encap_red(struct sk_buff *skb,
- struct ipv6_sr_hdr *osrh, int proto)
+ struct ipv6_sr_hdr *osrh, int proto,
+ struct dst_entry *cache_dst)
{
__u8 first_seg = osrh->first_segment;
struct dst_entry *dst = skb_dst(skb);
@@ -230,7 +237,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,

tot_len = red_hdrlen + sizeof(struct ipv6hdr);

- err = skb_cow_head(skb, tot_len + skb->mac_len);
+ err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;

@@ -317,8 +324,8 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
return 0;
}

-/* insert an SRH within an IPv6 packet, just after the IPv6 header */
-int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+static int __seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
+ struct dst_entry *cache_dst)
{
struct ipv6hdr *hdr, *oldhdr;
struct ipv6_sr_hdr *isrh;
@@ -326,7 +333,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)

hdrlen = (osrh->hdrlen + 1) << 3;

- err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
if (unlikely(err))
return err;

@@ -369,9 +376,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)

return 0;
}
-EXPORT_SYMBOL_GPL(seg6_do_srh_inline);

-static int seg6_do_srh(struct sk_buff *skb)
+static int seg6_do_srh(struct sk_buff *skb, struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
struct seg6_iptunnel_encap *tinfo;
@@ -384,7 +390,7 @@ static int seg6_do_srh(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IPV6))
return -EINVAL;

- err = seg6_do_srh_inline(skb, tinfo->srh);
+ err = __seg6_do_srh_inline(skb, tinfo->srh, cache_dst);
if (err)
return err;
break;
@@ -402,9 +408,11 @@ static int seg6_do_srh(struct sk_buff *skb)
return -EINVAL;

if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP)
- err = seg6_do_srh_encap(skb, tinfo->srh, proto);
+ err = __seg6_do_srh_encap(skb, tinfo->srh,
+ proto, cache_dst);
else
- err = seg6_do_srh_encap_red(skb, tinfo->srh, proto);
+ err = seg6_do_srh_encap_red(skb, tinfo->srh,
+ proto, cache_dst);

if (err)
return err;
@@ -425,11 +433,13 @@ static int seg6_do_srh(struct sk_buff *skb)
skb_push(skb, skb->mac_len);

if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP)
- err = seg6_do_srh_encap(skb, tinfo->srh,
- IPPROTO_ETHERNET);
+ err = __seg6_do_srh_encap(skb, tinfo->srh,
+ IPPROTO_ETHERNET,
+ cache_dst);
else
err = seg6_do_srh_encap_red(skb, tinfo->srh,
- IPPROTO_ETHERNET);
+ IPPROTO_ETHERNET,
+ cache_dst);

if (err)
return err;
@@ -444,6 +454,13 @@ static int seg6_do_srh(struct sk_buff *skb)
return 0;
}

+/* insert an SRH within an IPv6 packet, just after the IPv6 header */
+int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+{
+ return __seg6_do_srh_inline(skb, osrh, NULL);
+}
+EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
+
static int seg6_input_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
@@ -455,34 +472,46 @@ static int seg6_input_core(struct net *net, struct sock *sk,
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct seg6_lwt *slwt;
int err;

- err = seg6_do_srh(skb);
- if (unlikely(err))
- goto drop;
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;

- slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ slwt = seg6_lwt_lwtunnel(lwtst);

local_bh_disable();
dst = dst_cache_get(&slwt->cache);
+ local_bh_enable();
+
+ err = seg6_do_srh(skb, dst);
+ if (unlikely(err))
+ goto drop;

if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
+ local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst,
&ipv6_hdr(skb)->saddr);
+ local_bh_enable();
}
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
- local_bh_enable();
-
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- goto drop;

if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
@@ -528,16 +557,16 @@ static int seg6_output_core(struct net *net, struct sock *sk,
struct seg6_lwt *slwt;
int err;

- err = seg6_do_srh(skb);
- if (unlikely(err))
- goto drop;
-
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);

local_bh_disable();
dst = dst_cache_get(&slwt->cache);
local_bh_enable();

+ err = seg6_do_srh(skb, dst);
+ if (unlikely(err))
+ goto drop;
+
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct flowi6 fl6;
@@ -559,15 +588,15 @@ static int seg6_output_core(struct net *net, struct sock *sk,
local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
local_bh_enable();
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
}

skb_dst_drop(skb);
skb_dst_set(skb, dst);

- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
- if (unlikely(err))
- goto drop;
-
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, skb_dst(skb)->dev, dst_output);
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 2b63c5492eedc2..5f16e2fa2de67a 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -1559,11 +1559,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
if (mptcp_pm_is_userspace(msk))
goto next;

- if (list_empty(&msk->conn_list)) {
- mptcp_pm_remove_anno_addr(msk, addr, false);
- goto next;
- }
-
lock_sock(sk);
remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 282ecc8bf75e80..b3eeeb948b6132 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1109,7 +1109,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (data_len == 0) {
pr_debug("infinite mapping received\n");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
- subflow->map_data_len = 0;
return MAPPING_INVALID;
}

@@ -1251,18 +1250,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
mptcp_schedule_work(sk);
}

-static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
-{
- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-
- if (subflow->mp_join)
- return false;
- else if (READ_ONCE(msk->csum_enabled))
- return !subflow->valid_csum_seen;
- else
- return READ_ONCE(msk->allow_infinite_fallback);
-}
-
static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -1358,7 +1345,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
return true;
}

- if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
+ if (!READ_ONCE(msk->allow_infinite_fallback)) {
/* fatal protocol error, close the socket.
* subflow_error_report() will introduce the appropriate barriers
*/
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index 085e7892d31040..b1536da2246b82 100644
--- a/net/rxrpc/rxperf.c
+++ b/net/rxrpc/rxperf.c
@@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call)
call->unmarshal++;
fallthrough;
case 2:
+ ret = rxperf_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ /* Deal with the terminal magic cookie. */
+ call->iov_len = 4;
+ call->kvec[0].iov_len = call->iov_len;
+ call->kvec[0].iov_base = call->tmp;
+ iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
+ call->unmarshal++;
+ fallthrough;
+ case 3:
ret = rxperf_extract_data(call, false);
if (ret < 0)
return ret;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 3298da2e37e43d..cb6a6bc9fea77d 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1675,12 +1675,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
}
}

-#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
struct proc_dir_entry *p;
struct sunrpc_net *sn;

+ if (!IS_ENABLED(CONFIG_PROC_FS))
+ return 0;
+
sn = net_generic(net, sunrpc_net_id);
cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
if (cd->procfs == NULL)
@@ -1708,12 +1710,6 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
remove_cache_proc_entries(cd);
return -ENOMEM;
}
-#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
-{
- return 0;
-}
-#endif

void __init cache_initialize(void)
{
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef623ea150609..9b45fbdc90cabe 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -864,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task)
if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
return;
trace_rpc_task_signalled(task, task->tk_action);
- set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
- smp_mb__after_atomic();
queue = READ_ONCE(task->tk_waitqueue);
if (queue)
rpc_wake_up_queued_task(queue, task);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1c4bc8234ea875..29df05879c8e95 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2561,7 +2561,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
struct sock_xprt *lower_transport =
container_of(lower_xprt, struct sock_xprt, xprt);

- lower_transport->xprt_err = status ? -EACCES : 0;
+ switch (status) {
+ case 0:
+ case -EACCES:
+ case -ETIMEDOUT:
+ lower_transport->xprt_err = status;
+ break;
+ default:
+ lower_transport->xprt_err = -EACCES;
+ }
complete(&lower_transport->handshake_done);
xprt_put(lower_xprt);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 75162e5f712b40..822bd9a00892c3 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10084,23 +10084,27 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
- SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
- SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301VV/VQ/VU/VJ/VA/VC/VE/VVC/VQC/VUC/VJC/VEC/VCC", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
- SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZI/ZJ/ZQ/ZU/ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
- SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY),
+ SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS UM3504DA", ALC294_FIXUP_CS35L41_I2C_2),
@@ -10111,7 +10115,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
- SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
@@ -10125,10 +10128,13 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
- SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
- SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS G713PI/PU/PV/PVN", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 0bd9ba5a11b4e5..43792e175d75f1 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -234,7 +234,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls =

/* Left Mixer */
static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
@@ -244,7 +243,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
};

@@ -337,10 +335,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = {
SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER,
ES8328_DACPOWER_LDAC_OFF, 1),

- SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0,
&es8328_left_mixer_controls[0],
ARRAY_SIZE(es8328_left_mixer_controls)),
- SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0,
&es8328_right_mixer_controls[0],
ARRAY_SIZE(es8328_right_mixer_controls)),

@@ -419,19 +417,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = {
{ "Right Line Mux", "PGA", "Right PGA Mux" },
{ "Right Line Mux", "Differential", "Differential Mux" },

- { "Left Out 1", NULL, "Left DAC" },
- { "Right Out 1", NULL, "Right DAC" },
- { "Left Out 2", NULL, "Left DAC" },
- { "Right Out 2", NULL, "Right DAC" },
-
- { "Left Mixer", "Playback Switch", "Left DAC" },
+ { "Left Mixer", NULL, "Left DAC" },
{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
{ "Left Mixer", "Right Playback Switch", "Right DAC" },
{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },

{ "Right Mixer", "Left Playback Switch", "Left DAC" },
{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
- { "Right Mixer", "Playback Switch", "Right DAC" },
+ { "Right Mixer", NULL, "Right DAC" },
{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },

{ "DAC DIG", NULL, "DAC STM" },
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
|
|
index 6b0993258e039b..6d861046b582b5 100644
|
|
--- a/sound/usb/midi.c
|
|
+++ b/sound/usb/midi.c
|
|
@@ -1145,7 +1145,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
|
|
{
|
|
struct usbmidi_out_port *port = substream->runtime->private_data;
|
|
|
|
- cancel_work_sync(&port->ep->work);
|
|
+ flush_work(&port->ep->work);
|
|
return substream_open(substream, 0, 0);
|
|
}
|
|
|
|
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
|
|
index 93d9ed8983dfce..d9d4c5922a50bb 100644
|
|
--- a/sound/usb/quirks.c
|
|
+++ b/sound/usb/quirks.c
|
|
@@ -1775,6 +1775,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
|
|
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
|
|
subs->stream_offset_adj = 2;
|
|
break;
|
|
+ case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */
|
|
case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
|
|
pioneer_djm_set_format_quirk(subs, 0x0082);
|
|
break;
|
|
diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h
|
|
index de31a0143139b7..f02f411d550d18 100644
|
|
--- a/tools/testing/selftests/rseq/rseq-riscv-bits.h
|
|
+++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h
|
|
@@ -243,7 +243,7 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
|
|
#ifdef RSEQ_COMPARE_TWICE
|
|
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
|
|
#endif
|
|
- RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
|
|
+ RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, 3)
|
|
RSEQ_INJECT_ASM(4)
|
|
RSEQ_ASM_DEFINE_ABORT(4, abort)
|
|
: /* gcc asm goto does not allow outputs */
|
|
@@ -251,8 +251,8 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
|
|
[current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
|
|
[rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
|
|
[ptr] "r" (ptr),
|
|
- [off] "er" (off),
|
|
- [inc] "er" (inc)
|
|
+ [off] "r" (off),
|
|
+ [inc] "r" (inc)
|
|
RSEQ_INJECT_INPUT
|
|
: "memory", RSEQ_ASM_TMP_REG_1
|
|
RSEQ_INJECT_CLOBBER
|
|
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
|
|
index 37e598d0a365e2..67d544aaa9a3b0 100644
|
|
--- a/tools/testing/selftests/rseq/rseq-riscv.h
|
|
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
|
|
@@ -158,7 +158,7 @@ do { \
|
|
"bnez " RSEQ_ASM_TMP_REG_1 ", 222b\n" \
|
|
"333:\n"
|
|
|
|
-#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label) \
|
|
+#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label) \
|
|
"mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n" \
|
|
RSEQ_ASM_OP_R_ADD(off) \
|
|
REG_L RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n" \
|
|
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
|
|
index a985e57954820e..198cdf75c837bb 100644
|
|
--- a/tools/tracing/rtla/src/timerlat_hist.c
|
|
+++ b/tools/tracing/rtla/src/timerlat_hist.c
|
|
@@ -905,7 +905,7 @@ timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_param
|
|
* On kernels without support, user threads will have already failed
|
|
* on missing timerlat_fd, and kernel threads do not need it.
|
|
*/
|
|
- retval = osnoise_set_workload(tool->context, params->kernel_workload);
|
|
+ retval = osnoise_set_workload(tool->context, !params->user_hist);
|
|
if (retval < -1) {
|
|
err_msg("Failed to set OSNOISE_WORKLOAD option\n");
|
|
goto out_err;
|
|
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
|
|
index 1fed4c8d8520f9..7212855d336417 100644
|
|
--- a/tools/tracing/rtla/src/timerlat_top.c
|
|
+++ b/tools/tracing/rtla/src/timerlat_top.c
|
|
@@ -684,7 +684,7 @@ timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *
|
|
* On kernels without support, user threads will have already failed
|
|
* on missing timerlat_fd, and kernel threads do not need it.
|
|
*/
|
|
- retval = osnoise_set_workload(top->context, params->kernel_workload);
|
|
+ retval = osnoise_set_workload(top->context, !params->user_top);
|
|
if (retval < -1) {
|
|
err_msg("Failed to set OSNOISE_WORKLOAD option\n");
|
|
goto out_err;
|