* Attached cubox-i to stock kernel. Tested. Sadly ZRAM has some issues, so it must remain disabled.
* More imx6 updates for cubox/udoo.
* Enable HDMI on Cubox-i.

Signed-off-by: Igor Pecovnik <igor.pecovnik@gmail.com>
diff --git a/Documentation/devicetree/bindings/eeprom/eeprom.txt b/Documentation/devicetree/bindings/eeprom/eeprom.txt
index 735bc94444bb..4dcce8ee5cee 100644
--- a/Documentation/devicetree/bindings/eeprom/eeprom.txt
+++ b/Documentation/devicetree/bindings/eeprom/eeprom.txt
@@ -6,7 +6,8 @@ Required properties:
"atmel,24c00", "atmel,24c01", "atmel,24c02", "atmel,24c04",
"atmel,24c08", "atmel,24c16", "atmel,24c32", "atmel,24c64",
- "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024"
+ "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024",
+ "atmel,24c2048"
"catalyst,24c32"
@@ -17,7 +18,7 @@ Required properties:
If there is no specific driver for <manufacturer>, a generic
driver based on <type> is selected. Possible types are:
"24c00", "24c01", "24c02", "24c04", "24c08", "24c16", "24c32", "24c64",
- "24c128", "24c256", "24c512", "24c1024", "spd"
+ "24c128", "24c256", "24c512", "24c1024", "24c2048", "spd"
- reg : the I2C address of the EEPROM
diff --git a/Makefile b/Makefile
index 2b8434aaeece..a452ead13b1e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 158
+SUBLEVEL = 159
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 06377400dc09..469642801a68 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -55,15 +55,15 @@
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
- defined(CONFIG_ALPHA_SHARK) || \
- defined(CONFIG_ALPHA_EIGER)
+ defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
#elif defined(CONFIG_ALPHA_TITAN)
#define NR_IRQS 80
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
- defined(CONFIG_ALPHA_TAKARA)
+ defined(CONFIG_ALPHA_TAKARA) || \
+ defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 83e9eee57a55..f70663127aad 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -77,7 +77,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
- (r) <= 18 ? (r)+8 : (r)-10])
+ (r) <= 18 ? (r)+10 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 78492a0bbbab..3c58ec707ea9 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -156,7 +156,7 @@
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 EVM";
+ simple-audio-card,name = "DA850-OMAPL138 EVM";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index 7b8ab21fed6c..920e64cdb673 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -26,7 +26,7 @@
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 LCDK";
+ simple-audio-card,name = "DA850-OMAPL138 LCDK";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index d8fca9db46d0..dddbc0d03da5 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -35,8 +35,8 @@
compatible = "gpio-fan";
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
pinctrl-names = "default";
- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
- &gpio1 13 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+ &gpio1 13 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0
3000 1
6000 2>;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index e616f61f859d..7d727506096f 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -465,6 +465,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index c55db1e22f0c..b9356dbfded0 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -106,6 +106,7 @@
#define ARM_CPU_PART_SCORPION 0x510002d0
extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f379f5f849a9..1bfcc3bcfc6d 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -23,7 +23,7 @@ struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
-extern struct processor {
+struct processor {
/* MISC
* get data abort address/flags
*/
@@ -79,9 +79,13 @@ extern struct processor {
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
-} processor;
+};
#ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else
-#define cpu_proc_init processor._proc_init
-#define cpu_proc_fin processor._proc_fin
-#define cpu_reset processor.reset
-#define cpu_do_idle processor._do_idle
-#define cpu_dcache_clean_area processor.dcache_clean_area
-#define cpu_set_pte_ext processor.set_pte_ext
-#define cpu_do_switch_mm processor.switch_mm
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend processor.do_suspend
-#define cpu_do_resume processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised. We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f) cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ unsigned int cpu = smp_processor_id();
+ *cpu_vtable[cpu] = *p;
+ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+ cpu_vtable[0]->dcache_clean_area);
+ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+ cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f) processor.f
+#define PROC_TABLE(f) processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ processor = *p;
+}
+#endif
+
+#define cpu_proc_init PROC_VTABLE(_proc_init)
+#define cpu_check_bugs PROC_VTABLE(check_bugs)
+#define cpu_proc_fin PROC_VTABLE(_proc_fin)
+#define cpu_reset PROC_VTABLE(reset)
+#define cpu_do_idle PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend PROC_VTABLE(do_suspend)
+#define cpu_do_resume PROC_VTABLE(do_resume)
#endif
extern void cpu_resume(void);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
|
|
index 57d2ad9c75ca..df8420672c7e 100644
|
|
--- a/arch/arm/include/asm/thread_info.h
|
|
+++ b/arch/arm/include/asm/thread_info.h
|
|
@@ -124,8 +124,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
|
|
struct user_vfp;
|
|
struct user_vfp_exc;
|
|
|
|
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
|
|
- struct user_vfp_exc __user *);
|
|
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
|
|
+ struct user_vfp_exc *);
|
|
extern int vfp_restore_user_hwstate(struct user_vfp *,
|
|
struct user_vfp_exc *);
|
|
#endif
|
|
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
|
|
index 7b17460127fd..0f6c6b873bc5 100644
|
|
--- a/arch/arm/include/asm/uaccess.h
|
|
+++ b/arch/arm/include/asm/uaccess.h
|
|
@@ -99,6 +99,14 @@ extern int __put_user_bad(void);
|
|
static inline void set_fs(mm_segment_t fs)
|
|
{
|
|
current_thread_info()->addr_limit = fs;
|
|
+
|
|
+ /*
|
|
+ * Prevent a mispredicted conditional call to set_fs from forwarding
|
|
+ * the wrong address limit to access_ok under speculation.
|
|
+ */
|
|
+ dsb(nsh);
|
|
+ isb();
|
|
+
|
|
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
|
|
}
|
|
|
|
@@ -121,6 +129,32 @@ static inline void set_fs(mm_segment_t fs)
|
|
#define __inttype(x) \
|
|
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
|
|
|
|
+/*
|
|
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
|
|
+ * is above the current addr_limit.
|
|
+ */
|
|
+#define uaccess_mask_range_ptr(ptr, size) \
|
|
+ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
|
|
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
|
|
+ size_t size)
|
|
+{
|
|
+ void __user *safe_ptr = (void __user *)ptr;
|
|
+ unsigned long tmp;
|
|
+
|
|
+ asm volatile(
|
|
+ " sub %1, %3, #1\n"
|
|
+ " subs %1, %1, %0\n"
|
|
+ " addhs %1, %1, #1\n"
|
|
+ " subhss %1, %1, %2\n"
|
|
+ " movlo %0, #0\n"
|
|
+ : "+r" (safe_ptr), "=&r" (tmp)
|
|
+ : "r" (size), "r" (current_thread_info()->addr_limit)
|
|
+ : "cc");
|
|
+
|
|
+ csdb();
|
|
+ return safe_ptr;
|
|
+}
|
|
+
|
|
/*
|
|
* Single-value transfer routines. They automatically use the right
|
|
* size if we just have the right pointer type. Note that the functions
|
|
@@ -392,6 +426,14 @@ do { \
|
|
__pu_err; \
|
|
})
|
|
|
|
+#ifdef CONFIG_CPU_SPECTRE
|
|
+/*
|
|
+ * When mitigating Spectre variant 1.1, all accessors need to include
|
|
+ * verification of the address space.
|
|
+ */
|
|
+#define __put_user(x, ptr) put_user(x, ptr)
|
|
+
|
|
+#else
|
|
#define __put_user(x, ptr) \
|
|
({ \
|
|
long __pu_err = 0; \
|
|
@@ -399,12 +441,6 @@ do { \
|
|
__pu_err; \
|
|
})
|
|
|
|
-#define __put_user_error(x, ptr, err) \
|
|
-({ \
|
|
- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
|
|
- (void) 0; \
|
|
-})
|
|
-
|
|
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
|
|
do { \
|
|
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
|
|
@@ -484,6 +520,7 @@ do { \
|
|
: "r" (x), "i" (-EFAULT) \
|
|
: "cc")
|
|
|
|
+#endif /* !CONFIG_CPU_SPECTRE */
|
|
|
|
#ifdef CONFIG_MMU
|
|
extern unsigned long __must_check
|
|
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
|
|
index 7be511310191..d41d3598e5e5 100644
|
|
--- a/arch/arm/kernel/bugs.c
|
|
+++ b/arch/arm/kernel/bugs.c
|
|
@@ -6,8 +6,8 @@
|
|
void check_other_bugs(void)
|
|
{
|
|
#ifdef MULTI_CPU
|
|
- if (processor.check_bugs)
|
|
- processor.check_bugs();
|
|
+ if (cpu_check_bugs)
|
|
+ cpu_check_bugs();
|
|
#endif
|
|
}
|
|
|
|
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
|
|
index 8733012d231f..7e662bdd5cb3 100644
|
|
--- a/arch/arm/kernel/head-common.S
|
|
+++ b/arch/arm/kernel/head-common.S
|
|
@@ -122,6 +122,9 @@ __mmap_switched_data:
|
|
.long init_thread_union + THREAD_START_SP @ sp
|
|
.size __mmap_switched_data, . - __mmap_switched_data
|
|
|
|
+ __FINIT
|
|
+ .text
|
|
+
|
|
/*
|
|
* This provides a C-API version of __lookup_processor_type
|
|
*/
|
|
@@ -133,9 +136,6 @@ ENTRY(lookup_processor_type)
|
|
ldmfd sp!, {r4 - r6, r9, pc}
|
|
ENDPROC(lookup_processor_type)
|
|
|
|
- __FINIT
|
|
- .text
|
|
-
|
|
/*
|
|
* Read processor ID register (CP#15, CR0), and look up in the linker-built
|
|
* supported processor list. Note that we can't use the absolute addresses
|
|
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
|
|
index f4e54503afa9..4764742db7b0 100644
|
|
--- a/arch/arm/kernel/setup.c
|
|
+++ b/arch/arm/kernel/setup.c
|
|
@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
|
|
|
|
#ifdef MULTI_CPU
|
|
struct processor processor __ro_after_init;
|
|
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
+struct processor *cpu_vtable[NR_CPUS] = {
|
|
+ [0] = &processor,
|
|
+};
|
|
+#endif
|
|
#endif
|
|
#ifdef MULTI_TLB
|
|
struct cpu_tlb_fns cpu_tlb __ro_after_init;
|
|
@@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void)
|
|
}
|
|
#endif
|
|
|
|
-static void __init setup_processor(void)
|
|
+/*
|
|
+ * locate processor in the list of supported processor types. The linker
|
|
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
|
|
+ */
|
|
+struct proc_info_list *lookup_processor(u32 midr)
|
|
{
|
|
- struct proc_info_list *list;
|
|
+ struct proc_info_list *list = lookup_processor_type(midr);
|
|
|
|
- /*
|
|
- * locate processor in the list of supported processor
|
|
- * types. The linker builds this table for us from the
|
|
- * entries in arch/arm/mm/proc-*.S
|
|
- */
|
|
- list = lookup_processor_type(read_cpuid_id());
|
|
if (!list) {
|
|
- pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
|
|
- read_cpuid_id());
|
|
- while (1);
|
|
+ pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
|
|
+ smp_processor_id(), midr);
|
|
+ while (1)
|
|
+ /* can't use cpu_relax() here as it may require MMU setup */;
|
|
}
|
|
|
|
+ return list;
|
|
+}
|
|
+
|
|
+static void __init setup_processor(void)
|
|
+{
|
|
+ unsigned int midr = read_cpuid_id();
|
|
+ struct proc_info_list *list = lookup_processor(midr);
|
|
+
|
|
cpu_name = list->cpu_name;
|
|
__cpu_architecture = __get_cpu_architecture();
|
|
|
|
-#ifdef MULTI_CPU
|
|
- processor = *list->proc;
|
|
-#endif
|
|
+ init_proc_vtable(list->proc);
|
|
#ifdef MULTI_TLB
|
|
cpu_tlb = *list->tlb;
|
|
#endif
|
|
@@ -700,7 +710,7 @@ static void __init setup_processor(void)
|
|
#endif
|
|
|
|
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
|
|
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
|
|
+ list->cpu_name, midr, midr & 15,
|
|
proc_arch[cpu_architecture()], get_cr());
|
|
|
|
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
|
|
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
|
|
index 6bee5c9b1133..0a066f03b5ec 100644
|
|
--- a/arch/arm/kernel/signal.c
|
|
+++ b/arch/arm/kernel/signal.c
|
|
@@ -94,17 +94,18 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
|
|
|
|
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
|
|
{
|
|
- const unsigned long magic = VFP_MAGIC;
|
|
- const unsigned long size = VFP_STORAGE_SIZE;
|
|
+ struct vfp_sigframe kframe;
|
|
int err = 0;
|
|
|
|
- __put_user_error(magic, &frame->magic, err);
|
|
- __put_user_error(size, &frame->size, err);
|
|
+ memset(&kframe, 0, sizeof(kframe));
|
|
+ kframe.magic = VFP_MAGIC;
|
|
+ kframe.size = VFP_STORAGE_SIZE;
|
|
|
|
+ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
|
|
if (err)
|
|
- return -EFAULT;
|
|
+ return err;
|
|
|
|
- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
|
|
+ return __copy_to_user(frame, &kframe, sizeof(kframe));
|
|
}
|
|
|
|
static int restore_vfp_context(struct vfp_sigframe __user *auxp)
|
|
@@ -256,30 +257,35 @@ static int
|
|
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
|
|
{
|
|
struct aux_sigframe __user *aux;
|
|
+ struct sigcontext context;
|
|
int err = 0;
|
|
|
|
- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
|
|
- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
|
|
- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
|
|
- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
|
|
- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
|
|
- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
|
|
- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
|
|
- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
|
|
- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
|
|
- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
|
|
- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
|
|
- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
|
|
- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
|
|
- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
|
|
- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
|
|
- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
|
|
- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
|
|
-
|
|
- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
|
|
- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
|
|
- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
|
|
- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
|
|
+ context = (struct sigcontext) {
|
|
+ .arm_r0 = regs->ARM_r0,
|
|
+ .arm_r1 = regs->ARM_r1,
|
|
+ .arm_r2 = regs->ARM_r2,
|
|
+ .arm_r3 = regs->ARM_r3,
|
|
+ .arm_r4 = regs->ARM_r4,
|
|
+ .arm_r5 = regs->ARM_r5,
|
|
+ .arm_r6 = regs->ARM_r6,
|
|
+ .arm_r7 = regs->ARM_r7,
|
|
+ .arm_r8 = regs->ARM_r8,
|
|
+ .arm_r9 = regs->ARM_r9,
|
|
+ .arm_r10 = regs->ARM_r10,
|
|
+ .arm_fp = regs->ARM_fp,
|
|
+ .arm_ip = regs->ARM_ip,
|
|
+ .arm_sp = regs->ARM_sp,
|
|
+ .arm_lr = regs->ARM_lr,
|
|
+ .arm_pc = regs->ARM_pc,
|
|
+ .arm_cpsr = regs->ARM_cpsr,
|
|
+
|
|
+ .trap_no = current->thread.trap_no,
|
|
+ .error_code = current->thread.error_code,
|
|
+ .fault_address = current->thread.address,
|
|
+ .oldmask = set->sig[0],
|
|
+ };
|
|
+
|
|
+ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
|
|
|
|
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
|
|
|
|
@@ -296,7 +302,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
|
|
if (err == 0)
|
|
err |= preserve_vfp_context(&aux->vfp);
|
|
#endif
|
|
- __put_user_error(0, &aux->end_magic, err);
|
|
+ err |= __put_user(0, &aux->end_magic);
|
|
|
|
return err;
|
|
}
|
|
@@ -428,7 +434,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
|
|
/*
|
|
* Set uc.uc_flags to a value which sc.trap_no would never have.
|
|
*/
|
|
- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
|
|
+ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
|
|
|
|
err |= setup_sigframe(frame, regs, set);
|
|
if (err == 0)
|
|
@@ -448,8 +454,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
|
|
|
|
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
|
|
|
|
- __put_user_error(0, &frame->sig.uc.uc_flags, err);
|
|
- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
|
|
+ err |= __put_user(0, &frame->sig.uc.uc_flags);
|
|
+ err |= __put_user(NULL, &frame->sig.uc.uc_link);
|
|
|
|
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
|
|
err |= setup_sigframe(&frame->sig, regs, set);
|
|
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
|
|
index 4b129aac7233..8faf869e9fb2 100644
|
|
--- a/arch/arm/kernel/smp.c
|
|
+++ b/arch/arm/kernel/smp.c
|
|
@@ -27,6 +27,7 @@
|
|
#include <linux/completion.h>
|
|
#include <linux/cpufreq.h>
|
|
#include <linux/irq_work.h>
|
|
+#include <linux/slab.h>
|
|
|
|
#include <linux/atomic.h>
|
|
#include <asm/bugs.h>
|
|
@@ -40,6 +41,7 @@
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/pgalloc.h>
|
|
+#include <asm/procinfo.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/sections.h>
|
|
#include <asm/tlbflush.h>
|
|
@@ -100,6 +102,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
|
|
#endif
|
|
}
|
|
|
|
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
+static int secondary_biglittle_prepare(unsigned int cpu)
|
|
+{
|
|
+ if (!cpu_vtable[cpu])
|
|
+ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
|
|
+
|
|
+ return cpu_vtable[cpu] ? 0 : -ENOMEM;
|
|
+}
|
|
+
|
|
+static void secondary_biglittle_init(void)
|
|
+{
|
|
+ init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
|
|
+}
|
|
+#else
|
|
+static int secondary_biglittle_prepare(unsigned int cpu)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void secondary_biglittle_init(void)
|
|
+{
|
|
+}
|
|
+#endif
|
|
+
|
|
int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
|
{
|
|
int ret;
|
|
@@ -107,6 +133,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
|
if (!smp_ops.smp_boot_secondary)
|
|
return -ENOSYS;
|
|
|
|
+ ret = secondary_biglittle_prepare(cpu);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
/*
|
|
* We need to tell the secondary core where to find
|
|
* its stack and the page tables.
|
|
@@ -358,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
|
|
struct mm_struct *mm = &init_mm;
|
|
unsigned int cpu;
|
|
|
|
+ secondary_biglittle_init();
|
|
+
|
|
/*
|
|
* The identity mapping is uncached (strongly ordered), so
|
|
* switch away from it before attempting any exclusive accesses.
|
|
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
|
|
index 640748e27035..d844c5c9364b 100644
|
|
--- a/arch/arm/kernel/sys_oabi-compat.c
|
|
+++ b/arch/arm/kernel/sys_oabi-compat.c
|
|
@@ -276,6 +276,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
|
|
int maxevents, int timeout)
|
|
{
|
|
struct epoll_event *kbuf;
|
|
+ struct oabi_epoll_event e;
|
|
mm_segment_t fs;
|
|
long ret, err, i;
|
|
|
|
@@ -294,8 +295,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
|
|
set_fs(fs);
|
|
err = 0;
|
|
for (i = 0; i < ret; i++) {
|
|
- __put_user_error(kbuf[i].events, &events->events, err);
|
|
- __put_user_error(kbuf[i].data, &events->data, err);
|
|
+ e.events = kbuf[i].events;
|
|
+ e.data = kbuf[i].data;
|
|
+ err = __copy_to_user(events, &e, sizeof(e));
|
|
+ if (err)
|
|
+ break;
|
|
events++;
|
|
}
|
|
kfree(kbuf);
|
|
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
|
|
index a826df3d3814..6709a8d33963 100644
|
|
--- a/arch/arm/lib/copy_from_user.S
|
|
+++ b/arch/arm/lib/copy_from_user.S
|
|
@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
|
|
#ifdef CONFIG_CPU_SPECTRE
|
|
get_thread_info r3
|
|
ldr r3, [r3, #TI_ADDR_LIMIT]
|
|
- adds ip, r1, r2 @ ip=addr+size
|
|
- sub r3, r3, #1 @ addr_limit - 1
|
|
- cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
|
|
- movcs r1, #0 @ addr = NULL
|
|
- csdb
|
|
+ uaccess_mask_range_ptr r1, r2, r3, ip
|
|
#endif
|
|
|
|
#include "copy_template.S"
|
|
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
|
|
index caf5019d8161..970abe521197 100644
|
|
--- a/arch/arm/lib/copy_to_user.S
|
|
+++ b/arch/arm/lib/copy_to_user.S
|
|
@@ -94,6 +94,11 @@
|
|
|
|
ENTRY(__copy_to_user_std)
|
|
WEAK(arm_copy_to_user)
|
|
+#ifdef CONFIG_CPU_SPECTRE
|
|
+ get_thread_info r3
|
|
+ ldr r3, [r3, #TI_ADDR_LIMIT]
|
|
+ uaccess_mask_range_ptr r0, r2, r3, ip
|
|
+#endif
|
|
|
|
#include "copy_template.S"
|
|
|
|
@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
|
|
rsb r0, r0, r2
|
|
copy_abort_end
|
|
.popsection
|
|
-
|
|
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
|
|
index 6bd1089b07e0..f598d792bace 100644
|
|
--- a/arch/arm/lib/uaccess_with_memcpy.c
|
|
+++ b/arch/arm/lib/uaccess_with_memcpy.c
|
|
@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
n = __copy_to_user_std(to, from, n);
|
|
uaccess_restore(ua_flags);
|
|
} else {
|
|
- n = __copy_to_user_memcpy(to, from, n);
|
|
+ n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
|
|
+ from, n);
|
|
}
|
|
return n;
|
|
}
|
|
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
|
|
index ed9a01484030..a52fe871adbc 100644
|
|
--- a/arch/arm/mach-integrator/impd1.c
|
|
+++ b/arch/arm/mach-integrator/impd1.c
|
|
@@ -394,7 +394,11 @@ static int __ref impd1_probe(struct lm_device *dev)
|
|
sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
|
|
GFP_KERNEL);
|
|
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
|
|
- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
|
|
+ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
|
|
+ "lm%x:00700", dev->id);
|
|
+ if (!lookup || !chipname || !mmciname)
|
|
+ return -ENOMEM;
|
|
+
|
|
lookup->dev_id = mmciname;
|
|
/*
|
|
* Offsets on GPIO block 1:
|
|
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
|
|
index 7d9176c4a21d..f8bb65032b79 100644
|
|
--- a/arch/arm/mm/proc-macros.S
|
|
+++ b/arch/arm/mm/proc-macros.S
|
|
@@ -275,6 +275,13 @@
|
|
.endm
|
|
|
|
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
|
|
+/*
|
|
+ * If we are building for big.Little with branch predictor hardening,
|
|
+ * we need the processor function tables to remain available after boot.
|
|
+ */
|
|
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
+ .section ".rodata"
|
|
+#endif
|
|
.type \name\()_processor_functions, #object
|
|
.align 2
|
|
ENTRY(\name\()_processor_functions)
|
|
@@ -310,6 +317,9 @@ ENTRY(\name\()_processor_functions)
|
|
.endif
|
|
|
|
.size \name\()_processor_functions, . - \name\()_processor_functions
|
|
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
+ .previous
|
|
+#endif
|
|
.endm
|
|
|
|
.macro define_cache_functions name:req
|
|
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
|
|
index 5544b82a2e7a..9a07916af8dd 100644
|
|
--- a/arch/arm/mm/proc-v7-bugs.c
|
|
+++ b/arch/arm/mm/proc-v7-bugs.c
|
|
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
|
|
case ARM_CPU_PART_CORTEX_A17:
|
|
case ARM_CPU_PART_CORTEX_A73:
|
|
case ARM_CPU_PART_CORTEX_A75:
|
|
- if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
|
|
- goto bl_error;
|
|
per_cpu(harden_branch_predictor_fn, cpu) =
|
|
harden_branch_predictor_bpiall;
|
|
spectre_v2_method = "BPIALL";
|
|
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
|
|
|
|
case ARM_CPU_PART_CORTEX_A15:
|
|
case ARM_CPU_PART_BRAHMA_B15:
|
|
- if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
|
|
- goto bl_error;
|
|
per_cpu(harden_branch_predictor_fn, cpu) =
|
|
harden_branch_predictor_iciallu;
|
|
spectre_v2_method = "ICIALLU";
|
|
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
|
|
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
|
if ((int)res.a0 != 0)
|
|
break;
|
|
- if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
|
|
- goto bl_error;
|
|
per_cpu(harden_branch_predictor_fn, cpu) =
|
|
call_hvc_arch_workaround_1;
|
|
- processor.switch_mm = cpu_v7_hvc_switch_mm;
|
|
+ cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
|
|
spectre_v2_method = "hypervisor";
|
|
break;
|
|
|
|
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
|
|
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
|
if ((int)res.a0 != 0)
|
|
break;
|
|
- if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
|
|
- goto bl_error;
|
|
per_cpu(harden_branch_predictor_fn, cpu) =
|
|
call_smc_arch_workaround_1;
|
|
- processor.switch_mm = cpu_v7_smc_switch_mm;
|
|
+ cpu_do_switch_mm = cpu_v7_smc_switch_mm;
|
|
spectre_v2_method = "firmware";
|
|
break;
|
|
|
|
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
|
|
if (spectre_v2_method)
|
|
pr_info("CPU%u: Spectre v2: using %s workaround\n",
|
|
smp_processor_id(), spectre_v2_method);
|
|
- return;
|
|
-
|
|
-bl_error:
|
|
- pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
|
|
- cpu);
|
|
}
|
|
#else
|
|
static void cpu_v7_spectre_init(void)
|
|
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
|
|
index 8e5e97989fda..00dd8cf36632 100644
|
|
--- a/arch/arm/vfp/vfpmodule.c
|
|
+++ b/arch/arm/vfp/vfpmodule.c
|
|
@@ -554,12 +554,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
|
|
* Save the current VFP state into the provided structures and prepare
|
|
* for entry into a new function (signal handler).
|
|
*/
|
|
-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
|
|
- struct user_vfp_exc __user *ufp_exc)
|
|
+int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
|
|
+ struct user_vfp_exc *ufp_exc)
|
|
{
|
|
struct thread_info *thread = current_thread_info();
|
|
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
|
|
- int err = 0;
|
|
|
|
/* Ensure that the saved hwstate is up-to-date. */
|
|
vfp_sync_hwstate(thread);
|
|
@@ -568,22 +567,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
|
|
* Copy the floating point registers. There can be unused
|
|
* registers see asm/hwcap.h for details.
|
|
*/
|
|
- err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
|
|
- sizeof(hwstate->fpregs));
|
|
+ memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
|
|
+
|
|
/*
|
|
* Copy the status and control register.
|
|
*/
|
|
- __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
|
|
+ ufp->fpscr = hwstate->fpscr;
|
|
|
|
/*
|
|
* Copy the exception registers.
|
|
*/
|
|
- __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
|
|
- __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
|
|
- __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
|
|
-
|
|
- if (err)
|
|
- return -EFAULT;
|
|
+ ufp_exc->fpexc = hwstate->fpexc;
|
|
+ ufp_exc->fpinst = hwstate->fpinst;
|
|
+ ufp_exc->fpinst2 = hwstate->fpinst2;
|
|
|
|
/* Ensure that VFP is disabled. */
|
|
vfp_flush_hwstate(thread);
|
|
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
|
|
index cadf99923600..ab04751a12b6 100644
|
|
--- a/arch/x86/events/core.c
|
|
+++ b/arch/x86/events/core.c
|
|
@@ -2196,6 +2196,19 @@ void perf_check_microcode(void)
|
|
}
|
|
EXPORT_SYMBOL_GPL(perf_check_microcode);
|
|
|
|
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
|
|
+{
|
|
+ if (x86_pmu.check_period && x86_pmu.check_period(event, value))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (value && x86_pmu.limit_period) {
|
|
+ if (x86_pmu.limit_period(event, value) > value)
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static struct pmu pmu = {
|
|
.pmu_enable = x86_pmu_enable,
|
|
.pmu_disable = x86_pmu_disable,
|
|
@@ -2220,6 +2233,7 @@ static struct pmu pmu = {
|
|
.event_idx = x86_pmu_event_idx,
|
|
.sched_task = x86_pmu_sched_task,
|
|
.task_ctx_size = sizeof(struct x86_perf_task_context),
|
|
+ .check_period = x86_pmu_check_period,
|
|
};
|
|
|
|
void arch_perf_update_userpage(struct perf_event *event,
|
|
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
|
|
index f600ab601e00..f0639c8ebcb6 100644
|
|
--- a/arch/x86/events/intel/core.c
|
|
+++ b/arch/x86/events/intel/core.c
|
|
@@ -3262,6 +3262,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
|
|
intel_pmu_lbr_sched_task(ctx, sched_in);
|
|
}
|
|
|
|
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
|
|
+{
|
|
+ return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
|
|
+}
|
|
+
|
|
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
|
|
|
|
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
|
|
@@ -3328,6 +3333,8 @@ static __initconst const struct x86_pmu core_pmu = {
|
|
.cpu_starting = intel_pmu_cpu_starting,
|
|
.cpu_dying = intel_pmu_cpu_dying,
|
|
.cpu_dead = intel_pmu_cpu_dead,
|
|
+
|
|
+ .check_period = intel_pmu_check_period,
|
|
};
|
|
|
|
static __initconst const struct x86_pmu intel_pmu = {
|
|
@@ -3367,6 +3374,8 @@ static __initconst const struct x86_pmu intel_pmu = {
|
|
|
|
.guest_get_msrs = intel_guest_get_msrs,
|
|
.sched_task = intel_pmu_sched_task,
|
|
+
|
|
+ .check_period = intel_pmu_check_period,
|
|
};
|
|
|
|
static __init void intel_clovertown_quirk(void)
|
|
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
|
|
index 7ace39c51ff7..5c21680b0a69 100644
|
|
--- a/arch/x86/events/perf_event.h
|
|
+++ b/arch/x86/events/perf_event.h
|
|
@@ -626,6 +626,11 @@ struct x86_pmu {
|
|
* Intel host/guest support (KVM)
|
|
*/
|
|
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
|
|
+
|
|
+ /*
|
|
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
|
|
+ */
|
|
+ int (*check_period) (struct perf_event *event, u64 period);
|
|
};
|
|
|
|
struct x86_perf_task_context {
|
|
@@ -833,7 +838,7 @@ static inline int amd_pmu_init(void)
|
|
|
|
#ifdef CONFIG_CPU_SUP_INTEL
|
|
|
|
-static inline bool intel_pmu_has_bts(struct perf_event *event)
|
|
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
|
|
{
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
unsigned int hw_event, bts_event;
|
|
@@ -844,7 +849,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
|
|
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
|
|
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
|
|
|
|
- return hw_event == bts_event && hwc->sample_period == 1;
|
|
+ return hw_event == bts_event && period == 1;
|
|
+}
|
|
+
|
|
+static inline bool intel_pmu_has_bts(struct perf_event *event)
|
|
+{
|
|
+ struct hw_perf_event *hwc = &event->hw;
|
|
+
|
|
+ return intel_pmu_has_bts_period(event, hwc->sample_period);
|
|
}
|
|
|
|
int intel_pmu_save_and_restart(struct perf_event *event);
|
|
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
|
|
index cb26f18d43af..555c002167ad 100644
|
|
--- a/arch/x86/ia32/ia32_aout.c
|
|
+++ b/arch/x86/ia32/ia32_aout.c
|
|
@@ -50,7 +50,7 @@ static unsigned long get_dr(int n)
|
|
/*
|
|
* fill in the user structure for a core dump..
|
|
*/
|
|
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
|
|
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
|
|
{
|
|
u32 fs, gs;
|
|
memset(dump, 0, sizeof(*dump));
|
|
@@ -156,10 +156,12 @@ static int aout_core_dump(struct coredump_params *cprm)
|
|
fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
has_dumped = 1;
|
|
+
|
|
+ fill_dump(cprm->regs, &dump);
|
|
+
|
|
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
|
|
dump.u_ar0 = offsetof(struct user32, regs);
|
|
dump.signal = cprm->siginfo->si_signo;
|
|
- dump_thread32(cprm->regs, &dump);
|
|
|
|
/*
|
|
* If the size of the dump file exceeds the rlimit, then see
|
|
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
|
|
index e652a7cc6186..3f697a9e3f59 100644
|
|
--- a/arch/x86/include/asm/uv/bios.h
|
|
+++ b/arch/x86/include/asm/uv/bios.h
|
|
@@ -48,7 +48,8 @@ enum {
|
|
BIOS_STATUS_SUCCESS = 0,
|
|
BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
|
|
BIOS_STATUS_EINVAL = -EINVAL,
|
|
- BIOS_STATUS_UNAVAIL = -EBUSY
|
|
+ BIOS_STATUS_UNAVAIL = -EBUSY,
|
|
+ BIOS_STATUS_ABORT = -EINTR,
|
|
};
|
|
|
|
/* Address map parameters */
|
|
@@ -167,4 +168,9 @@ extern long system_serial_number;
|
|
|
|
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
|
|
|
|
+/*
|
|
+ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
|
|
+ */
|
|
+extern struct semaphore __efi_uv_runtime_lock;
|
|
+
|
|
#endif /* _ASM_X86_UV_BIOS_H */
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index 91db841101ca..1870fa7387b7 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -2178,7 +2178,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
|
|
if (!entry_only)
|
|
j = find_msr(&m->host, msr);
|
|
|
|
- if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
|
|
+ if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
|
|
+ (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
|
|
printk_once(KERN_WARNING "Not enough msr switch entries. "
|
|
"Can't add msr %x\n", msr);
|
|
return;
|
|
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
|
|
index 4a6a5a26c582..eb33432f2f24 100644
|
|
--- a/arch/x86/platform/uv/bios_uv.c
|
|
+++ b/arch/x86/platform/uv/bios_uv.c
|
|
@@ -29,7 +29,8 @@
|
|
|
|
struct uv_systab *uv_systab;
|
|
|
|
-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
|
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
|
+ u64 a4, u64 a5)
|
|
{
|
|
struct uv_systab *tab = uv_systab;
|
|
s64 ret;
|
|
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
|
|
|
return ret;
|
|
}
|
|
+
|
|
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
|
+{
|
|
+ s64 ret;
|
|
+
|
|
+ if (down_interruptible(&__efi_uv_runtime_lock))
|
|
+ return BIOS_STATUS_ABORT;
|
|
+
|
|
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
|
|
+ up(&__efi_uv_runtime_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
EXPORT_SYMBOL_GPL(uv_bios_call);
|
|
|
|
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
|
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
|
unsigned long bios_flags;
|
|
s64 ret;
|
|
|
|
+ if (down_interruptible(&__efi_uv_runtime_lock))
|
|
+ return BIOS_STATUS_ABORT;
|
|
+
|
|
local_irq_save(bios_flags);
|
|
- ret = uv_bios_call(which, a1, a2, a3, a4, a5);
|
|
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
|
|
local_irq_restore(bios_flags);
|
|
|
|
+ up(&__efi_uv_runtime_lock);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
|
|
index 17b518cb787c..0ea065c6725a 100644
|
|
--- a/drivers/acpi/numa.c
|
|
+++ b/drivers/acpi/numa.c
|
|
@@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
|
|
{
|
|
struct acpi_srat_mem_affinity *p =
|
|
(struct acpi_srat_mem_affinity *)header;
|
|
- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
|
|
- (unsigned long)p->base_address,
|
|
- (unsigned long)p->length,
|
|
+ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
|
|
+ (unsigned long long)p->base_address,
|
|
+ (unsigned long long)p->length,
|
|
p->proximity_domain,
|
|
(p->flags & ACPI_SRAT_MEM_ENABLED) ?
|
|
"enabled" : "disabled",
|
|
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
|
|
index d6d91e8afa9e..61fe4bbc6dc0 100644
|
|
--- a/drivers/cpufreq/cpufreq.c
|
|
+++ b/drivers/cpufreq/cpufreq.c
|
|
@@ -1496,17 +1496,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
|
|
{
|
|
unsigned int ret_freq = 0;
|
|
|
|
- if (!cpufreq_driver->get)
|
|
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
|
|
return ret_freq;
|
|
|
|
ret_freq = cpufreq_driver->get(policy->cpu);
|
|
|
|
/*
|
|
- * Updating inactive policies is invalid, so avoid doing that. Also
|
|
- * if fast frequency switching is used with the given policy, the check
|
|
+ * If fast frequency switching is used with the given policy, the check
|
|
* against policy->cur is pointless, so skip it in that case too.
|
|
*/
|
|
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
|
|
+ if (policy->fast_switch_enabled)
|
|
return ret_freq;
|
|
|
|
if (ret_freq && policy->cur &&
|
|
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
|
|
index ae54870b2788..dd7f63354ca0 100644
|
|
--- a/drivers/firmware/efi/runtime-wrappers.c
|
|
+++ b/drivers/firmware/efi/runtime-wrappers.c
|
|
@@ -49,6 +49,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
+/*
|
|
+ * Expose the EFI runtime lock to the UV platform
|
|
+ */
|
|
+#ifdef CONFIG_X86_UV
|
|
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
|
|
+#endif
|
|
+
|
|
/*
|
|
* According to section 7.1 of the UEFI spec, Runtime Services are not fully
|
|
* reentrant, and there are particular combinations of calls that need to be
|
|
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
|
|
index f64f35cdc2ff..fa3f2f039a74 100644
|
|
--- a/drivers/gpu/drm/bridge/tc358767.c
|
|
+++ b/drivers/gpu/drm/bridge/tc358767.c
|
|
@@ -96,6 +96,8 @@
|
|
#define DP0_STARTVAL 0x064c
|
|
#define DP0_ACTIVEVAL 0x0650
|
|
#define DP0_SYNCVAL 0x0654
|
|
+#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
|
|
+#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
|
|
#define DP0_MISC 0x0658
|
|
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
|
|
#define BPC_6 (0 << 5)
|
|
@@ -140,6 +142,8 @@
|
|
#define DP0_LTLOOPCTRL 0x06d8
|
|
#define DP0_SNKLTCTRL 0x06e4
|
|
|
|
+#define DP1_SRCCTRL 0x07a0
|
|
+
|
|
/* PHY */
|
|
#define DP_PHY_CTRL 0x0800
|
|
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
|
|
@@ -148,6 +152,7 @@
|
|
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
|
|
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
|
|
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
|
|
+#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
|
|
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
|
|
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
|
|
|
|
@@ -538,6 +543,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
|
|
unsigned long rate;
|
|
u32 value;
|
|
int ret;
|
|
+ u32 dp_phy_ctrl;
|
|
|
|
rate = clk_get_rate(tc->refclk);
|
|
switch (rate) {
|
|
@@ -562,7 +568,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
|
|
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
|
|
tc_write(SYS_PLLPARAM, value);
|
|
|
|
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
|
|
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
|
|
+ if (tc->link.base.num_lanes == 2)
|
|
+ dp_phy_ctrl |= PHY_2LANE;
|
|
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
|
|
|
|
/*
|
|
* Initially PLLs are in bypass. Force PLL parameter update,
|
|
@@ -717,7 +726,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
|
|
|
|
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
|
|
|
|
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
|
|
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
|
|
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
|
|
+ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
|
|
|
|
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
|
|
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
|
|
@@ -827,12 +838,11 @@ static int tc_main_link_setup(struct tc_data *tc)
|
|
if (!tc->mode)
|
|
return -EINVAL;
|
|
|
|
- /* from excel file - DP0_SrcCtrl */
|
|
- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
|
|
- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
|
|
- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
|
|
- /* from excel file - DP1_SrcCtrl */
|
|
- tc_write(0x07a0, 0x00003083);
|
|
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
|
|
+ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
|
|
+ tc_write(DP1_SRCCTRL,
|
|
+ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
|
|
+ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
|
|
|
|
rate = clk_get_rate(tc->refclk);
|
|
switch (rate) {
|
|
@@ -853,8 +863,11 @@ static int tc_main_link_setup(struct tc_data *tc)
|
|
}
|
|
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
|
|
tc_write(SYS_PLLPARAM, value);
|
|
+
|
|
/* Setup Main Link */
|
|
- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
|
|
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
|
|
+ if (tc->link.base.num_lanes == 2)
|
|
+ dp_phy_ctrl |= PHY_2LANE;
|
|
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
|
|
msleep(100);
|
|
|
|
@@ -1109,10 +1122,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
|
|
static int tc_connector_mode_valid(struct drm_connector *connector,
|
|
struct drm_display_mode *mode)
|
|
{
|
|
+ struct tc_data *tc = connector_to_tc(connector);
|
|
+ u32 req, avail;
|
|
+ u32 bits_per_pixel = 24;
|
|
+
|
|
/* DPI interface clock limitation: upto 154 MHz */
|
|
if (mode->clock > 154000)
|
|
return MODE_CLOCK_HIGH;
|
|
|
|
+ req = mode->clock * bits_per_pixel / 8;
|
|
+ avail = tc->link.base.num_lanes * tc->link.base.rate;
|
|
+
|
|
+ if (req > avail)
|
|
+ return MODE_BAD;
|
|
+
|
|
return MODE_OK;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
|
|
index 7b2030925825..6509031098d5 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem.c
|
|
@@ -1593,6 +1593,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
|
|
return err;
|
|
}
|
|
|
|
+static inline bool
|
|
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
|
|
+ unsigned long addr, unsigned long size)
|
|
+{
|
|
+ if (vma->vm_file != filp)
|
|
+ return false;
|
|
+
|
|
+ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
|
|
+}
|
|
+
|
|
/**
|
|
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
|
|
* it is mapped to.
|
|
@@ -1651,7 +1661,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
|
return -EINTR;
|
|
}
|
|
vma = find_vma(mm, addr);
|
|
- if (vma)
|
|
+ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
|
|
vma->vm_page_prot =
|
|
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
|
|
else
|
|
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
|
|
index b0d445390ee4..d43bc7bd3387 100644
|
|
--- a/drivers/input/misc/bma150.c
|
|
+++ b/drivers/input/misc/bma150.c
|
|
@@ -482,13 +482,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
|
|
idev->close = bma150_irq_close;
|
|
input_set_drvdata(idev, bma150);
|
|
|
|
+ bma150->input = idev;
|
|
+
|
|
error = input_register_device(idev);
|
|
if (error) {
|
|
input_free_device(idev);
|
|
return error;
|
|
}
|
|
|
|
- bma150->input = idev;
|
|
return 0;
|
|
}
|
|
|
|
@@ -511,15 +512,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
|
|
|
|
bma150_init_input_device(bma150, ipoll_dev->input);
|
|
|
|
+ bma150->input_polled = ipoll_dev;
|
|
+ bma150->input = ipoll_dev->input;
|
|
+
|
|
error = input_register_polled_device(ipoll_dev);
|
|
if (error) {
|
|
input_free_polled_device(ipoll_dev);
|
|
return error;
|
|
}
|
|
|
|
- bma150->input_polled = ipoll_dev;
|
|
- bma150->input = ipoll_dev->input;
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
|
|
index 30adc5745cba..25ce9047b682 100644
|
|
--- a/drivers/input/mouse/elan_i2c_core.c
|
|
+++ b/drivers/input/mouse/elan_i2c_core.c
|
|
@@ -1240,7 +1240,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
|
|
static const struct acpi_device_id elan_acpi_id[] = {
|
|
{ "ELAN0000", 0 },
|
|
{ "ELAN0100", 0 },
|
|
- { "ELAN0501", 0 },
|
|
{ "ELAN0600", 0 },
|
|
{ "ELAN0602", 0 },
|
|
{ "ELAN0605", 0 },
|
|
@@ -1251,6 +1250,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
|
|
{ "ELAN060C", 0 },
|
|
{ "ELAN0611", 0 },
|
|
{ "ELAN0612", 0 },
|
|
+ { "ELAN0617", 0 },
|
|
{ "ELAN0618", 0 },
|
|
{ "ELAN061C", 0 },
|
|
{ "ELAN061D", 0 },
|
|
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
|
|
index c120afd9c46a..38edf8f5bf8a 100644
|
|
--- a/drivers/input/mouse/elantech.c
|
|
+++ b/drivers/input/mouse/elantech.c
|
|
@@ -1117,6 +1117,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
|
|
* Asus UX31 0x361f00 20, 15, 0e clickpad
|
|
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
|
|
* Avatar AVIU-145A2 0x361f00 ? clickpad
|
|
+ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
|
|
+ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
|
|
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
|
|
* Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
|
|
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
|
|
@@ -1169,6 +1171,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
|
|
},
|
|
},
|
|
+ {
|
|
+ /* Fujitsu H780 also has a middle button */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
|
|
+ },
|
|
+ },
|
|
#endif
|
|
{ }
|
|
};
|
|
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
|
|
index 914c8a6bf93c..345f4d81ba07 100644
|
|
--- a/drivers/md/dm-thin.c
|
|
+++ b/drivers/md/dm-thin.c
|
|
@@ -257,6 +257,7 @@ struct pool {
|
|
|
|
spinlock_t lock;
|
|
struct bio_list deferred_flush_bios;
|
|
+ struct bio_list deferred_flush_completions;
|
|
struct list_head prepared_mappings;
|
|
struct list_head prepared_discards;
|
|
struct list_head prepared_discards_pt2;
|
|
@@ -925,6 +926,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
|
|
mempool_free(m, m->tc->pool->mapping_pool);
|
|
}
|
|
|
|
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
|
|
+{
|
|
+ struct pool *pool = tc->pool;
|
|
+ unsigned long flags;
|
|
+
|
|
+ /*
|
|
+ * If the bio has the REQ_FUA flag set we must commit the metadata
|
|
+ * before signaling its completion.
|
|
+ */
|
|
+ if (!bio_triggers_commit(tc, bio)) {
|
|
+ bio_endio(bio);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Complete bio with an error if earlier I/O caused changes to the
|
|
+ * metadata that can't be committed, e.g, due to I/O errors on the
|
|
+ * metadata device.
|
|
+ */
|
|
+ if (dm_thin_aborted_changes(tc->td)) {
|
|
+ bio_io_error(bio);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Batch together any bios that trigger commits and then issue a
|
|
+ * single commit for them in process_deferred_bios().
|
|
+ */
|
|
+ spin_lock_irqsave(&pool->lock, flags);
|
|
+ bio_list_add(&pool->deferred_flush_completions, bio);
|
|
+ spin_unlock_irqrestore(&pool->lock, flags);
|
|
+}
|
|
+
|
|
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
|
|
{
|
|
struct thin_c *tc = m->tc;
|
|
@@ -957,7 +991,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
|
|
*/
|
|
if (bio) {
|
|
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
|
|
- bio_endio(bio);
|
|
+ complete_overwrite_bio(tc, bio);
|
|
} else {
|
|
inc_all_io_entry(tc->pool, m->cell->holder);
|
|
remap_and_issue(tc, m->cell->holder, m->data_block);
|
|
@@ -2303,7 +2337,7 @@ static void process_deferred_bios(struct pool *pool)
|
|
{
|
|
unsigned long flags;
|
|
struct bio *bio;
|
|
- struct bio_list bios;
|
|
+ struct bio_list bios, bio_completions;
|
|
struct thin_c *tc;
|
|
|
|
tc = get_first_thin(pool);
|
|
@@ -2314,26 +2348,36 @@ static void process_deferred_bios(struct pool *pool)
|
|
}
|
|
|
|
/*
|
|
- * If there are any deferred flush bios, we must commit
|
|
- * the metadata before issuing them.
|
|
+ * If there are any deferred flush bios, we must commit the metadata
|
|
+ * before issuing them or signaling their completion.
|
|
*/
|
|
bio_list_init(&bios);
|
|
+ bio_list_init(&bio_completions);
|
|
+
|
|
spin_lock_irqsave(&pool->lock, flags);
|
|
bio_list_merge(&bios, &pool->deferred_flush_bios);
|
|
bio_list_init(&pool->deferred_flush_bios);
|
|
+
|
|
+ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
|
|
+ bio_list_init(&pool->deferred_flush_completions);
|
|
spin_unlock_irqrestore(&pool->lock, flags);
|
|
|
|
- if (bio_list_empty(&bios) &&
|
|
+ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
|
|
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
|
|
return;
|
|
|
|
if (commit(pool)) {
|
|
+ bio_list_merge(&bios, &bio_completions);
|
|
+
|
|
while ((bio = bio_list_pop(&bios)))
|
|
bio_io_error(bio);
|
|
return;
|
|
}
|
|
pool->last_commit_jiffies = jiffies;
|
|
|
|
+ while ((bio = bio_list_pop(&bio_completions)))
|
|
+ bio_endio(bio);
|
|
+
|
|
while ((bio = bio_list_pop(&bios)))
|
|
generic_make_request(bio);
|
|
}
|
|
@@ -2968,6 +3012,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
|
|
INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
|
|
spin_lock_init(&pool->lock);
|
|
bio_list_init(&pool->deferred_flush_bios);
|
|
+ bio_list_init(&pool->deferred_flush_completions);
|
|
INIT_LIST_HEAD(&pool->prepared_mappings);
|
|
INIT_LIST_HEAD(&pool->prepared_discards);
|
|
INIT_LIST_HEAD(&pool->prepared_discards_pt2);
|
|
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
|
|
index c4e41c26649e..fac10c0e852c 100644
|
|
--- a/drivers/misc/eeprom/Kconfig
|
|
+++ b/drivers/misc/eeprom/Kconfig
|
|
@@ -12,7 +12,7 @@ config EEPROM_AT24
|
|
ones like at24c64, 24lc02 or fm24c04:
|
|
|
|
24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
|
|
- 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
|
|
+ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
|
|
|
|
Unless you like data loss puzzles, always be sure that any chip
|
|
you configure as a 24c32 (32 kbit) or larger is NOT really a
|
|
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
|
|
index d8a485f1798b..a37b9b6a315a 100644
|
|
--- a/drivers/misc/eeprom/at24.c
|
|
+++ b/drivers/misc/eeprom/at24.c
|
|
@@ -170,6 +170,7 @@ static const struct i2c_device_id at24_ids[] = {
|
|
{ "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
|
|
{ "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
|
|
{ "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
|
|
+ { "24c2048", AT24_DEVICE_MAGIC(2097152 / 8, AT24_FLAG_ADDR16) },
|
|
{ "at24", 0 },
|
|
{ /* END OF LIST */ }
|
|
};
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
index 4bc2c806eb61..eeeb4c5740bf 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
@@ -12979,6 +12979,24 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
|
|
struct net_device *dev,
|
|
netdev_features_t features)
|
|
{
|
|
+ /*
|
|
+ * A skb with gso_size + header length > 9700 will cause a
|
|
+ * firmware panic. Drop GSO support.
|
|
+ *
|
|
+ * Eventually the upper layer should not pass these packets down.
|
|
+ *
|
|
+ * For speed, if the gso_size is <= 9000, assume there will
|
|
+ * not be 700 bytes of headers and pass it through. Only do a
|
|
+ * full (slow) validation if the gso_size is > 9000.
|
|
+ *
|
|
+ * (Due to the way SKB_BY_FRAGS works this will also do a full
|
|
+ * validation in that case.)
|
|
+ */
|
|
+ if (unlikely(skb_is_gso(skb) &&
|
|
+ (skb_shinfo(skb)->gso_size > 9000) &&
|
|
+ !skb_gso_validate_mac_len(skb, 9700)))
|
|
+ features &= ~NETIF_F_GSO_MASK;
|
|
+
|
|
features = vlan_features_check(skb, features);
|
|
return vxlan_features_check(skb, features);
|
|
}
|
|
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
|
|
index 8a40202c0a17..c4f1c363e24b 100644
|
|
--- a/drivers/net/usb/ch9200.c
|
|
+++ b/drivers/net/usb/ch9200.c
|
|
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
|
|
tx_overhead = 0x40;
|
|
|
|
len = skb->len;
|
|
- if (skb_headroom(skb) < tx_overhead) {
|
|
- struct sk_buff *skb2;
|
|
-
|
|
- skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
|
|
+ if (skb_cow_head(skb, tx_overhead)) {
|
|
dev_kfree_skb_any(skb);
|
|
- skb = skb2;
|
|
- if (!skb)
|
|
- return NULL;
|
|
+ return NULL;
|
|
}
|
|
|
|
__skb_push(skb, tx_overhead);
|
|
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
|
|
index 66b34ddbe216..72d9e7954b0a 100644
|
|
--- a/drivers/net/usb/kaweth.c
|
|
+++ b/drivers/net/usb/kaweth.c
|
|
@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
|
|
}
|
|
|
|
/* We now decide whether we can put our special header into the sk_buff */
|
|
- if (skb_cloned(skb) || skb_headroom(skb) < 2) {
|
|
- /* no such luck - we make our own */
|
|
- struct sk_buff *copied_skb;
|
|
- copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
|
|
- dev_kfree_skb_irq(skb);
|
|
- skb = copied_skb;
|
|
- if (!copied_skb) {
|
|
- kaweth->stats.tx_errors++;
|
|
- netif_start_queue(net);
|
|
- spin_unlock_irq(&kaweth->device_lock);
|
|
- return NETDEV_TX_OK;
|
|
- }
|
|
+ if (skb_cow_head(skb, 2)) {
|
|
+ kaweth->stats.tx_errors++;
|
|
+ netif_start_queue(net);
|
|
+ spin_unlock_irq(&kaweth->device_lock);
|
|
+ dev_kfree_skb_any(skb);
|
|
+ return NETDEV_TX_OK;
|
|
}
|
|
|
|
private_header = (__le16 *)__skb_push(skb, 2);
|
|
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
|
|
index e29f4c0767eb..e719ecd69d01 100644
|
|
--- a/drivers/net/usb/smsc95xx.c
|
|
+++ b/drivers/net/usb/smsc95xx.c
|
|
@@ -2011,13 +2011,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
|
|
/* We do not advertise SG, so skbs should be already linearized */
|
|
BUG_ON(skb_shinfo(skb)->nr_frags);
|
|
|
|
- if (skb_headroom(skb) < overhead) {
|
|
- struct sk_buff *skb2 = skb_copy_expand(skb,
|
|
- overhead, 0, flags);
|
|
+ /* Make writable and expand header space by overhead if required */
|
|
+ if (skb_cow_head(skb, overhead)) {
|
|
+ /* Must deallocate here as returning NULL to indicate error
|
|
+ * means the skb won't be deallocated in the caller.
|
|
+ */
|
|
dev_kfree_skb_any(skb);
|
|
- skb = skb2;
|
|
- if (!skb)
|
|
- return NULL;
|
|
+ return NULL;
|
|
}
|
|
|
|
if (csum) {
|
|
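
The ch9200, kaweth and smsc95xx conversions above all enforce the same contract: the transmit fixup either returns a buffer with enough headroom or frees the original and returns NULL, so the caller never frees it twice and never writes headers into missing space. A userspace sketch of that ownership rule, with plain malloc'd buffers and a made-up ensure_headroom() standing in for skb_cow_head():

/* Ownership sketch for the skb_cow_head() pattern: on failure the
 * fixup frees the buffer and returns NULL. Everything here is a mock.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	char *head;	/* start of allocation */
	char *data;	/* current start of payload */
	size_t len;
};

static int ensure_headroom(struct buf *b, size_t need)	/* 0 on success */
{
	if ((size_t)(b->data - b->head) >= need)
		return 0;
	char *n = malloc(need + b->len);
	if (!n)
		return -1;
	memcpy(n + need, b->data, b->len);
	free(b->head);
	b->head = n;
	b->data = n + need;
	return 0;
}

static struct buf *tx_fixup(struct buf *b, size_t overhead)
{
	if (ensure_headroom(b, overhead)) {
		free(b->head);		/* we own it: release ... */
		free(b);
		return NULL;		/* ... and report that nothing is left */
	}
	b->data -= overhead;		/* the __skb_push() step */
	b->len += overhead;
	memset(b->data, 0, overhead);	/* header bytes would be written here */
	return b;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	b->head = b->data = malloc(16);
	b->len = 16;
	memset(b->data, 0xab, b->len);
	b = tx_fixup(b, 0x40);
	printf("fixup %s\n", b ? "ok" : "failed, buffer already freed");
	if (b) {
		free(b->head);
		free(b);
	}
	return 0;
}
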
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
index bedce3453dd3..5aa221487a9c 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
@@ -803,11 +803,24 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
|
|
return ret;
|
|
}
|
|
|
|
- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
|
|
- if (ret) {
|
|
- dev_err(pctrl->dev, "Failed to add pin range\n");
|
|
- gpiochip_remove(&pctrl->chip);
|
|
- return ret;
|
|
+ /*
|
|
+ * For DeviceTree-supported systems, the gpio core checks the
|
|
+ * pinctrl's device node for the "gpio-ranges" property.
|
|
+ * If it is present, it takes care of adding the pin ranges
|
|
+ * for the driver. In this case the driver can skip ahead.
|
|
+ *
|
|
+ * In order to remain compatible with older, existing DeviceTree
|
|
+ * files which don't set the "gpio-ranges" property or systems that
|
|
+ * utilize ACPI the driver has to call gpiochip_add_pin_range().
|
|
+ */
|
|
+ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
|
|
+ ret = gpiochip_add_pin_range(&pctrl->chip,
|
|
+ dev_name(pctrl->dev), 0, 0, chip->ngpio);
|
|
+ if (ret) {
|
|
+ dev_err(pctrl->dev, "Failed to add pin range\n");
|
|
+ gpiochip_remove(&pctrl->chip);
|
|
+ return ret;
|
|
+ }
|
|
}
|
|
|
|
ret = gpiochip_irqchip_add(chip,
|
|
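
The comment block in this hunk describes a plain either/or: when the pinctrl node carries a "gpio-ranges" property the GPIO core registers the pin range itself, otherwise the driver keeps doing it for older device trees and ACPI systems. A compressed userspace sketch of that decision with the DT lookup mocked out (only the control flow mirrors the hunk, the helpers are stand-ins):

/* Decision sketch for the gpio-ranges fallback: skip the manual
 * registration when the device tree already provides the property.
 */
#include <stdbool.h>
#include <stdio.h>

static bool mock_has_gpio_ranges(bool dt_has_it)
{
	return dt_has_it;	/* stand-in for of_property_read_bool() */
}

static int mock_add_pin_range(void)
{
	printf("driver adds pin range itself (legacy DT or ACPI)\n");
	return 0;
}

static int gpio_init(bool dt_has_gpio_ranges)
{
	if (!mock_has_gpio_ranges(dt_has_gpio_ranges))
		return mock_add_pin_range();
	printf("gpio core consumes \"gpio-ranges\", nothing to do here\n");
	return 0;
}

int main(void)
{
	gpio_init(true);	/* new DT with gpio-ranges */
	gpio_init(false);	/* old DT or ACPI platform */
	return 0;
}
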
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
|
|
index 85442edf3c49..913ebb6d0d29 100644
|
|
--- a/drivers/scsi/aic94xx/aic94xx_init.c
|
|
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
|
|
@@ -281,7 +281,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
|
|
return snprintf(buf, PAGE_SIZE, "%s\n",
|
|
asd_dev_rev[asd_ha->revision_id]);
|
|
}
|
|
-static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
|
|
+static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
|
|
|
|
static ssize_t asd_show_dev_bios_build(struct device *dev,
|
|
struct device_attribute *attr,char *buf)
|
|
@@ -478,7 +478,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
|
|
{
|
|
int err;
|
|
|
|
- err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
|
|
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -500,13 +500,13 @@ err_update_bios:
|
|
err_biosb:
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
|
|
err_rev:
|
|
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
|
|
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
|
|
return err;
|
|
}
|
|
|
|
static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
|
|
{
|
|
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
|
|
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
|
|
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
|
|
index 984d6aae7529..0e5435330c07 100644
|
|
--- a/drivers/usb/dwc2/hcd.c
|
|
+++ b/drivers/usb/dwc2/hcd.c
|
|
@@ -5202,7 +5202,6 @@ error3:
|
|
error2:
|
|
usb_put_hcd(hcd);
|
|
error1:
|
|
- kfree(hsotg->core_params);
|
|
|
|
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
|
|
kfree(hsotg->last_frame_num_array);
|
|
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
|
|
index a3046b6523c8..8ec296308729 100644
|
|
--- a/fs/cifs/file.c
|
|
+++ b/fs/cifs/file.c
|
|
@@ -1126,6 +1126,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
|
|
+ PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
|
|
+ PAGE_SIZE);
|
|
max_num = (max_buf - sizeof(struct smb_hdr)) /
|
|
sizeof(LOCKING_ANDX_RANGE);
|
|
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
|
|
@@ -1462,6 +1466,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
|
|
if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
|
|
return -EINVAL;
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
|
|
+ PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
|
|
+ PAGE_SIZE);
|
|
max_num = (max_buf - sizeof(struct smb_hdr)) /
|
|
sizeof(LOCKING_ANDX_RANGE);
|
|
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
|
|
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
|
|
index b7885dc0d9bb..dee5250701de 100644
|
|
--- a/fs/cifs/smb2file.c
|
|
+++ b/fs/cifs/smb2file.c
|
|
@@ -129,6 +129,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
|
|
if (max_buf < sizeof(struct smb2_lock_element))
|
|
return -EINVAL;
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
|
|
max_num = max_buf / sizeof(struct smb2_lock_element);
|
|
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
|
|
if (!buf)
|
|
@@ -265,6 +267,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
|
|
max_num = max_buf / sizeof(struct smb2_lock_element);
|
|
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
|
|
if (!buf) {
|
|
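
Both the SMB1 hunks in file.c and the SMB2 hunks here apply the same recipe: the server-announced max_buf is capped at PAGE_SIZE before it becomes an element count, so the kcalloc() never exceeds a page and the BUILD_BUG_ON keeps at least one element possible. A small arithmetic sketch with stand-in constants for PAGE_SIZE and the element size:

/* Arithmetic behind the max_buf clamp: an oversized server value is
 * capped at one page before computing how many lock elements fit.
 */
#include <stdio.h>

#define PAGE_SIZE_BYTES	4096u	/* stand-in for PAGE_SIZE */
#define ELEM_SIZE	24u	/* stand-in for sizeof(struct smb2_lock_element) */

static unsigned int max_lock_elements(unsigned int max_buf)
{
	if (max_buf > PAGE_SIZE_BYTES)	/* the min_t(..., PAGE_SIZE) step */
		max_buf = PAGE_SIZE_BYTES;
	return max_buf / ELEM_SIZE;
}

int main(void)
{
	/* Typical server: 65536 is clamped to 4096 -> 170 elements. */
	printf("%u\n", max_lock_elements(65536));
	/* Without the clamp, 16 MiB would have sized the allocation. */
	printf("%u\n", max_lock_elements(16u << 20));
	return 0;
}
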
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
|
|
index 78ed8105e64d..ae8ecf821019 100644
|
|
--- a/include/linux/perf_event.h
|
|
+++ b/include/linux/perf_event.h
|
|
@@ -455,6 +455,11 @@ struct pmu {
|
|
* Filter events for PMU-specific reasons.
|
|
*/
|
|
int (*filter_match) (struct perf_event *event); /* optional */
|
|
+
|
|
+ /*
|
|
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
|
|
+ */
|
|
+ int (*check_period) (struct perf_event *event, u64 value); /* optional */
|
|
};
|
|
|
|
/**
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index ed329a39d621..f8761774a94f 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -3102,6 +3102,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
|
|
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
|
|
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
|
|
bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
|
|
+bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
|
|
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
|
|
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
|
|
int skb_ensure_writable(struct sk_buff *skb, int write_len);
|
|
@@ -3880,6 +3881,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
|
|
return hdr_len + skb_gso_transport_seglen(skb);
|
|
}
|
|
|
|
+/**
|
|
+ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
|
|
+ *
|
|
+ * @skb: GSO skb
|
|
+ *
|
|
+ * skb_gso_mac_seglen is used to determine the real size of the
|
|
+ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
|
|
+ * headers (TCP/UDP).
|
|
+ */
|
|
+static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
|
|
+{
|
|
+ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
|
|
+ return hdr_len + skb_gso_transport_seglen(skb);
|
|
+}
|
|
+
|
|
/* Local Checksum Offload.
|
|
* Compute outer checksum based on the assumption that the
|
|
* inner checksum will be offloaded later.
|
|
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
|
|
index b02af0bf5777..66f6b84df287 100644
|
|
--- a/include/net/netfilter/nf_tables.h
|
|
+++ b/include/net/netfilter/nf_tables.h
|
|
@@ -87,6 +87,35 @@ struct nft_regs {
|
|
};
|
|
};
|
|
|
|
+/* Store/load an u16 or u8 integer to/from the u32 data register.
|
|
+ *
|
|
+ * Note, when using concatenations, register allocation happens at 32-bit
|
|
+ * level. So for store instruction, pad the rest part with zero to avoid
|
|
+ * garbage values.
|
|
+ */
|
|
+
|
|
+static inline void nft_reg_store16(u32 *dreg, u16 val)
|
|
+{
|
|
+ *dreg = 0;
|
|
+ *(u16 *)dreg = val;
|
|
+}
|
|
+
|
|
+static inline void nft_reg_store8(u32 *dreg, u8 val)
|
|
+{
|
|
+ *dreg = 0;
|
|
+ *(u8 *)dreg = val;
|
|
+}
|
|
+
|
|
+static inline u16 nft_reg_load16(u32 *sreg)
|
|
+{
|
|
+ return *(u16 *)sreg;
|
|
+}
|
|
+
|
|
+static inline u8 nft_reg_load8(u32 *sreg)
|
|
+{
|
|
+ return *(u8 *)sreg;
|
|
+}
|
|
+
|
|
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
|
|
unsigned int len)
|
|
{
|
|
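
These helpers exist because nft registers are allocated in 32-bit units: storing only the low 8 or 16 bits would leave stale bytes behind, and with concatenations the whole word takes part in lookups. A standalone sketch of the same store/load discipline over a plain uint32_t, using memcpy instead of the kernel's direct casts:

/* Why the 16-bit store zeroes the whole register first: leftover bytes
 * from a previous value must not leak into 32-bit wide comparisons.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void reg_store16(uint32_t *dreg, uint16_t val)
{
	*dreg = 0;				/* clear the full register */
	memcpy(dreg, &val, sizeof(val));	/* then write the low 16 bits */
}

static uint16_t reg_load16(const uint32_t *sreg)
{
	uint16_t val;

	memcpy(&val, sreg, sizeof(val));
	return val;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* garbage left by an earlier rule */

	reg_store16(&reg, 0x0050);	/* e.g. a TCP port */
	printf("register = 0x%08x, loaded back = 0x%04x\n",
	       reg, reg_load16(&reg));
	/* Skipping the "*dreg = 0" step would leave 0xdead in the upper
	 * half and a full-register comparison would then mismatch. */
	return 0;
}
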
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
|
|
index 659b1634de61..3d3de5e9f9cc 100644
|
|
--- a/include/uapi/linux/if_ether.h
|
|
+++ b/include/uapi/linux/if_ether.h
|
|
@@ -139,11 +139,18 @@
|
|
* This is an Ethernet frame header.
|
|
*/
|
|
|
|
+/* allow libcs like musl to deactivate this, glibc does not implement this. */
|
|
+#ifndef __UAPI_DEF_ETHHDR
|
|
+#define __UAPI_DEF_ETHHDR 1
|
|
+#endif
|
|
+
|
|
+#if __UAPI_DEF_ETHHDR
|
|
struct ethhdr {
|
|
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
|
|
unsigned char h_source[ETH_ALEN]; /* source ether addr */
|
|
__be16 h_proto; /* packet type ID field */
|
|
} __attribute__((packed));
|
|
+#endif
|
|
|
|
|
|
#endif /* _UAPI_LINUX_IF_ETHER_H */
|
|
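
The guard added here lets a C library that already defines struct ethhdr (musl does, glibc does not) set __UAPI_DEF_ETHHDR to 0 and suppress the kernel copy, avoiding a redefinition error when both headers land in one translation unit. A self-contained sketch of that coordination with the two halves inlined into one file, so it builds without depending on any particular libc:

/* One-file mock of the __UAPI_DEF_ETHHDR handshake: the "libc" part
 * defines the struct and clears the guard, the "uapi" part then skips
 * its own, otherwise conflicting, definition.
 */
#include <stdio.h>

/* --- libc side (what a musl-style netinet/if_ether.h would do) --- */
struct ethhdr {
	unsigned char h_dest[6];
	unsigned char h_source[6];
	unsigned short h_proto;
} __attribute__((packed));
#define __UAPI_DEF_ETHHDR 0

/* --- kernel uapi side (the patched linux/if_ether.h logic) --- */
#ifndef __UAPI_DEF_ETHHDR
#define __UAPI_DEF_ETHHDR 1
#endif

#if __UAPI_DEF_ETHHDR
struct ethhdr {		/* would be a duplicate if the guard were ignored */
	unsigned char h_dest[6];
	unsigned char h_source[6];
	unsigned short h_proto;
} __attribute__((packed));
#endif

int main(void)
{
	printf("sizeof(struct ethhdr) = %zu\n", sizeof(struct ethhdr));	/* 14 */
	return 0;
}
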
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index 1af0bbf20984..17339506f9f8 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -4600,6 +4600,11 @@ static void __perf_event_period(struct perf_event *event,
|
|
}
|
|
}
|
|
|
|
+static int perf_event_check_period(struct perf_event *event, u64 value)
|
|
+{
|
|
+ return event->pmu->check_period(event, value);
|
|
+}
|
|
+
|
|
static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|
{
|
|
u64 value;
|
|
@@ -4616,6 +4621,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
|
|
return -EINVAL;
|
|
|
|
+ if (perf_event_check_period(event, value))
|
|
+ return -EINVAL;
|
|
+
|
|
event_function_call(event, __perf_event_period, &value);
|
|
|
|
return 0;
|
|
@@ -8622,6 +8630,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
|
|
return 0;
|
|
}
|
|
|
|
+static int perf_event_nop_int(struct perf_event *event, u64 value)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
|
|
|
|
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
|
|
@@ -8944,6 +8957,9 @@ got_cpu_context:
|
|
pmu->pmu_disable = perf_pmu_nop_void;
|
|
}
|
|
|
|
+ if (!pmu->check_period)
|
|
+ pmu->check_period = perf_event_nop_int;
|
|
+
|
|
if (!pmu->event_idx)
|
|
pmu->event_idx = perf_event_idx_default;
|
|
|
|
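
The core side is the usual perf treatment of an optional pmu hook: perf_event_period() always calls check_period(), and pmu registration installs a nop that accepts every value whenever a driver leaves the pointer unset, so no caller has to test for NULL. A small userspace sketch of that default-to-nop wiring (the struct and names are illustrative, not the kernel API):

/* "Optional callback, default to a nop" in miniature: registration
 * guarantees check_period is always callable.
 */
#include <stdint.h>
#include <stdio.h>

struct pmu_like {
	const char *name;
	int (*check_period)(uint64_t value);	/* non-zero = reject */
};

static int nop_check_period(uint64_t value)
{
	(void)value;
	return 0;			/* accept anything, like perf_event_nop_int() */
}

static int picky_check_period(uint64_t value)
{
	return value != 1;		/* a driver that only supports period 1 */
}

static void register_pmu(struct pmu_like *pmu)
{
	if (!pmu->check_period)
		pmu->check_period = nop_check_period;	/* fill the gap once */
}

static int set_period(struct pmu_like *pmu, uint64_t value)
{
	if (pmu->check_period(value))
		return -22;		/* -EINVAL, as the ioctl path does */
	printf("%s: period %llu accepted\n", pmu->name,
	       (unsigned long long)value);
	return 0;
}

int main(void)
{
	struct pmu_like generic = { .name = "generic" };
	struct pmu_like picky = { .name = "picky",
				  .check_period = picky_check_period };

	register_pmu(&generic);
	register_pmu(&picky);
	set_period(&generic, 4096);			/* accepted by the nop */
	printf("picky: %d\n", set_period(&picky, 4096));	/* rejected: -22 */
	return 0;
}
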
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
|
|
index f4b5811ebe23..99becab2c1ce 100644
|
|
--- a/kernel/events/ring_buffer.c
|
|
+++ b/kernel/events/ring_buffer.c
|
|
@@ -700,7 +700,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
|
|
size = sizeof(struct ring_buffer);
|
|
size += nr_pages * sizeof(void *);
|
|
|
|
- if (order_base_2(size) >= MAX_ORDER)
|
|
+ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
|
|
goto fail;
|
|
|
|
rb = kzalloc(size, GFP_KERNEL);
|
|
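
The one-character-looking change above is really a units fix: MAX_ORDER limits allocations in pages, while order_base_2(size) here is taken over a size in bytes, so the old comparison started failing once the metadata block passed roughly a kilobyte. Adding PAGE_SHIFT expresses the limit in bytes. A worked sketch of the arithmetic, assuming the common PAGE_SHIFT=12, MAX_ORDER=11 configuration:

/* Unit check for the comparison above with illustrative constants. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages, an assumption for this sketch */
#define MAX_ORDER	11

/* ceil(log2(x)) for x >= 1, like the kernel's order_base_2() */
static unsigned int order_base_2(unsigned long x)
{
	unsigned int order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}

int main(void)
{
	/* Metadata for 4096 data pages: a small struct plus 4096 pointers. */
	unsigned long size = 64 + 4096 * sizeof(void *);	/* ~32 KiB */

	printf("size=%lu bytes, order_base_2=%u\n", size, order_base_2(size));
	printf("old check rejects it: %d\n", order_base_2(size) >= MAX_ORDER);
	printf("new check rejects it: %d\n",
	       order_base_2(size) >= PAGE_SHIFT + MAX_ORDER);
	return 0;	/* 32 KiB is a perfectly ordinary kzalloc() */
}
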
diff --git a/kernel/signal.c b/kernel/signal.c
|
|
index 798b8f495ae2..c091dcc9f19b 100644
|
|
--- a/kernel/signal.c
|
|
+++ b/kernel/signal.c
|
|
@@ -2241,9 +2241,12 @@ relock:
|
|
}
|
|
|
|
/* Has this task already been marked for death? */
|
|
- ksig->info.si_signo = signr = SIGKILL;
|
|
- if (signal_group_exit(signal))
|
|
+ if (signal_group_exit(signal)) {
|
|
+ ksig->info.si_signo = signr = SIGKILL;
|
|
+ sigdelset(&current->pending.signal, SIGKILL);
|
|
+ recalc_sigpending();
|
|
goto fatal;
|
|
+ }
|
|
|
|
for (;;) {
|
|
struct k_sigaction *ka;
|
|
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
|
|
index f0ab801a6437..c6eee3d9ed00 100644
|
|
--- a/kernel/trace/trace_uprobe.c
|
|
+++ b/kernel/trace/trace_uprobe.c
|
|
@@ -150,7 +150,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
|
|
|
|
ret = strncpy_from_user(dst, src, maxlen);
|
|
if (ret == maxlen)
|
|
- dst[--ret] = '\0';
|
|
+ dst[ret - 1] = '\0';
|
|
+ else if (ret >= 0)
|
|
+ /*
|
|
+ * Include the terminating null byte. In this case it
|
|
+ * was copied by strncpy_from_user but not accounted
|
|
+ * for in ret.
|
|
+ */
|
|
+ ret++;
|
|
|
|
if (ret < 0) { /* Failed to fetch string */
|
|
((u8 *)get_rloc_data(dest))[0] = '\0';
|
|
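
The accounting fix turns on strncpy_from_user()'s contract: on success it returns the string length without the trailing NUL even though the NUL was copied, so the recorded length has to be bumped by one; only the truncated case, where the return equals maxlen, already accounts for every stored byte. A userspace sketch with a mock copier that follows the same convention:

/* mock_copy() mimics strncpy_from_user(): returns strlen(src) when the
 * string (plus NUL) fits, otherwise copies maxlen bytes with no NUL and
 * returns maxlen. fetch_string() applies the corrected accounting.
 */
#include <stdio.h>
#include <string.h>

static long mock_copy(char *dst, const char *src, long maxlen)
{
	long len = (long)strlen(src);

	if (len < maxlen) {
		memcpy(dst, src, len + 1);	/* NUL copied, not counted */
		return len;
	}
	memcpy(dst, src, maxlen);		/* truncated, no NUL */
	return maxlen;
}

static long fetch_string(char *dst, const char *src, long maxlen)
{
	long ret = mock_copy(dst, src, maxlen);

	if (ret == maxlen)
		dst[ret - 1] = '\0';	/* force-terminate the truncated copy */
	else if (ret >= 0)
		ret++;			/* include the NUL that was stored */
	return ret;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", fetch_string(buf, "abc", sizeof(buf)));	/* 4: "abc" + NUL */
	printf("%ld\n", fetch_string(buf, "abcdefghij", sizeof(buf)));	/* 8: truncated */
	return 0;
}
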
diff --git a/mm/memory.c b/mm/memory.c
|
|
index 35d8217bb046..47248dc0b9e1 100644
|
|
--- a/mm/memory.c
|
|
+++ b/mm/memory.c
|
|
@@ -3329,15 +3329,24 @@ static int do_fault(struct fault_env *fe)
|
|
{
|
|
struct vm_area_struct *vma = fe->vma;
|
|
pgoff_t pgoff = linear_page_index(vma, fe->address);
|
|
+ int ret;
|
|
|
|
/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
|
|
if (!vma->vm_ops->fault)
|
|
- return VM_FAULT_SIGBUS;
|
|
- if (!(fe->flags & FAULT_FLAG_WRITE))
|
|
- return do_read_fault(fe, pgoff);
|
|
- if (!(vma->vm_flags & VM_SHARED))
|
|
- return do_cow_fault(fe, pgoff);
|
|
- return do_shared_fault(fe, pgoff);
|
|
+ ret = VM_FAULT_SIGBUS;
|
|
+ else if (!(fe->flags & FAULT_FLAG_WRITE))
|
|
+ ret = do_read_fault(fe, pgoff);
|
|
+ else if (!(vma->vm_flags & VM_SHARED))
|
|
+ ret = do_cow_fault(fe, pgoff);
|
|
+ else
|
|
+ ret = do_shared_fault(fe, pgoff);
|
|
+
|
|
+ /* preallocated pagetable is unused: free it */
|
|
+ if (fe->prealloc_pte) {
|
|
+ pte_free(vma->vm_mm, fe->prealloc_pte);
|
|
+ fe->prealloc_pte = 0;
|
|
+ }
|
|
+ return ret;
|
|
}
|
|
|
|
static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
|
|
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
|
|
index dca1fed0d7da..11501165f0df 100644
|
|
--- a/net/core/skbuff.c
|
|
+++ b/net/core/skbuff.c
|
|
@@ -4469,37 +4469,74 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
|
|
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
|
|
|
|
/**
|
|
- * skb_gso_validate_mtu - Return in case such skb fits a given MTU
|
|
+ * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
|
|
*
|
|
- * @skb: GSO skb
|
|
- * @mtu: MTU to validate against
|
|
+ * There are a couple of instances where we have a GSO skb, and we
|
|
+ * want to determine what size it would be after it is segmented.
|
|
*
|
|
- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
|
|
- * once split.
|
|
+ * We might want to check:
|
|
+ * - L3+L4+payload size (e.g. IP forwarding)
|
|
+ * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
|
|
+ *
|
|
+ * This is a helper to do that correctly considering GSO_BY_FRAGS.
|
|
+ *
|
|
+ * @seg_len: The segmented length (from skb_gso_*_seglen). In the
|
|
+ * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
|
|
+ *
|
|
+ * @max_len: The maximum permissible length.
|
|
+ *
|
|
+ * Returns true if the segmented length <= max length.
|
|
*/
|
|
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
|
|
-{
|
|
+static inline bool skb_gso_size_check(const struct sk_buff *skb,
|
|
+ unsigned int seg_len,
|
|
+ unsigned int max_len) {
|
|
const struct skb_shared_info *shinfo = skb_shinfo(skb);
|
|
const struct sk_buff *iter;
|
|
- unsigned int hlen;
|
|
-
|
|
- hlen = skb_gso_network_seglen(skb);
|
|
|
|
if (shinfo->gso_size != GSO_BY_FRAGS)
|
|
- return hlen <= mtu;
|
|
+ return seg_len <= max_len;
|
|
|
|
/* Undo this so we can re-use header sizes */
|
|
- hlen -= GSO_BY_FRAGS;
|
|
+ seg_len -= GSO_BY_FRAGS;
|
|
|
|
skb_walk_frags(skb, iter) {
|
|
- if (hlen + skb_headlen(iter) > mtu)
|
|
+ if (seg_len + skb_headlen(iter) > max_len)
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
+
|
|
+/**
|
|
+ * skb_gso_validate_mtu - Return in case such skb fits a given MTU
|
|
+ *
|
|
+ * @skb: GSO skb
|
|
+ * @mtu: MTU to validate against
|
|
+ *
|
|
+ * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
|
|
+ * once split.
|
|
+ */
|
|
+bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
|
|
+{
|
|
+ return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
|
|
+}
|
|
EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
|
|
|
|
+/**
|
|
+ * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
|
|
+ *
|
|
+ * @skb: GSO skb
|
|
+ * @len: length to validate against
|
|
+ *
|
|
+ * skb_gso_validate_mac_len validates if a given skb will fit a wanted
|
|
+ * length once split, including L2, L3 and L4 headers and the payload.
|
|
+ */
|
|
+bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
|
|
+{
|
|
+ return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
|
|
+
|
|
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
|
|
{
|
|
int mac_len;
|
|
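
skb_gso_size_check() above separates two regimes: with a fixed gso_size the per-segment size is headers plus gso_size and a single comparison is enough, while in the GSO_BY_FRAGS case every fragment carries its own head length and each one must stay under the limit once headers are added back. A userspace sketch of that loop over plain arrays, with illustrative lengths and no real skbs:

/* Two cases of the segment-size check: fixed gso_size versus a
 * GSO_BY_FRAGS-style list of per-fragment payload lengths.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool fits_fixed(unsigned int hdr_len, unsigned int gso_size,
		       unsigned int max_len)
{
	return hdr_len + gso_size <= max_len;	/* one comparison suffices */
}

static bool fits_by_frags(unsigned int hdr_len, const unsigned int *frag_len,
			  size_t nr_frags, unsigned int max_len)
{
	for (size_t i = 0; i < nr_frags; i++)
		if (hdr_len + frag_len[i] > max_len)
			return false;	/* this fragment would exceed the cap */
	return true;
}

int main(void)
{
	const unsigned int frags[] = { 1200, 1400, 9650 };

	printf("%d\n", fits_fixed(66, 1448, 9700));		/* 1 */
	printf("%d\n", fits_by_frags(66, frags, 3, 9700));	/* 0: 66 + 9650 > 9700 */
	return 0;
}
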
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
|
|
index 51ced81b616c..dc3628a396ec 100644
|
|
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
|
|
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
|
|
@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
|
|
memset(&range, 0, sizeof(range));
|
|
range.flags = priv->flags;
|
|
if (priv->sreg_proto_min) {
|
|
- range.min_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_min];
|
|
- range.max_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_max];
|
|
+ range.min_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_min]);
|
|
+ range.max_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_max]);
|
|
}
|
|
regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, pkt->hook,
|
|
&range, pkt->out);
|
|
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
|
|
index c09d4381427e..f760524e1353 100644
|
|
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
|
|
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
|
|
@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
|
|
|
|
memset(&mr, 0, sizeof(mr));
|
|
if (priv->sreg_proto_min) {
|
|
- mr.range[0].min.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_min];
|
|
- mr.range[0].max.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_max];
|
|
+ mr.range[0].min.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_min]);
|
|
+ mr.range[0].max.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_max]);
|
|
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
|
|
}
|
|
|
|
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
|
|
index 9597ffb74077..b74a420050c4 100644
|
|
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
|
|
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
|
|
@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
|
|
memset(&range, 0, sizeof(range));
|
|
range.flags = priv->flags;
|
|
if (priv->sreg_proto_min) {
|
|
- range.min_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_min];
|
|
- range.max_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_max];
|
|
+ range.min_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_min]);
|
|
+ range.max_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_max]);
|
|
}
|
|
regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
|
|
}
|
|
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
|
|
index aca44e89a881..7ef58e493fca 100644
|
|
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
|
|
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
|
|
@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
|
|
|
|
memset(&range, 0, sizeof(range));
|
|
if (priv->sreg_proto_min) {
|
|
- range.min_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_min],
|
|
- range.max_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_max],
|
|
+ range.min_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_min]);
|
|
+ range.max_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_max]);
|
|
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
|
|
}
|
|
|
|
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
|
|
index d7b0d171172a..2b9fda71fa8b 100644
|
|
--- a/net/netfilter/nft_ct.c
|
|
+++ b/net/netfilter/nft_ct.c
|
|
@@ -77,7 +77,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
|
|
|
|
switch (priv->key) {
|
|
case NFT_CT_DIRECTION:
|
|
- *dest = CTINFO2DIR(ctinfo);
|
|
+ nft_reg_store8(dest, CTINFO2DIR(ctinfo));
|
|
return;
|
|
case NFT_CT_STATUS:
|
|
*dest = ct->status;
|
|
@@ -129,10 +129,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
|
|
return;
|
|
}
|
|
case NFT_CT_L3PROTOCOL:
|
|
- *dest = nf_ct_l3num(ct);
|
|
+ nft_reg_store8(dest, nf_ct_l3num(ct));
|
|
return;
|
|
case NFT_CT_PROTOCOL:
|
|
- *dest = nf_ct_protonum(ct);
|
|
+ nft_reg_store8(dest, nf_ct_protonum(ct));
|
|
return;
|
|
default:
|
|
break;
|
|
@@ -149,10 +149,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
|
|
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
|
|
return;
|
|
case NFT_CT_PROTO_SRC:
|
|
- *dest = (__force __u16)tuple->src.u.all;
|
|
+ nft_reg_store16(dest, (__force u16)tuple->src.u.all);
|
|
return;
|
|
case NFT_CT_PROTO_DST:
|
|
- *dest = (__force __u16)tuple->dst.u.all;
|
|
+ nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
|
|
return;
|
|
default:
|
|
break;
|
|
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
|
|
index 7c3395513ff0..cec8dc0e5e6f 100644
|
|
--- a/net/netfilter/nft_meta.c
|
|
+++ b/net/netfilter/nft_meta.c
|
|
@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|
*dest = skb->len;
|
|
break;
|
|
case NFT_META_PROTOCOL:
|
|
- *dest = 0;
|
|
- *(__be16 *)dest = skb->protocol;
|
|
+ nft_reg_store16(dest, (__force u16)skb->protocol);
|
|
break;
|
|
case NFT_META_NFPROTO:
|
|
- *dest = pkt->pf;
|
|
+ nft_reg_store8(dest, pkt->pf);
|
|
break;
|
|
case NFT_META_L4PROTO:
|
|
if (!pkt->tprot_set)
|
|
goto err;
|
|
- *dest = pkt->tprot;
|
|
+ nft_reg_store8(dest, pkt->tprot);
|
|
break;
|
|
case NFT_META_PRIORITY:
|
|
*dest = skb->priority;
|
|
@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|
case NFT_META_IIFTYPE:
|
|
if (in == NULL)
|
|
goto err;
|
|
- *dest = 0;
|
|
- *(u16 *)dest = in->type;
|
|
+ nft_reg_store16(dest, in->type);
|
|
break;
|
|
case NFT_META_OIFTYPE:
|
|
if (out == NULL)
|
|
goto err;
|
|
- *dest = 0;
|
|
- *(u16 *)dest = out->type;
|
|
+ nft_reg_store16(dest, out->type);
|
|
break;
|
|
case NFT_META_SKUID:
|
|
sk = skb_to_full_sk(skb);
|
|
@@ -142,22 +139,22 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|
#endif
|
|
case NFT_META_PKTTYPE:
|
|
if (skb->pkt_type != PACKET_LOOPBACK) {
|
|
- *dest = skb->pkt_type;
|
|
+ nft_reg_store8(dest, skb->pkt_type);
|
|
break;
|
|
}
|
|
|
|
switch (pkt->pf) {
|
|
case NFPROTO_IPV4:
|
|
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
|
|
- *dest = PACKET_MULTICAST;
|
|
+ nft_reg_store8(dest, PACKET_MULTICAST);
|
|
else
|
|
- *dest = PACKET_BROADCAST;
|
|
+ nft_reg_store8(dest, PACKET_BROADCAST);
|
|
break;
|
|
case NFPROTO_IPV6:
|
|
if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
|
|
- *dest = PACKET_MULTICAST;
|
|
+ nft_reg_store8(dest, PACKET_MULTICAST);
|
|
else
|
|
- *dest = PACKET_BROADCAST;
|
|
+ nft_reg_store8(dest, PACKET_BROADCAST);
|
|
break;
|
|
case NFPROTO_NETDEV:
|
|
switch (skb->protocol) {
|
|
@@ -171,14 +168,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
|
|
goto err;
|
|
|
|
if (ipv4_is_multicast(iph->daddr))
|
|
- *dest = PACKET_MULTICAST;
|
|
+ nft_reg_store8(dest, PACKET_MULTICAST);
|
|
else
|
|
- *dest = PACKET_BROADCAST;
|
|
+ nft_reg_store8(dest, PACKET_BROADCAST);
|
|
|
|
break;
|
|
}
|
|
case htons(ETH_P_IPV6):
|
|
- *dest = PACKET_MULTICAST;
|
|
+ nft_reg_store8(dest, PACKET_MULTICAST);
|
|
break;
|
|
default:
|
|
WARN_ON_ONCE(1);
|
|
@@ -233,7 +230,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
|
|
{
|
|
const struct nft_meta *meta = nft_expr_priv(expr);
|
|
struct sk_buff *skb = pkt->skb;
|
|
- u32 value = regs->data[meta->sreg];
|
|
+ u32 *sreg = &regs->data[meta->sreg];
|
|
+ u32 value = *sreg;
|
|
+ u8 pkt_type;
|
|
|
|
switch (meta->key) {
|
|
case NFT_META_MARK:
|
|
@@ -243,9 +242,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
|
|
skb->priority = value;
|
|
break;
|
|
case NFT_META_PKTTYPE:
|
|
- if (skb->pkt_type != value &&
|
|
- skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
|
|
- skb->pkt_type = value;
|
|
+ pkt_type = nft_reg_load8(sreg);
|
|
+
|
|
+ if (skb->pkt_type != pkt_type &&
|
|
+ skb_pkt_type_ok(pkt_type) &&
|
|
+ skb_pkt_type_ok(skb->pkt_type))
|
|
+ skb->pkt_type = pkt_type;
|
|
break;
|
|
case NFT_META_NFTRACE:
|
|
skb->nf_trace = !!value;
|
|
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
|
|
index ee2d71753746..4c48e9bb21e2 100644
|
|
--- a/net/netfilter/nft_nat.c
|
|
+++ b/net/netfilter/nft_nat.c
|
|
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
|
|
}
|
|
|
|
if (priv->sreg_proto_min) {
|
|
- range.min_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_min];
|
|
- range.max_proto.all =
|
|
- *(__be16 *)&regs->data[priv->sreg_proto_max];
|
|
+ range.min_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_min]);
|
|
+ range.max_proto.all = (__force __be16)nft_reg_load16(
|
|
+ &regs->data[priv->sreg_proto_max]);
|
|
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
|
|
}
|
|
|
|
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
|
|
index b3f7980b0f27..d646aa770ac8 100644
|
|
--- a/net/sched/sch_tbf.c
|
|
+++ b/net/sched/sch_tbf.c
|
|
@@ -142,16 +142,6 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
|
|
return len;
|
|
}
|
|
|
|
-/*
|
|
- * Return length of individual segments of a gso packet,
|
|
- * including all headers (MAC, IP, TCP/UDP)
|
|
- */
|
|
-static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
|
|
-{
|
|
- unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
|
|
- return hdr_len + skb_gso_transport_seglen(skb);
|
|
-}
|
|
-
|
|
/* GSO packet is too big, segment it so that tbf can transmit
|
|
* each segment in time
|
|
*/
|
|
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
|
|
index ba9cd75e4c98..447b3a8a83c3 100644
|
|
--- a/sound/pci/hda/patch_conexant.c
|
|
+++ b/sound/pci/hda/patch_conexant.c
|
|
@@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
|
|
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
|
|
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
|
|
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
|
|
index e6ac7b9b4648..497bad9f2789 100644
|
|
--- a/sound/usb/pcm.c
|
|
+++ b/sound/usb/pcm.c
|
|
@@ -313,6 +313,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
|
|
return 0;
|
|
}
|
|
|
|
+/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
|
|
+ * applies. Returns 1 if a quirk was found.
|
|
+ */
|
|
static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
|
|
struct usb_device *dev,
|
|
struct usb_interface_descriptor *altsd,
|
|
@@ -391,7 +394,7 @@ add_sync_ep:
|
|
|
|
subs->data_endpoint->sync_master = subs->sync_endpoint;
|
|
|
|
- return 0;
|
|
+ return 1;
|
|
}
|
|
|
|
static int set_sync_endpoint(struct snd_usb_substream *subs,
|
|
@@ -430,6 +433,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
|
|
if (err < 0)
|
|
return err;
|
|
|
|
+ /* endpoint set by quirk */
|
|
+ if (err > 0)
|
|
+ return 0;
|
|
+
|
|
if (altsd->bNumEndpoints < 2)
|
|
return 0;
|
|
|
|
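
The sound/usb change makes set_sync_ep_implicit_fb_quirk() a three-way result: negative for an error, 0 when no quirk matched so the generic descriptor walk should continue, and 1 when a quirk already configured the sync endpoint and the caller must stop. A tiny sketch of consuming such a tri-state helper (device IDs and names here are made up):

/* <0 = error, 0 = not handled (keep probing), >0 = handled (stop). */
#include <stdio.h>

static int apply_quirk(int device_id)
{
	if (device_id < 0)
		return -22;	/* invalid input, like returning -EINVAL */
	if (device_id == 0x0023)
		return 1;	/* quirk matched and set the endpoint up */
	return 0;		/* no quirk for this device */
}

static int set_sync_endpoint(int device_id)
{
	int err = apply_quirk(device_id);

	if (err < 0)
		return err;	/* propagate real failures */
	if (err > 0)
		return 0;	/* endpoint set by quirk, skip generic probing */
	printf("device %#x: descriptor-based setup\n", device_id);
	return 0;
}

int main(void)
{
	set_sync_endpoint(0x0023);	/* handled silently by the quirk */
	set_sync_endpoint(0x0042);	/* falls back and prints */
	return 0;
}
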
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
|
|
index 046a4850e3df..ff32ca1d81ff 100644
|
|
--- a/tools/perf/util/unwind-libdw.c
|
|
+++ b/tools/perf/util/unwind-libdw.c
|
|
@@ -231,7 +231,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
|
|
|
|
err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
|
|
|
|
- if (err && !ui->max_stack)
|
|
+ if (err && ui->max_stack != max_stack)
|
|
err = 0;
|
|
|
|
/*
|