Mirror of https://github.com/armbian/build.git, synced 2025-08-15 23:56:57 +02:00
220 lines, 6.5 KiB
diff --git a/Makefile b/Makefile
index 892ed237b1e1b6..bca0f2e14c5c2f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 81
+SUBLEVEL = 82
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1e666454ebdc3e..a06fab5016fdf4 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1315,6 +1315,10 @@ config MICROCODE
 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
 	select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
 
+config MICROCODE_INITRD32
+	def_bool y
+	depends on MICROCODE && X86_32 && BLK_DEV_INITRD
+
 config MICROCODE_LATE_LOADING
 	bool "Late microcode loading (DANGEROUS)"
 	default n
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 1ab475a518e9a5..0ee6ed0ff2bf20 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { }
 static inline void microcode_bsp_resume(void) { }
 #endif
 
+extern unsigned long initrd_start_early;
+
 #ifdef CONFIG_CPU_SUP_INTEL
 /* Intel specific microcode defines. Public for IFS */
 struct microcode_header_intel {
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index f3495623ac9972..bf483fcb4e5744 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -126,6 +126,7 @@ void clear_bss(void);
 #ifdef __i386__
 
 asmlinkage void __init __noreturn i386_start_kernel(void);
+void __init mk_early_pgtbl_32(void);
 
 #else
 asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3269a0e23d3ab8..0000325ab98f4d 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
+CFLAGS_REMOVE_head32.o = -pg
 CFLAGS_REMOVE_sev.o = -pg
 CFLAGS_REMOVE_rethook.o = -pg
 endif
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index bde27a35bf2e28..de001b2146abf3 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -30,12 +30,32 @@ static void __init i386_default_early_setup(void)
 	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
 }
 
+#ifdef CONFIG_MICROCODE_INITRD32
+unsigned long __initdata initrd_start_early;
+static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end;
+
+static void zap_early_initrd_mapping(void)
+{
+	pte_t *pl2p = initrd_pl2p_start;
+
+	for (; pl2p < initrd_pl2p_end; pl2p++) {
+		*pl2p = (pte_t){ .pte = 0 };
+
+		if (!IS_ENABLED(CONFIG_X86_PAE))
+			*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0};
+	}
+}
+#else
+static inline void zap_early_initrd_mapping(void) { }
+#endif
+
 asmlinkage __visible void __init __noreturn i386_start_kernel(void)
 {
 	/* Make sure IDT is set up before any exception happens */
 	idt_setup_early_handler();
 
 	load_ucode_bsp();
+	zap_early_initrd_mapping();
 
 	cr4_init_shadow();
 
@@ -72,52 +92,83 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void)
  * to the first kernel PMD. Note the upper half of each PMD or PTE are
  * always zero at this stage.
  */
-void __init mk_early_pgtbl_32(void);
-void __init mk_early_pgtbl_32(void)
-{
-#ifdef __pa
-#undef __pa
-#endif
-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
-	pte_t pte, *ptep;
-	int i;
-	unsigned long *ptr;
-	/* Enough space to fit pagetables for the low memory linear map */
-	const unsigned long limit = __pa(_end) +
-		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
 #ifdef CONFIG_X86_PAE
-	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
-#define SET_PL2(pl2, val) { (pl2).pmd = (val); }
+typedef pmd_t pl2_t;
+#define pl2_base initial_pg_pmd
+#define SET_PL2(val) { .pmd = (val), }
 #else
-	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
-#define SET_PL2(pl2, val) { (pl2).pgd = (val); }
+typedef pgd_t pl2_t;
+#define pl2_base initial_page_table
+#define SET_PL2(val) { .pgd = (val), }
 #endif
 
-	ptep = (pte_t *)__pa(__brk_base);
-	pte.pte = PTE_IDENT_ATTR;
-
+static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p,
+						  const unsigned long limit)
+{
 	while ((pte.pte & PTE_PFN_MASK) < limit) {
+		pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR);
+		int i;
+
+		**pl2p = pl2;
+		if (!IS_ENABLED(CONFIG_X86_PAE)) {
+			/* Kernel PDE entry */
+			*(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
+		}
 
-		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
-		*pl2p = pl2;
-#ifndef CONFIG_X86_PAE
-		/* Kernel PDE entry */
-		*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
-#endif
 		for (i = 0; i < PTRS_PER_PTE; i++) {
-			*ptep = pte;
+			**ptep = pte;
 			pte.pte += PAGE_SIZE;
-			ptep++;
+			(*ptep)++;
 		}
-
-		pl2p++;
+		(*pl2p)++;
 	}
+	return pte;
+}
+
+void __init __no_stack_protector mk_early_pgtbl_32(void)
+{
+	/* Enough space to fit pagetables for the low memory linear map */
+	unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+	pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
+	struct boot_params __maybe_unused *params;
+	pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
+	unsigned long *ptr;
+
+	pte.pte = PTE_IDENT_ATTR;
+	pte = init_map(pte, &ptep, &pl2p, limit);
 
-	ptr = (unsigned long *)__pa(&max_pfn_mapped);
+	ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped);
 	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
 	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
 
-	ptr = (unsigned long *)__pa(&_brk_end);
+	ptr = (unsigned long *)__pa_nodebug(&_brk_end);
 	*ptr = (unsigned long)ptep + PAGE_OFFSET;
-}
 
+#ifdef CONFIG_MICROCODE_INITRD32
+	/* Running on a hypervisor? */
+	if (native_cpuid_ecx(1) & BIT(31))
+		return;
+
+	params = (struct boot_params *)__pa_nodebug(&boot_params);
+	if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
+		return;
+
+	/* Save the virtual start address */
+	ptr = (unsigned long *)__pa_nodebug(&initrd_start_early);
+	*ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET;
+	*ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK;
+
+	/* Save PLP2 for cleanup */
+	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start);
+	*ptr = (unsigned long)pl2p + PAGE_OFFSET;
+
+	limit = (unsigned long)params->hdr.ramdisk_image;
+	pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit);
+	limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size;
+
+	init_map(pte, &ptep, &pl2p, limit);
+
+	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end);
+	*ptr = (unsigned long)pl2p + PAGE_OFFSET;
+#endif
+}