diff --git a/0001-xen-use-correct-end-address-of-kernel-for-conflict-c.patch b/0001-xen-use-correct-end-address-of-kernel-for-conflict-c.patch
new file mode 100644
index 00000000..83998995
--- /dev/null
+++ b/0001-xen-use-correct-end-address-of-kernel-for-conflict-c.patch
@@ -0,0 +1,40 @@
+From db85abfa44f20856320f9cf19a4b4fe6d94b30f8 Mon Sep 17 00:00:00 2001
+From: Juergen Gross
+Date: Sat, 3 Aug 2024 08:01:22 +0200
+Subject: [PATCH 1/5] xen: use correct end address of kernel for conflict
+ checking
+
+When running as a Xen PV dom0 the kernel is loaded by the hypervisor
+using a different memory map than that of the host. In order to
+minimize the required changes in the kernel, the kernel adapts its
+memory map to that of the host. In order to do that it is checking
+for conflicts of its load address with the host memory map.
+
+Unfortunately the tested memory range does not include the .brk
+area, which might result in crashes or memory corruption when this
+area does conflict withe the memory map of the host.
+
+Fix the test by using the _end label instead of __bss_stop.
+
+Fixes: 808fdb71936c ("xen: check for kernel memory conflicting with memory layout")
+Signed-off-by: Juergen Gross
+---
+ arch/x86/xen/setup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 806ddb2391d9..4bcc70a71b7d 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -825,7 +825,7 @@ char * __init xen_memory_setup(void)
+ 	 * to relocating (and even reusing) pages with kernel text or data.
+ 	 */
+ 	if (xen_is_e820_reserved(__pa_symbol(_text),
+-				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
++				 __pa_symbol(_end) - __pa_symbol(_text))) {
+ 		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
+ 		BUG();
+ 	}
+-- 
+2.43.0
+
diff --git a/0002-xen-introduce-generic-helper-checking-for-memory-map.patch b/0002-xen-introduce-generic-helper-checking-for-memory-map.patch
new file mode 100644
index 00000000..3b66445b
--- /dev/null
+++ b/0002-xen-introduce-generic-helper-checking-for-memory-map.patch
@@ -0,0 +1,120 @@
+From c03b755e273639b865d351ba1d36625fc8b2791e Mon Sep 17 00:00:00 2001
+From: Juergen Gross
+Date: Fri, 2 Aug 2024 14:11:06 +0200
+Subject: [PATCH 2/5] xen: introduce generic helper checking for memory map
+ conflicts
+
+When booting as a Xen PV dom0 the memory layout of the dom0 is
+modified to match that of the host, as this requires less changes in
+the kernel for supporting Xen.
+
+There are some cases, though, which are problematic, as it is the Xen
+hypervisor selecting the kernel's load address plus some other data,
+which might conflict with the host's memory map.
+
+These conflicts are detected at boot time and result in a boot error.
+In order to support handling at least some of these conflicts in
+future, introduce a generic helper function which will later gain the
+ability to adapt the memory layout when possible.
+
+Add the missing check for the xen_start_info area.
+
+Signed-off-by: Juergen Gross
+---
+ arch/x86/xen/mmu_pv.c  |  5 +----
+ arch/x86/xen/setup.c   | 34 ++++++++++++++++++++++++++++------
+ arch/x86/xen/xen-ops.h |  3 ++-
+ 3 files changed, 31 insertions(+), 11 deletions(-)
+
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index f1ce39d6d32c..839e6613753d 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -2018,10 +2018,7 @@ void __init xen_reserve_special_pages(void)
+ 
+ void __init xen_pt_check_e820(void)
+ {
+-	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
+-		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
+-		BUG();
+-	}
++	xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
+ }
+ 
+ static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 4bcc70a71b7d..96765180514b 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -567,7 +567,7 @@ static void __init xen_ignore_unusable(void)
+ 	}
+ }
+ 
+-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
++static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+ {
+ 	struct e820_entry *entry;
+ 	unsigned mapcnt;
+@@ -624,6 +624,23 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
+ 	return 0;
+ }
+ 
++/*
++ * Check for an area in physical memory to be usable for non-movable purposes.
++ * An area is considered to usable if the used E820 map lists it to be RAM.
++ * In case the area is not usable, crash the system with an error message.
++ */
++void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
++			       const char *component)
++{
++	if (!xen_is_e820_reserved(start, size))
++		return;
++
++	xen_raw_console_write("Xen hypervisor allocated ");
++	xen_raw_console_write(component);
++	xen_raw_console_write(" memory conflicts with E820 map\n");
++	BUG();
++}
++
+ /*
+  * Like memcpy, but with physical addresses for dest and src.
+  */
+@@ -824,11 +841,16 @@ char * __init xen_memory_setup(void)
+ 	 * Failing now is better than running into weird problems later due
+ 	 * to relocating (and even reusing) pages with kernel text or data.
+ 	 */
+-	if (xen_is_e820_reserved(__pa_symbol(_text),
+-				 __pa_symbol(_end) - __pa_symbol(_text))) {
+-		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
+-		BUG();
+-	}
++	xen_chk_is_e820_usable(__pa_symbol(_text),
++			       __pa_symbol(_end) - __pa_symbol(_text),
++			       "kernel");
++
++	/*
++	 * Check for a conflict of the xen_start_info memory with the target
++	 * E820 map.
++	 */
++	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
++			       "xen_start_info");
+ 
+ 	/*
+ 	 * Check for a conflict of the hypervisor supplied page tables with
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 0cf16fc79e0b..9a27d1d653d3 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -48,7 +48,8 @@ void xen_mm_unpin_all(void);
+ void __init xen_relocate_p2m(void);
+ #endif
+ 
+-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
++void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
++			       const char *component);
+ unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
+ void __init xen_inv_extra_mem(void);
+ void __init xen_remap_memory(void);
+-- 
+2.43.0
+
diff --git a/0003-xen-move-checks-for-e820-conflicts-further-up.patch b/0003-xen-move-checks-for-e820-conflicts-further-up.patch
new file mode 100644
index 00000000..d7d8edf7
--- /dev/null
+++ b/0003-xen-move-checks-for-e820-conflicts-further-up.patch
@@ -0,0 +1,80 @@
+From 92bea9107c776578c415c56b4f6b3c7bca90c7e9 Mon Sep 17 00:00:00 2001
+From: Juergen Gross
+Date: Tue, 6 Aug 2024 09:56:48 +0200
+Subject: [PATCH 3/5] xen: move checks for e820 conflicts further up
+
+Move the checks for e820 memory map conflicts using the
+xen_chk_is_e820_usable() helper further up in order to prepare
+resolving some of the possible conflicts by doing some e820 map
+modifications, which must happen before evaluating the RAM layout.
+
+Signed-off-by: Juergen Gross
+---
+ arch/x86/xen/setup.c | 44 ++++++++++++++++++++++----------------------
+ 1 file changed, 22 insertions(+), 22 deletions(-)
+
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 96765180514b..dba68951ed6b 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -764,6 +764,28 @@ char * __init xen_memory_setup(void)
+ 	/* Make sure the Xen-supplied memory map is well-ordered. */
+ 	e820__update_table(&xen_e820_table);
+ 
++	/*
++	 * Check whether the kernel itself conflicts with the target E820 map.
++	 * Failing now is better than running into weird problems later due
++	 * to relocating (and even reusing) pages with kernel text or data.
++	 */
++	xen_chk_is_e820_usable(__pa_symbol(_text),
++			       __pa_symbol(_end) - __pa_symbol(_text),
++			       "kernel");
++
++	/*
++	 * Check for a conflict of the xen_start_info memory with the target
++	 * E820 map.
++	 */
++	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
++			       "xen_start_info");
++
++	/*
++	 * Check for a conflict of the hypervisor supplied page tables with
++	 * the target E820 map.
++	 */
++	xen_pt_check_e820();
++
+ 	max_pages = xen_get_max_pages();
+ 
+ 	/* How many extra pages do we need due to remapping? */
+@@ -836,28 +858,6 @@ char * __init xen_memory_setup(void)
+ 
+ 	e820__update_table(e820_table);
+ 
+-	/*
+-	 * Check whether the kernel itself conflicts with the target E820 map.
+-	 * Failing now is better than running into weird problems later due
+-	 * to relocating (and even reusing) pages with kernel text or data.
+-	 */
+-	xen_chk_is_e820_usable(__pa_symbol(_text),
+-			       __pa_symbol(_end) - __pa_symbol(_text),
+-			       "kernel");
+-
+-	/*
+-	 * Check for a conflict of the xen_start_info memory with the target
+-	 * E820 map.
+-	 */
+-	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
+-			       "xen_start_info");
+-
+-	/*
+-	 * Check for a conflict of the hypervisor supplied page tables with
+-	 * the target E820 map.
+-	 */
+-	xen_pt_check_e820();
+-
+ 	xen_reserve_xen_mfnlist();
+ 
+ 	/* Check for a conflict of the initrd with the target E820 map. */
+-- 
+2.43.0
+
diff --git a/0004-xen-move-max_pfn-in-xen_memory_setup-out-of-function.patch b/0004-xen-move-max_pfn-in-xen_memory_setup-out-of-function.patch
new file mode 100644
index 00000000..45841c6e
--- /dev/null
+++ b/0004-xen-move-max_pfn-in-xen_memory_setup-out-of-function.patch
@@ -0,0 +1,185 @@
+From 101e6b19bc6906ba069b20ad41bcf5577594399a Mon Sep 17 00:00:00 2001
+From: Juergen Gross
+Date: Tue, 6 Aug 2024 10:24:41 +0200
+Subject: [PATCH 4/5] xen: move max_pfn in xen_memory_setup() out of function
+ scope
+
+Instead of having max_pfn as a local variable of xen_memory_setup(),
+make it a static variable in setup.c instead. This avoids having to
+pass it to subfunctions, which will be needed in more cases in future.
+
+Rename it to ini_nr_pages, as the value denotes the currently usable
+number of memory pages as passed from the hypervisor at boot time.
+
+Signed-off-by: Juergen Gross
+---
+ arch/x86/xen/setup.c | 53 ++++++++++++++++++++++++---------------------------
+ 1 file changed, 27 insertions(+), 26 deletions(-)
+
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index dba68951ed6b..d678c0330971 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -46,6 +46,9 @@ bool xen_pv_pci_possible;
+ /* E820 map used during setting up memory. */
+ static struct e820_table xen_e820_table __initdata;
+ 
++/* Number of initially usable memory pages. */
++static unsigned long ini_nr_pages __initdata;
++
+ /*
+  * Buffer used to remap identity mapped pages. We only need the virtual space.
+  * The physical page behind this address is remapped as needed to different
+@@ -212,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn)
+  * as a fallback if the remapping fails.
+  */
+ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
+-			unsigned long end_pfn, unsigned long nr_pages)
++			unsigned long end_pfn)
+ {
+ 	unsigned long pfn, end;
+ 	int ret;
+@@ -220,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
+ 	WARN_ON(start_pfn > end_pfn);
+ 
+ 	/* Release pages first. */
+-	end = min(end_pfn, nr_pages);
++	end = min(end_pfn, ini_nr_pages);
+ 	for (pfn = start_pfn; pfn < end; pfn++) {
+ 		unsigned long mfn = pfn_to_mfn(pfn);
+ 
+@@ -341,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
+  * to Xen and not remapped.
+  */
+ static unsigned long __init xen_set_identity_and_remap_chunk(
+-	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+-	unsigned long remap_pfn)
++	unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
+ {
+ 	unsigned long pfn;
+ 	unsigned long i = 0;
+ 	unsigned long n = end_pfn - start_pfn;
+ 
+ 	if (remap_pfn == 0)
+-		remap_pfn = nr_pages;
++		remap_pfn = ini_nr_pages;
+ 
+ 	while (i < n) {
+ 		unsigned long cur_pfn = start_pfn + i;
+@@ -358,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
+ 		unsigned long remap_range_size;
+ 
+ 		/* Do not remap pages beyond the current allocation */
+-		if (cur_pfn >= nr_pages) {
++		if (cur_pfn >= ini_nr_pages) {
+ 			/* Identity map remaining pages */
+ 			set_phys_range_identity(cur_pfn, cur_pfn + size);
+ 			break;
+ 		}
+-		if (cur_pfn + size > nr_pages)
+-			size = nr_pages - cur_pfn;
++		if (cur_pfn + size > ini_nr_pages)
++			size = ini_nr_pages - cur_pfn;
+ 
+ 		remap_range_size = xen_find_pfn_range(&remap_pfn);
+ 		if (!remap_range_size) {
+ 			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
+ 			xen_set_identity_and_release_chunk(cur_pfn,
+-						cur_pfn + left, nr_pages);
++						cur_pfn + left);
+ 			break;
+ 		}
+ 		/* Adjust size to fit in current e820 RAM region */
+@@ -397,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
+ }
+ 
+ static unsigned long __init xen_count_remap_pages(
+-	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
++	unsigned long start_pfn, unsigned long end_pfn,
+ 	unsigned long remap_pages)
+ {
+-	if (start_pfn >= nr_pages)
++	if (start_pfn >= ini_nr_pages)
+ 		return remap_pages;
+ 
+-	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
++	return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
+ }
+ 
+-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
++static unsigned long __init xen_foreach_remap_area(
+ 	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
+-			      unsigned long nr_pages, unsigned long last_val))
++			      unsigned long last_val))
+ {
+ 	phys_addr_t start = 0;
+ 	unsigned long ret_val = 0;
+@@ -436,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+ 			end_pfn = PFN_UP(entry->addr);
+ 
+ 			if (start_pfn < end_pfn)
+-				ret_val = func(start_pfn, end_pfn, nr_pages,
+-						ret_val);
++				ret_val = func(start_pfn, end_pfn, ret_val);
+ 			start = end;
+ 		}
+ 	}
+@@ -700,7 +701,7 @@ static void __init xen_reserve_xen_mfnlist(void)
+  **/
+ char * __init xen_memory_setup(void)
+ {
+-	unsigned long max_pfn, pfn_s, n_pfns;
++	unsigned long pfn_s, n_pfns;
+ 	phys_addr_t mem_end, addr, size, chunk_size;
+ 	u32 type;
+ 	int rc;
+@@ -712,9 +713,9 @@ char * __init xen_memory_setup(void)
+ 	int op;
+ 
+ 	xen_parse_512gb();
+-	max_pfn = xen_get_pages_limit();
+-	max_pfn = min(max_pfn, xen_start_info->nr_pages);
+-	mem_end = PFN_PHYS(max_pfn);
++	ini_nr_pages = xen_get_pages_limit();
++	ini_nr_pages = min(ini_nr_pages, xen_start_info->nr_pages);
++	mem_end = PFN_PHYS(ini_nr_pages);
+ 
+ 	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
+ 	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
+@@ -789,10 +790,10 @@ char * __init xen_memory_setup(void)
+ 	max_pages = xen_get_max_pages();
+ 
+ 	/* How many extra pages do we need due to remapping? */
+-	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
++	max_pages += xen_foreach_remap_area(xen_count_remap_pages);
+ 
+-	if (max_pages > max_pfn)
+-		extra_pages += max_pages - max_pfn;
++	if (max_pages > ini_nr_pages)
++		extra_pages += max_pages - ini_nr_pages;
+ 
+ 	/*
+ 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+@@ -801,8 +802,8 @@ char * __init xen_memory_setup(void)
+ 	 * Make sure we have no memory above max_pages, as this area
+ 	 * isn't handled by the p2m management.
+ 	 */
+-	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+-			   extra_pages, max_pages - max_pfn);
++	extra_pages = min3(EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM)),
++			   extra_pages, max_pages - ini_nr_pages);
+ 	i = 0;
+ 	addr = xen_e820_table.entries[0].addr;
+ 	size = xen_e820_table.entries[0].size;
+@@ -885,7 +886,7 @@ char * __init xen_memory_setup(void)
+ 	 * Set identity map on non-RAM pages and prepare remapping the
+ 	 * underlying RAM.
+ 	 */
+-	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
++	xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
+ 
+ 	pr_info("Released %ld page(s)\n", xen_released_pages);
+ 
+-- 
+2.43.0
+
diff --git a/0005-xen-tolerate-ACPI-NVS-memory-overlapping-with-Xen-al.patch b/0005-xen-tolerate-ACPI-NVS-memory-overlapping-with-Xen-al.patch
new file mode 100644
index 00000000..dad824ff
--- /dev/null
+++ b/0005-xen-tolerate-ACPI-NVS-memory-overlapping-with-Xen-al.patch
@@ -0,0 +1,203 @@
+From fa89aba12bd34c918ce958e77e447d797f70f533 Mon Sep 17 00:00:00 2001
+From: Juergen Gross
+Date: Fri, 2 Aug 2024 20:14:22 +0200
+Subject: [PATCH 5/5] xen: tolerate ACPI NVS memory overlapping with Xen
+ allocated memory
+
+In order to minimize required special handling for running as Xen PV
+dom0, the memory layout is modified to match that of the host. This
+requires to have only RAM at the locations where Xen allocated memory
+is living. Unfortunately there seem to be some machines, where ACPI
+NVS is located at 64 MB, resulting in a conflict with the loaded
+kernel or the initial page tables built by Xen.
+
+As ACPI NVS needs to be accessed by the kernel only for saving and
+restoring it across suspend operations, it can be relocated in the
+dom0's memory map by swapping it with unused RAM (this is possible
+via modification of the dom0 P2M map).
+
+While the E820 map can (and should) be modified right away, the P2M
+map can be updated only after memory allocation is working, as the P2M
+map might need to be extended.
+
+Fixes: 808fdb71936c ("xen: check for kernel memory conflicting with memory layout")
+Signed-off-by: Juergen Gross
+---
+ arch/x86/xen/setup.c | 133 ++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 132 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index d678c0330971..dbb5d13ca61a 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -49,6 +49,15 @@ static struct e820_table xen_e820_table __initdata;
+ /* Number of initially usable memory pages. */
+ static unsigned long ini_nr_pages __initdata;
+ 
++/* Remapped non-RAM areas */
++#define NR_NONRAM_REMAP 4
++static struct nonram_remap {
++	unsigned long maddr;
++	unsigned long size;
++	unsigned long paddr;
++} xen_nonram_remap[NR_NONRAM_REMAP] __initdata;
++static unsigned int nr_nonram_remap __initdata;
++
+ /*
+  * Buffer used to remap identity mapped pages. We only need the virtual space.
+  * The physical page behind this address is remapped as needed to different
+@@ -452,6 +461,8 @@ static unsigned long __init xen_foreach_remap_area(
+  * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
+  * This scheme allows to remap the different chunks in arbitrary order while
+  * the resulting mapping will be independent from the order.
++ * In case xen_e820_resolve_conflicts() did relocate some non-RAM E820
++ * entries, set the correct P2M information for the affected pages.
+  */
+ void __init xen_remap_memory(void)
+ {
+@@ -495,6 +506,29 @@ void __init xen_remap_memory(void)
+ 	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
+ 
+ 	pr_info("Remapped %ld page(s)\n", remapped);
++
++	if (nr_nonram_remap == 0)
++		return;
++
++	remapped = 0;
++	for (i = 0; i < nr_nonram_remap; i++) {
++		struct nonram_remap *remap = xen_nonram_remap + i;
++
++		pfn = PFN_DOWN(remap->paddr);
++		mfn_save = PFN_DOWN(remap->maddr);
++		for (len = 0; len < remap->size; len += PAGE_SIZE) {
++			if (!set_phys_to_machine(pfn, mfn_save)) {
++				WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
++				     pfn, mfn_save);
++				BUG();
++			}
++			pfn++;
++			mfn_save++;
++			remapped++;
++		}
++	}
++
++	pr_info("Remapped %ld non-RAM page(s)\n", remapped);
+ }
+ 
+ static unsigned long __init xen_get_pages_limit(void)
+@@ -625,14 +659,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
+ 	return 0;
+ }
+ 
++/*
++ * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
++ * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
++ * The adaption of the P2M must be deferred until page allocation is possible.
++ */
++static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
++{
++	struct e820_entry *entry;
++	unsigned int mapcnt;
++	phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
++	struct nonram_remap *remap;
++	phys_addr_t swap_addr, swap_size, entry_end;
++
++	if (nr_nonram_remap == NR_NONRAM_REMAP) {
++		xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
++		BUG();
++	}
++
++	swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
++	swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
++	remap = xen_nonram_remap + nr_nonram_remap;
++	entry = xen_e820_table.entries;
++
++	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
++		entry_end = entry->addr + entry->size;
++		if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
++		    entry_end - swap_size >= mem_end) {
++			/* Reduce RAM entry by needed space (whole pages). */
++			entry->size -= swap_size;
++
++			/* Add new entry at the end of E820 map. */
++			entry = xen_e820_table.entries +
++				xen_e820_table.nr_entries;
++			xen_e820_table.nr_entries++;
++
++			/* Fill new entry (keep size and page offset). */
++			entry->type = swap_entry->type;
++			entry->addr = entry_end - swap_size +
++				      swap_addr - swap_entry->addr;
++			entry->size = swap_entry->size;
++
++			/* Convert old entry to RAM, align to pages. */
++			swap_entry->type = E820_TYPE_RAM;
++			swap_entry->addr = swap_addr;
++			swap_entry->size = swap_size;
++
++			/* Remember PFN<->MFN relation for P2M update. */
++			remap->maddr = swap_addr;
++			remap->size = swap_size;
++			remap->paddr = entry_end - swap_size;
++			nr_nonram_remap++;
++
++			/* Order E820 table and merge entries. */
++			e820__update_table(&xen_e820_table);
++
++			return;
++		}
++
++		entry++;
++	}
++
++	xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
++	BUG();
++}
++
++/*
++ * Look for non-RAM memory types in a specific guest physical area and move
++ * those away if possible (ACPI NVS only for now).
++ */
++static void __init xen_e820_resolve_conflicts(phys_addr_t start,
++					      phys_addr_t size)
++{
++	struct e820_entry *entry;
++	unsigned int mapcnt;
++	phys_addr_t end;
++
++	if (!size)
++		return;
++
++	end = start + size;
++	entry = xen_e820_table.entries;
++
++	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
++		if (entry->addr >= end)
++			return;
++
++		if (entry->addr + entry->size > start &&
++		    entry->type == E820_TYPE_NVS)
++			xen_e820_swap_entry_with_ram(entry);
++
++		entry++;
++	}
++}
++
+ /*
+  * Check for an area in physical memory to be usable for non-movable purposes.
+- * An area is considered to usable if the used E820 map lists it to be RAM.
++ * An area is considered to usable if the used E820 map lists it to be RAM or
++ * some other type which can be moved to higher PFNs while keeping the MFNs.
+  * In case the area is not usable, crash the system with an error message.
+  */
+ void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+ 			       const char *component)
+ {
++	xen_e820_resolve_conflicts(start, size);
++
+ 	if (!xen_is_e820_reserved(start, size))
+ 		return;
+ 
+-- 
+2.43.0
+
diff --git a/config-qubes b/config-qubes
index 03f5d27d..0e8a2432 100644
--- a/config-qubes
+++ b/config-qubes
@@ -176,8 +176,3 @@ CONFIG_PREEMPT=y
 ## sensors from 0f976d972a1671a303fad30a5e690304b0b82ee0
 ##
 ## Intel ME driver e0f8e9ca81b80d897b190f48a4af80eff3198cb1
-
-## Avoid kernel conflict with EfiACPIMemoryNVS region on some
-## AMD Threadripper platforms
-CONFIG_PHYSICAL_START=0x200000
-CONFIG_PHYSICAL_ALIGN=0x200000
diff --git a/kernel.spec.in b/kernel.spec.in
index bff34cec..1ef998fc 100644
--- a/kernel.spec.in
+++ b/kernel.spec.in
@@ -149,6 +149,12 @@ Patch28: 0001-iwlwifi-avoid-writing-to-MSI-X-page-when-MSI-X-is-no.patch
 Patch30: 0004-pvops-respect-removable-xenstore-flag-for-block-devi.patch
 Patch31: 0001-PCI-add-a-reset-quirk-for-Intel-I219LM-ethernet-adap.patch
 
+Patch40: 0001-xen-use-correct-end-address-of-kernel-for-conflict-c.patch
+Patch41: 0002-xen-introduce-generic-helper-checking-for-memory-map.patch
+Patch42: 0003-xen-move-checks-for-e820-conflicts-further-up.patch
+Patch43: 0004-xen-move-max_pfn-in-xen_memory_setup-out-of-function.patch
+Patch44: 0005-xen-tolerate-ACPI-NVS-memory-overlapping-with-Xen-al.patch
+
 # S0ix support:
 Patch61: xen-events-Add-wakeup-support-to-xen-pirq.patch
 Patch62: xen-pm-use-suspend.patch