From d91ee5863b71e8c90eaf6035bff3078a85e2e7b5 Mon Sep 17 00:00:00 2001
From: Len Brown
Date: Fri, 1 Apr 2011 18:28:35 -0400
Subject: cpuidle: replace xen access to x86 pm_idle and default_idle

When a Xen Dom0 kernel boots on a hypervisor, it gets access to the
raw-hardware ACPI tables. While it parses the idle tables for the
hypervisor's benefit, it uses HLT for its own idle.

Rather than have xen scribble on pm_idle and access default_idle,
have it simply call disable_cpuidle() so acpi_idle will not load and
the architecture default HLT will be used.

cc: xen-devel@lists.xensource.com
Tested-by: Konrad Rzeszutek Wilk
Acked-by: H. Peter Anvin
Signed-off-by: Len Brown
---
 arch/x86/xen/setup.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 60aeeb56948f..a9627e2e3295 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -426,7 +427,7 @@ void __init xen_arch_setup(void)
 #ifdef CONFIG_X86_32
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
-	pm_idle = default_idle;
+	disable_cpuidle();
 	boot_option_idle_override = IDLE_HALT;

 	fiddle_vdso();
-- cgit v1.2.3

From 8f3c5883d840d079a5d2db20893b5ac8ba453b40 Mon Sep 17 00:00:00 2001
From: Igor Mammedov
Date: Tue, 2 Aug 2011 11:45:24 +0200
Subject: xen: Fix printk() format in xen/setup.c

Use the correct format specifier for unsigned long.

Signed-off-by: Igor Mammedov
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/setup.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 60aeeb56948f..e4056321b243 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -113,7 +113,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 			len++;
 		}
 	}
-	printk(KERN_CONT "%ld pages freed\n", len);
+	printk(KERN_CONT "%lu pages freed\n", len);

 	return len;
 }
@@ -139,7 +139,7 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 	if (last_end < max_addr)
 		released += xen_release_chunk(last_end, max_addr);

-	printk(KERN_INFO "released %ld pages of unused memory\n", released);
+	printk(KERN_INFO "released %lu pages of unused memory\n", released);
 	return released;
 }
-- cgit v1.2.3

From 98f531da8479eec3d828adc7f0865baaab99a543 Mon Sep 17 00:00:00 2001
From: Igor Mammedov
Date: Tue, 2 Aug 2011 11:45:25 +0200
Subject: xen: Fix misleading WARN message at xen_release_chunk

The WARN message should not complain about the whole range

  "Failed to release memory %lx-%lx err=%d\n"

when it fails to release just one page; instead it should report
which pfn was not freed.

In addition, the output of

  printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: "
  ...
  printk(KERN_CONT "%lu pages freed\n", len);

will be broken if the WARN between these two calls fires. So fix it
by using a single printk for this.
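[Editor's note: a minimal sketch, not part of the patch, of why the removed
two-printk pattern was fragile. The identifiers match those in the function
being patched; the loop body is elided.]

  /* Fragile: the KERN_CONT below is meant to continue the KERN_INFO
   * line, but any message emitted in between (such as the WARN inside
   * the loop) splits the output across two broken lines. */
  printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
         start, end);
  /* ... loop that may WARN() ... */
  printk(KERN_CONT "%lu pages freed\n", len);

  /* Robust: emit one complete, self-contained line after the loop,
   * which is what the patch switches to. */
  printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
         start, end, len);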
Signed-off-by: Igor Mammedov
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/setup.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e4056321b243..02ffd9e48c9f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -92,8 +92,6 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 	if (end <= start)
 		return 0;

-	printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
-	       start, end);
 	for(pfn = start; pfn < end; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);

@@ -106,14 +104,14 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
 					   &reservation);
-		WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
-		     start, end, ret);
+		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 		if (ret == 1) {
 			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 			len++;
 		}
 	}
-	printk(KERN_CONT "%lu pages freed\n", len);
+	printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
+	       start, end, len);

 	return len;
 }
-- cgit v1.2.3

From d312ae878b6aed3912e1acaaf5d0b2a9d08a4f11 Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Fri, 19 Aug 2011 15:57:16 +0100
Subject: xen: use maximum reservation to limit amount of usable RAM

Use the domain's maximum reservation to limit the amount of extra RAM
for the memory balloon. This reduces the size of the page tables and
the amount of reserved low memory (which defaults to about 1/32 of
the total RAM).

On a system with 8 GiB of RAM with the domain limited to 1 GiB the
kernel reports:

  Before: Memory: 627792k/4472000k available
  After:  Memory: 549740k/11132224k available

An increase of about 76 MiB (~1.5% of the unused 7 GiB). The reserved
low memory is also reduced from 253 MiB to 32 MiB. The total
additional usable RAM is 329 MiB.

For dom0, this requires a patch to Xen ('x86: use 'dom0_mem' to limit
the number of pages for dom0') (c/s 23790).

CC: stable@kernel.org
Signed-off-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/setup.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 02ffd9e48c9f..ff3dfa176814 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -183,6 +183,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
 		PFN_UP(start_pci), PFN_DOWN(last));
 	return identity;
 }
+
+static unsigned long __init xen_get_max_pages(void)
+{
+	unsigned long max_pages = MAX_DOMAIN_PAGES;
+	domid_t domid = DOMID_SELF;
+	int ret;
+
+	ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+	if (ret > 0)
+		max_pages = ret;
+	return min(max_pages, MAX_DOMAIN_PAGES);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -291,6 +304,12 @@ char * __init xen_memory_setup(void)
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

+	extra_limit = xen_get_max_pages();
+	if (extra_limit >= max_pfn)
+		extra_pages = extra_limit - max_pfn;
+	else
+		extra_pages = 0;
+
 	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

 	/*
-- cgit v1.2.3

From e3b73c4a25e9a5705b4ef28b91676caf01f9bc9f Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Tue, 13 Sep 2011 10:17:32 -0400
Subject: xen/e820: if there is no dom0_mem=, don't tweak extra_pages.
The patch "xen: use maximum reservation to limit amount of usable RAM" (d312ae878b6aed3912e1acaaf5d0b2a9d08a4f11) breaks machines that do not use 'dom0_mem=' argument with: reserve RAM buffer: 000000133f2e2000 - 000000133fffffff (XEN) mm.c:4976:d0 Global bit is set to kernel page fffff8117e (XEN) domain_crash_sync called from entry.S (XEN) Domain 0 (vcpu#0) crashed on cpu#0: ... The reason being that the last E820 entry is created using the 'extra_pages' (which is based on how many pages have been freed). The mentioned git commit sets the initial value of 'extra_pages' using a hypercall which returns the number of pages (if dom0_mem has been used) or -1 otherwise. If the later we return with MAX_DOMAIN_PAGES as basis for calculation: return min(max_pages, MAX_DOMAIN_PAGES); and use it: extra_limit = xen_get_max_pages(); if (extra_limit >= max_pfn) extra_pages = extra_limit - max_pfn; else extra_pages = 0; which means we end up with extra_pages = 128GB in PFNs (33554432) - 8GB in PFNs (2097152, on this specific box, can be larger or smaller), and then we add that value to the E820 making it: Xen: 00000000ff000000 - 0000000100000000 (reserved) Xen: 0000000100000000 - 000000133f2e2000 (usable) which is clearly wrong. It should look as so: Xen: 00000000ff000000 - 0000000100000000 (reserved) Xen: 0000000100000000 - 000000027fbda000 (usable) Naturally this problem does not present itself if dom0_mem=max:X is used. CC: stable@kernel.org Signed-off-by: David Vrabel Signed-off-by: Konrad Rzeszutek Wilk --- arch/x86/xen/setup.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/x86/xen/setup.c') diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index ff3dfa176814..09688eb4a899 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -305,10 +305,12 @@ char * __init xen_memory_setup(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); extra_limit = xen_get_max_pages(); - if (extra_limit >= max_pfn) - extra_pages = extra_limit - max_pfn; - else - extra_pages = 0; + if (max_pfn + extra_pages > extra_limit) { + if (extra_limit > max_pfn) + extra_pages = extra_limit - max_pfn; + else + extra_pages = 0; + } extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); -- cgit v1.2.3 From aa24411b6717fd1e6ecef281bec497f6f30bbd66 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 28 Sep 2011 17:46:32 +0100 Subject: xen/balloon: account for pages released during memory setup In xen_memory_setup() pages that occur in gaps in the memory map are released back to Xen. This reduces the domain's current page count in the hypervisor. The Xen balloon driver does not correctly decrease its initial current_pages count to reflect this. If 'delta' pages are released and the target is adjusted the resulting reservation is always 'delta' less than the requested target. This affects dom0 if the initial allocation of pages overlaps the PCI memory region but won't affect most domU guests that have been setup with pseudo-physical memory maps that don't have gaps. Fix this by accouting for the released pages when starting the balloon driver. If the domain's targets are managed by xapi, the domain may eventually run out of memory and die because xapi currently gets its target calculations wrong and whenever it is restarted it always reduces the target by 'delta'. 
Signed-off-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/setup.c  | 7 ++++++-
 drivers/xen/balloon.c | 4 +++-
 include/xen/page.h    | 2 ++
 3 files changed, 11 insertions(+), 2 deletions(-)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 46d6d21dbdbe..c983717c018c 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -39,6 +39,9 @@ extern void xen_syscall32_target(void);
 /* Amount of extra memory space we add to the e820 ranges */
 phys_addr_t xen_extra_mem_start, xen_extra_mem_size;

+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
+
 /*
  * The maximum amount of extra memory compared to the base size. The
  * main scaling factor is the size of struct page. At extreme ratios
@@ -313,7 +316,9 @@ char * __init xen_memory_setup(void)
 			extra_pages = 0;
 	}

-	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+	xen_released_pages = xen_return_unused_memory(xen_start_info->nr_pages,
+						      &e820);
+	extra_pages += xen_released_pages;

 	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5dfd8f8ff07f..4f59fb373381 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -565,7 +565,9 @@ static int __init balloon_init(void)

 	pr_info("xen/balloon: Initialising balloon driver.\n");

-	balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
+	balloon_stats.current_pages = xen_pv_domain()
+		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
+		: max_pfn;
 	balloon_stats.target_pages = balloon_stats.current_pages;
 	balloon_stats.balloon_low = 0;
 	balloon_stats.balloon_high = 0;
diff --git a/include/xen/page.h b/include/xen/page.h
index 0be36b976f4b..92b61f8c772c 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -5,4 +5,6 @@

 extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size;

+extern unsigned long xen_released_pages;
+
 #endif /* _XEN_PAGE_H */
-- cgit v1.2.3

From 8b5d44a5ac93cd7a1b044db3ff0ba4955b4ba5ec Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Wed, 28 Sep 2011 17:46:34 +0100
Subject: xen: allow balloon driver to use more than one memory region

Allow the xen balloon driver to populate its list of extra pages from
more than one region of memory. This will allow platforms to provide
(for example) a region of low memory and a region of high memory.

The maximum possible number of extra regions is 128 (== E820MAX),
which is quite large, so xen_extra_mem is placed in __initdata. This
is safe as both xen_memory_setup() and balloon_init() are in __init.

The balloon regions themselves are not altered (i.e., there is still
only the one region).
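[Editor's note: a sketch of how the new xen_extra_mem[] array is meant to
be consumed, mirroring the balloon_init() loop in the patch below. A slot
with size == 0 is unused; with this patch only one slot is populated.]

  int i;

  for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
          if (xen_extra_mem[i].size == 0)
                  continue;               /* unused slot */
          balloon_add_region(PFN_UP(xen_extra_mem[i].start),
                             PFN_DOWN(xen_extra_mem[i].size));
  }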
Signed-off-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/setup.c  | 20 ++++++++++----------
 drivers/xen/balloon.c | 44 +++++++++++++++++++++++++++-----------------
 include/xen/page.h    | 10 +++++++++-
 3 files changed, 46 insertions(+), 28 deletions(-)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c983717c018c..0c8e974c738a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,7 +37,7 @@ extern void xen_syscall_target(void);
 extern void xen_syscall32_target(void);

 /* Amount of extra memory space we add to the e820 ranges */
-phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

 /* Number of pages released from the initial allocation. */
 unsigned long xen_released_pages;
@@ -59,7 +59,7 @@ static void __init xen_add_extra_mem(unsigned long pages)
 	unsigned long pfn;

 	u64 size = (u64)pages * PAGE_SIZE;
-	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
+	u64 extra_start = xen_extra_mem[0].start + xen_extra_mem[0].size;

 	if (!pages)
 		return;
@@ -69,7 +69,7 @@ static void __init xen_add_extra_mem(unsigned long pages)

 	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

-	xen_extra_mem_size += size;
+	xen_extra_mem[0].size += size;

 	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);

@@ -242,7 +242,7 @@ char * __init xen_memory_setup(void)

 	memcpy(map_raw, map, sizeof(map));
 	e820.nr_map = 0;
-	xen_extra_mem_start = mem_end;
+	xen_extra_mem[0].start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
 		unsigned long long end;

@@ -270,8 +270,8 @@ char * __init xen_memory_setup(void)
 			e820_add_region(end, delta, E820_UNUSABLE);
 		}

-		if (map[i].size > 0 && end > xen_extra_mem_start)
-			xen_extra_mem_start = end;
+		if (map[i].size > 0 && end > xen_extra_mem[0].start)
+			xen_extra_mem[0].start = end;

 		/* Add region if any remains */
 		if (map[i].size > 0)
@@ -279,10 +279,10 @@ char * __init xen_memory_setup(void)
 	}
 	/* Align the balloon area so that max_low_pfn does not get set
 	 * to be at the _end_ of the PCI gap at the far end (fee01000).
-	 * Note that xen_extra_mem_start gets set in the loop above to be
-	 * past the last E820 region. */
-	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
-		xen_extra_mem_start = (1ULL<<32);
+	 * Note that the start of balloon area gets set in the loop above
+	 * to be past the last E820 region. */
+	if (xen_initial_domain() && (xen_extra_mem[0].start < (1ULL<<32)))
+		xen_extra_mem[0].start = (1ULL<<32);

 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 9efb993090aa..fc43b53651d7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -555,11 +555,32 @@ void free_xenballooned_pages(int nr_pages, struct page** pages)
 }
 EXPORT_SYMBOL(free_xenballooned_pages);

-static int __init balloon_init(void)
+static void __init balloon_add_region(unsigned long start_pfn,
+				      unsigned long pages)
 {
 	unsigned long pfn, extra_pfn_end;
 	struct page *page;

+	/*
+	 * If the amount of usable memory has been limited (e.g., with
+	 * the 'mem' command line parameter), don't add pages beyond
+	 * this limit.
+	 */
+	extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
+		page = pfn_to_page(pfn);
+		/* totalram_pages and totalhigh_pages do not
+		   include the boot-time balloon extension, so
+		   don't subtract from it. */
+		__balloon_append(page);
+	}
+}
+
+static int __init balloon_init(void)
+{
+	int i;
+
 	if (!xen_domain())
 		return -ENODEV;

@@ -587,23 +608,12 @@ static int __init balloon_init(void)
 	/*
 	 * Initialize the balloon with pages from the extra memory
-	 * region (see arch/x86/xen/setup.c).
-	 *
-	 * If the amount of usable memory has been limited (e.g., with
-	 * the 'mem' command line parameter), don't add pages beyond
-	 * this limit.
+	 * regions (see arch/x86/xen/setup.c).
 	 */
-	extra_pfn_end = min(max_pfn,
-			    (unsigned long)PFN_DOWN(xen_extra_mem_start
-						    + xen_extra_mem_size));
-	for (pfn = PFN_UP(xen_extra_mem_start);
-	     pfn < extra_pfn_end;
-	     pfn++) {
-		page = pfn_to_page(pfn);
-		/* totalram_pages and totalhigh_pages do not include the boot-time
-		   balloon extension, so don't subtract from it. */
-		__balloon_append(page);
-	}
+	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
+		if (xen_extra_mem[i].size)
+			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
+					   PFN_DOWN(xen_extra_mem[i].size));

 	return 0;
 }
diff --git a/include/xen/page.h b/include/xen/page.h
index 92b61f8c772c..12765b6f9517 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,7 +3,15 @@

 #include

-extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region {
+	phys_addr_t start;
+	phys_addr_t size;
+};
+
+#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
+
+extern __initdata
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS];

 extern unsigned long xen_released_pages;
-- cgit v1.2.3

From dc91c728fddc29dfed1ae96f6807216b5f42d3a1 Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Thu, 29 Sep 2011 12:26:19 +0100
Subject: xen: allow extra memory to be in multiple regions

Allow the extra memory (used by the balloon driver) to be in multiple
regions (typically two regions, one for low memory and one for high
memory). This allows the balloon driver to increase the number of
available low pages (if the initial number of pages is small).

As a side effect, the algorithm for building the e820 memory map is
simpler and more obviously correct, as the map supplied by the
hypervisor is (almost) used as is (in particular, all reserved regions
and gaps are preserved). Only RAM regions are altered, and RAM regions
above max_pfn + extra_pages are marked as unused (the region is split
in two if necessary).

Signed-off-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/setup.c | 182 ++++++++++++++++++++++++---------------------------
 1 file changed, 86 insertions(+), 96 deletions(-)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 0c8e974c738a..2ad2fd53bd32 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -54,26 +54,32 @@ unsigned long xen_released_pages;
 */
 #define EXTRA_MEM_RATIO (10)

-static void __init xen_add_extra_mem(unsigned long pages)
+static void __init xen_add_extra_mem(u64 start, u64 size)
 {
 	unsigned long pfn;
+	int i;

-	u64 size = (u64)pages * PAGE_SIZE;
-	u64 extra_start = xen_extra_mem[0].start + xen_extra_mem[0].size;
-
-	if (!pages)
-		return;
-
-	e820_add_region(extra_start, size, E820_RAM);
-	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
-	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+		/* Add new region. */
+		if (xen_extra_mem[i].size == 0) {
+			xen_extra_mem[i].start = start;
+			xen_extra_mem[i].size = size;
+			break;
+		}
+		/* Append to existing region. */
+		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
+			xen_extra_mem[i].size += size;
+			break;
+		}
+	}
+	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
+		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

-	xen_extra_mem[0].size += size;
+	memblock_x86_reserve_range(start, start + size, "XEN EXTRA");

-	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+	xen_max_p2m_pfn = PFN_DOWN(start + size);

-	for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+	for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }

@@ -120,8 +126,8 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 	return len;
 }

-static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
-						     const struct e820map *e820)
+static unsigned long __init xen_return_unused_memory(
+	unsigned long max_pfn, const struct e820entry *map, int nr_map)
 {
 	phys_addr_t max_addr = PFN_PHYS(max_pfn);
 	phys_addr_t last_end = ISA_END_ADDRESS;
@@ -129,13 +135,13 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 	int i;

 	/* Free any unused memory above the low 1Mbyte. */
-	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
-		phys_addr_t end = e820->map[i].addr;
+	for (i = 0; i < nr_map && last_end < max_addr; i++) {
+		phys_addr_t end = map[i].addr;
 		end = min(max_addr, end);

 		if (last_end < end)
 			released += xen_release_chunk(last_end, end);
-		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
+		last_end = max(last_end, map[i].addr + map[i].size);
 	}

 	if (last_end < max_addr)
@@ -200,20 +206,32 @@ static unsigned long __init xen_get_max_pages(void)
 	return min(max_pages, MAX_DOMAIN_PAGES);
 }

+static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+{
+	u64 end = start + size;
+
+	/* Align RAM regions to page boundaries. */
+	if (type == E820_RAM) {
+		start = PAGE_ALIGN(start);
+		end &= ~((u64)PAGE_SIZE - 1);
+	}
+
+	e820_add_region(start, end - start, type);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
 char * __init xen_memory_setup(void)
 {
 	static struct e820entry map[E820MAX] __initdata;
-	static struct e820entry map_raw[E820MAX] __initdata;

 	unsigned long max_pfn = xen_start_info->nr_pages;
 	unsigned long long mem_end;
 	int rc;
 	struct xen_memory_map memmap;
+	unsigned long max_pages;
 	unsigned long extra_pages = 0;
-	unsigned long extra_limit;
 	unsigned long identity_pages = 0;
 	int i;
 	int op;
@@ -240,49 +258,55 @@ char * __init xen_memory_setup(void)
 	}
 	BUG_ON(rc);

-	memcpy(map_raw, map, sizeof(map));
-	e820.nr_map = 0;
-	xen_extra_mem[0].start = mem_end;
-	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end;
-
-		/* Guard against non-page aligned E820 entries. */
-		if (map[i].type == E820_RAM)
-			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
-
-		end = map[i].addr + map[i].size;
-		if (map[i].type == E820_RAM && end > mem_end) {
-			/* RAM off the end - may be partially included */
-			u64 delta = min(map[i].size, end - mem_end);
-
-			map[i].size -= delta;
-			end -= delta;
-
-			extra_pages += PFN_DOWN(delta);
-			/*
-			 * Set RAM below 4GB that is not for us to be unusable.
-			 * This prevents "System RAM" address space from being
-			 * used as potential resource for I/O address (happens
-			 * when 'allocate_resource' is called).
-			 */
-			if (delta &&
-			    (xen_initial_domain() && end < 0x100000000ULL))
-				e820_add_region(end, delta, E820_UNUSABLE);
+	/* Make sure the Xen-supplied memory map is well-ordered. */
+	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+	max_pages = xen_get_max_pages();
+	if (max_pages > max_pfn)
+		extra_pages += max_pages - max_pfn;
+
+	xen_released_pages = xen_return_unused_memory(max_pfn, map,
+						      memmap.nr_entries);
+	extra_pages += xen_released_pages;
+
+	/*
+	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+	 * factor the base size. On non-highmem systems, the base
+	 * size is the full initial memory allocation; on highmem it
+	 * is limited to the max size of lowmem, so that it doesn't
+	 * get completely filled.
+	 *
+	 * In principle there could be a problem in lowmem systems if
+	 * the initial memory is also very large with respect to
+	 * lowmem, but we won't try to deal with that here.
+	 */
+	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+			  extra_pages);
+
+	i = 0;
+	while (i < memmap.nr_entries) {
+		u64 addr = map[i].addr;
+		u64 size = map[i].size;
+		u32 type = map[i].type;
+
+		if (type == E820_RAM) {
+			if (addr < mem_end) {
+				size = min(size, mem_end - addr);
+			} else if (extra_pages) {
+				size = min(size, (u64)extra_pages * PAGE_SIZE);
+				extra_pages -= size / PAGE_SIZE;
+				xen_add_extra_mem(addr, size);
+			} else
+				type = E820_UNUSABLE;
 		}

-		if (map[i].size > 0 && end > xen_extra_mem[0].start)
-			xen_extra_mem[0].start = end;
+		xen_align_and_add_e820_region(addr, size, type);

-		/* Add region if any remains */
-		if (map[i].size > 0)
-			e820_add_region(map[i].addr, map[i].size, map[i].type);
+		map[i].addr += size;
+		map[i].size -= size;
+		if (map[i].size == 0)
+			i++;
 	}
-	/* Align the balloon area so that max_low_pfn does not get set
-	 * to be at the _end_ of the PCI gap at the far end (fee01000).
-	 * Note that the start of balloon area gets set in the loop above
-	 * to be past the last E820 region. */
-	if (xen_initial_domain() && (xen_extra_mem[0].start < (1ULL<<32)))
-		xen_extra_mem[0].start = (1ULL<<32);

 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
@@ -308,45 +332,11 @@ char * __init xen_memory_setup(void)
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

-	extra_limit = xen_get_max_pages();
-	if (max_pfn + extra_pages > extra_limit) {
-		if (extra_limit > max_pfn)
-			extra_pages = extra_limit - max_pfn;
-		else
-			extra_pages = 0;
-	}
-
-	xen_released_pages = xen_return_unused_memory(xen_start_info->nr_pages,
-						      &e820);
-	extra_pages += xen_released_pages;
-
-	/*
-	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
-	 * factor the base size. On non-highmem systems, the base
-	 * size is the full initial memory allocation; on highmem it
-	 * is limited to the max size of lowmem, so that it doesn't
-	 * get completely filled.
-	 *
-	 * In principle there could be a problem in lowmem systems if
-	 * the initial memory is also very large with respect to
-	 * lowmem, but we won't try to deal with that here.
-	 */
-	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
-			  max_pfn + extra_pages);
-
-	if (extra_limit >= max_pfn)
-		extra_pages = extra_limit - max_pfn;
-	else
-		extra_pages = 0;
-
-	xen_add_extra_mem(extra_pages);
-
 	/*
 	 * Set P2M for all non-RAM pages and E820 gaps to be identity
-	 * type PFNs. We supply it with the non-sanitized version
-	 * of the E820.
+	 * type PFNs.
 	 */
-	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
+	identity_pages = xen_set_identity(e820.map, e820.nr_map);
 	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
 	return "Xen";
 }
-- cgit v1.2.3

From f3f436e33b925ead21e3f9b47b1e2aed965511d9 Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Wed, 28 Sep 2011 17:46:36 +0100
Subject: xen: release all pages within 1-1 p2m mappings

In xen_memory_setup() all reserved regions and gaps are set to an
identity (1-1) p2m mapping. If an available page has a PFN within one
of these 1-1 mappings it will become inaccessible (as its MFN is
lost), so release such pages before setting up the mapping.

This can make an additional 256 MiB or more of RAM available
(depending on the size of the reserved regions in the memory map) if
the initial pages overlap with reserved regions.

The 1:1 p2m mappings are also extended to cover partial pages. This
fixes an issue with (for example) systems with a BIOS that puts the
DMI tables in a reserved region that begins on a non-page boundary.

Signed-off-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/setup.c | 117 ++++++++++++++++++---------------------------
 1 file changed, 42 insertions(+), 75 deletions(-)

(limited to 'arch/x86/xen/setup.c')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 2ad2fd53bd32..38d0af4fefec 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -83,25 +83,18 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }

-static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
-					      phys_addr_t end_addr)
+static unsigned long __init xen_release_chunk(unsigned long start,
+					      unsigned long end)
 {
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
 		.extent_order = 0,
 		.domid = DOMID_SELF
 	};
-	unsigned long start, end;
 	unsigned long len = 0;
 	unsigned long pfn;
 	int ret;

-	start = PFN_UP(start_addr);
-	end = PFN_DOWN(end_addr);
-
-	if (end <= start)
-		return 0;
-
 	for(pfn = start; pfn < end; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);

@@ -126,72 +119,52 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 	return len;
 }

-static unsigned long __init xen_return_unused_memory(
-	unsigned long max_pfn, const struct e820entry *map, int nr_map)
+static unsigned long __init xen_set_identity_and_release(
+	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
-	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	phys_addr_t last_end = ISA_END_ADDRESS;
+	phys_addr_t start = 0;
 	unsigned long released = 0;
-	int i;
-
-	/* Free any unused memory above the low 1Mbyte. */
-	for (i = 0; i < nr_map && last_end < max_addr; i++) {
-		phys_addr_t end = map[i].addr;
-		end = min(max_addr, end);
-
-		if (last_end < end)
-			released += xen_release_chunk(last_end, end);
-		last_end = max(last_end, map[i].addr + map[i].size);
-	}
-
-	if (last_end < max_addr)
-		released += xen_release_chunk(last_end, max_addr);
-
-	printk(KERN_INFO "released %lu pages of unused memory\n", released);
-	return released;
-}
-
-static unsigned long __init xen_set_identity(const struct e820entry *list,
-					     ssize_t map_size)
-{
-	phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
-	phys_addr_t start_pci = last;
-	const struct e820entry *entry;
 	unsigned long identity = 0;
+	const struct e820entry *entry;
 	int i;

+	/*
+	 * Combine non-RAM regions and gaps until a RAM region (or the
+	 * end of the map) is reached, then set the 1:1 map and
+	 * release the pages (if available) in those non-RAM regions.
+	 *
+	 * The combined non-RAM regions are rounded to a whole number
+	 * of pages so any partial pages are accessible via the 1:1
+	 * mapping. This is needed for some BIOSes that put (for
+	 * example) the DMI tables in a reserved region that begins on
+	 * a non-page boundary.
+	 */
 	for (i = 0, entry = list; i < map_size; i++, entry++) {
-		phys_addr_t start = entry->addr;
-		phys_addr_t end = start + entry->size;
+		phys_addr_t end = entry->addr + entry->size;

-		if (start < last)
-			start = last;
+		if (entry->type == E820_RAM || i == map_size - 1) {
+			unsigned long start_pfn = PFN_DOWN(start);
+			unsigned long end_pfn = PFN_UP(end);

-		if (end <= start)
-			continue;
+			if (entry->type == E820_RAM)
+				end_pfn = PFN_UP(entry->addr);

-		/* Skip over the 1MB region. */
-		if (last > end)
-			continue;
+			if (start_pfn < end_pfn) {
+				if (start_pfn < nr_pages)
+					released += xen_release_chunk(
+						start_pfn, min(end_pfn, nr_pages));

-		if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
-			if (start > start_pci)
 				identity += set_phys_range_identity(
-					PFN_UP(start_pci), PFN_DOWN(start));
-
-			/* Without saving 'last' we would gooble RAM too
-			 * at the end of the loop. */
-			last = end;
-			start_pci = end;
-			continue;
+					start_pfn, end_pfn);
+			}
+			start = end;
 		}
-		start_pci = min(start, start_pci);
-		last = end;
 	}
-	if (last > start_pci)
-		identity += set_phys_range_identity(
-			PFN_UP(start_pci), PFN_DOWN(last));
-	return identity;
+
+	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+
+	return released;
 }

 static unsigned long __init xen_get_max_pages(void)
@@ -232,7 +205,6 @@ char * __init xen_memory_setup(void)
 	struct xen_memory_map memmap;
 	unsigned long max_pages;
 	unsigned long extra_pages = 0;
-	unsigned long identity_pages = 0;
 	int i;
 	int op;
@@ -265,8 +237,13 @@ char * __init xen_memory_setup(void)
 	if (max_pages > max_pfn)
 		extra_pages += max_pages - max_pfn;

-	xen_released_pages = xen_return_unused_memory(max_pfn, map,
-						      memmap.nr_entries);
+	/*
+	 * Set P2M for all non-RAM pages and E820 gaps to be identity
+	 * type PFNs. Any RAM pages that would be made inaccessible by
+	 * this are first released.
+	 */
+	xen_released_pages = xen_set_identity_and_release(
+		map, memmap.nr_entries, max_pfn);
 	extra_pages += xen_released_pages;
@@ -312,10 +289,6 @@ char * __init xen_memory_setup(void)
 	 * In domU, the ISA region is normal, usable memory, but we
 	 * reserve ISA memory anyway because too many things poke
 	 * about in there.
-	 *
-	 * In Dom0, the host E820 information can leave gaps in the
-	 * ISA range, which would cause us to release those pages. To
-	 * avoid this, we unconditionally reserve them here.
 	 */
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
@@ -332,12 +305,6 @@ char * __init xen_memory_setup(void)
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

-	/*
-	 * Set P2M for all non-RAM pages and E820 gaps to be identity
-	 * type PFNs.
-	 */
-	identity_pages = xen_set_identity(e820.map, e820.nr_map);
-	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
 	return "Xen";
 }
-- cgit v1.2.3
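[Editor's note: a worked example, with assumed addresses, of the outward
page rounding done by xen_set_identity_and_release() above. A reserved
region need not start or end on a page boundary; rounding the combined
non-RAM span to whole pages keeps partial pages (for example, BIOS DMI
tables) reachable through the 1:1 mapping.]

  /* Assume a reserved region 0xd0400-0xd0a34, which is not page
   * aligned (PAGE_SIZE is 4096). */
  unsigned long start_pfn = PFN_DOWN(0xd0400);  /* = 0xd0, rounds down */
  unsigned long end_pfn   = PFN_UP(0xd0a34);    /* = 0xd1, rounds up */

  /* set_phys_range_identity(0xd0, 0xd1) then maps the whole page
   * containing the region 1:1, so the partial page holding the
   * tables stays accessible. */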