Subject: Add fallback when XENMEM_exchange fails to replace contiguous region
From: jbeulich@novell.com
Patch-mainline: obsolete
References: 181869

When xen_destroy_contiguous_region() fails to exchange the region back with
Xen in one piece, fall back to returning it page by page: each special frame
is replaced by the machine frame of an individually allocated page, and the
emptied replacement pages are handed to the balloon driver through a new
free_empty_pages() helper. This avoids losing precious special memory in
places where any memory can be used.

Index: 10.3-2007-10-22/arch/i386/mm/hypervisor.c
===================================================================
--- 10.3-2007-10-22.orig/arch/i386/mm/hypervisor.c	2007-10-22 13:49:28.000000000 +0200
+++ 10.3-2007-10-22/arch/i386/mm/hypervisor.c	2007-10-22 14:00:22.000000000 +0200
@@ -41,6 +41,7 @@
 #include <xen/interface/memory.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/highmem.h>
 #include <asm/tlbflush.h>
 
 #ifdef CONFIG_X86_64
@@ -442,6 +443,83 @@ void xen_destroy_contiguous_region(unsig
 		BUG();
 
 	balloon_unlock(flags);
+
+	if (unlikely(!success)) {
+		/* Try hard to get the special memory back to Xen. */
+		exchange.in.extent_order = 0;
+		set_xen_guest_handle(exchange.in.extent_start, &in_frame);
+
+		for (i = 0; i < (1UL<<order); i++) {
+			struct page *page = alloc_page(__GFP_HIGHMEM);
+			unsigned long pfn;
+			mmu_update_t mmu;
+			unsigned int j = 0;
+
+			if (!page) {
+				printk(KERN_WARNING "Xen and kernel out of memory "
+				       "while trying to release an order %u "
+				       "contiguous region\n", order);
+				break;
+			}
+			pfn = page_to_pfn(page);
+
+			balloon_lock(flags);
+
+			if (!PageHighMem(page)) {
+				void *v = __va(pfn << PAGE_SHIFT);
+
+				scrub_pages(v, 1);
+				MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
+							__pte_ma(0), UVMF_INVLPG|UVMF_ALL);
+				++j;
+			}
+#ifdef CONFIG_XEN_SCRUB_PAGES
+			else {
+				scrub_pages(kmap(page), 1);
+				kunmap(page);
+				kmap_flush_unused();
+			}
+#endif
+
+			frame = pfn_to_mfn(pfn);
+			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+
+			MULTI_update_va_mapping(cr_mcl + j, vstart,
+						pfn_pte_ma(frame, PAGE_KERNEL),
+						UVMF_INVLPG|UVMF_ALL);
+			++j;
+
+			pfn = __pa(vstart) >> PAGE_SHIFT;
+			set_phys_to_machine(pfn, frame);
+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+				mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+				mmu.val = pfn;
+				cr_mcl[j].op = __HYPERVISOR_mmu_update;
+				cr_mcl[j].args[0] = (unsigned long)&mmu;
+				cr_mcl[j].args[1] = 1;
+				cr_mcl[j].args[2] = 0;
+				cr_mcl[j].args[3] = DOMID_SELF;
+				++j;
+			}
+
+			cr_mcl[j].op = __HYPERVISOR_memory_op;
+			cr_mcl[j].args[0] = XENMEM_decrease_reservation;
+			cr_mcl[j].args[1] = (unsigned long)&exchange.in;
+
+			if (HYPERVISOR_multicall(cr_mcl, j + 1))
+				BUG();
+			BUG_ON(cr_mcl[j].result != 1);
+			while (j--)
+				BUG_ON(cr_mcl[j].result != 0);
+
+			balloon_unlock(flags);
+
+			free_empty_pages(&page, 1);
+
+			in_frame++;
+			vstart += PAGE_SIZE;
+		}
+	}
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
Index: 10.3-2007-10-22/drivers/xen/balloon/balloon.c
===================================================================
--- 10.3-2007-10-22.orig/drivers/xen/balloon/balloon.c	2007-10-22 14:00:20.000000000 +0200
+++ 10.3-2007-10-22/drivers/xen/balloon/balloon.c	2007-10-22 14:00:22.000000000 +0200
@@ -680,7 +680,7 @@ struct page **alloc_empty_pages_and_page
 	goto out;
 }
 
-void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec)
 {
 	unsigned long flags;
 	int i;
@@ -695,11 +695,24 @@ void free_empty_pages_and_pagevec(struct
 	}
 	balloon_unlock(flags);
 
-	kfree(pagevec);
+	if (free_vec)
+		kfree(pagevec);
+	else
+		totalram_pages = bs.current_pages -= nr_pages;
 
 	schedule_work(&balloon_worker);
 }
 
+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, 1);
+}
+
+void free_empty_pages(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, 0);
+}
+
 void balloon_release_driver_page(struct page *page)
 {
 	unsigned long flags;
Index: 10.3-2007-10-22/include/xen/balloon.h
===================================================================
--- 10.3-2007-10-22.orig/include/xen/balloon.h	2007-10-22 13:48:11.000000000 +0200
+++ 10.3-2007-10-22/include/xen/balloon.h	2007-10-22 14:00:22.000000000 +0200
@@ -44,6 +44,10 @@ void balloon_update_driver_allowance(lon
 struct page **alloc_empty_pages_and_pagevec(int nr_pages);
 void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
 
+/* Free an empty page range (not allocated through
+   alloc_empty_pages_and_pagevec), adding to the balloon. */
+void free_empty_pages(struct page **pagevec, int nr_pages);
+
 void balloon_release_driver_page(struct page *page);
 
 /*