# HG changeset patch
# User "Eduardo Habkost <ehabkost@redhat.com>"
# Date 1190317568 10800
# Node ID ab958de6e67d1b4f6c321cd46d2793d2409b91fd
# Parent  8792a16b6dd531cad0ed28d9bef86e3e597ea8db
x86_64: Fix sleazy-fpu under Xen

- Make __switch_to() reset fpu_counter when the previous task did not use
  the FPU, like __unlazy_fpu() does
- Make math_state_restore() call clts() before restoring the FPU state on
  the task-switch path (clts() is still not called during the
  device_not_available trap, because Xen already clears TS when delivering
  the virtual trap); see the sketch below
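
For reference, a minimal sketch (not part of this patch) of how the
sleazy-FPU heuristic uses fpu_counter around a task switch in the 2.6.x
x86_64 code; names and the threshold follow mainline, so treat it as an
approximation:

	if (prev_p->thread_info->status & TS_USEDFPU) {
		__save_init_fpu(prev_p);	/* lazily save the outgoing state */
		HYPERVISOR_fpu_taskswitch(1);	/* ask Xen to set TS for us */
	} else
		prev_p->fpu_counter = 0;	/* FPU not used: reset the streak */

	/*
	 * If the incoming task used the FPU for several consecutive
	 * timeslices, restore its state eagerly instead of waiting for the
	 * trap.  No device_not_available trap occurs on this path, so
	 * math_state_restore() has to do clts() itself, which is why the
	 * patch moves clts() out of __math_state_restore().
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();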

diff -r 8792a16b6dd5 -r ab958de6e67d arch/x86_64/kernel/entry-xen.S
--- a/arch/x86_64/kernel/entry-xen.S	Thu Sep 20 15:30:16 2007 -0300
+++ b/arch/x86_64/kernel/entry-xen.S	Thu Sep 20 16:46:08 2007 -0300
@@ -1071,7 +1071,9 @@ END(simd_coprocessor_error)
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
-	zeroentry math_state_restore
+	# 'clts' is done by Xen during virtual trap, so we can call
+	# __math_state_restore instead of math_state_restore
+	zeroentry __math_state_restore
 END(device_not_available)
 
 	/* runs on exception stack */
diff -r 8792a16b6dd5 -r ab958de6e67d arch/x86_64/kernel/process-xen.c
--- a/arch/x86_64/kernel/process-xen.c	Thu Sep 20 15:30:16 2007 -0300
+++ b/arch/x86_64/kernel/process-xen.c	Thu Sep 20 16:46:08 2007 -0300
@@ -637,7 +637,8 @@ __switch_to(struct task_struct *prev_p, 
 	if (prev_p->thread_info->status & TS_USEDFPU) {
 		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
 		HYPERVISOR_fpu_taskswitch(1);
-	}
+	} else
+		prev_p->fpu_counter = 0;
 
 	/* 
 	 * Switch the PDA context.
diff -r 8792a16b6dd5 -r ab958de6e67d arch/x86_64/kernel/traps-xen.c
--- a/arch/x86_64/kernel/traps-xen.c	Thu Sep 20 15:30:16 2007 -0300
+++ b/arch/x86_64/kernel/traps-xen.c	Thu Sep 20 16:46:08 2007 -0300
@@ -1064,16 +1064,21 @@ asmlinkage void __attribute__((weak)) mc
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
  */
-asmlinkage void math_state_restore(void)
+asmlinkage void __math_state_restore(void)
 {
 	struct task_struct *me = current;
-        /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
 
 	if (!used_math())
 		init_fpu(me);
 	restore_fpu_checking(&me->thread.i387.fxsave);
 	task_thread_info(me)->status |= TS_USEDFPU;
 	me->fpu_counter++;
+}
+
+asmlinkage void math_state_restore(void)
+{
+	clts();			/* Allow maths ops (or we recurse) */
+	__math_state_restore();
 }
 
 
diff -r 8792a16b6dd5 -r ab958de6e67d arch/x86_64/kernel/traps.c
--- a/arch/x86_64/kernel/traps.c	Thu Sep 20 15:30:16 2007 -0300
+++ b/arch/x86_64/kernel/traps.c	Thu Sep 20 16:46:08 2007 -0300
@@ -1056,16 +1056,21 @@ asmlinkage void __attribute__((weak)) mc
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
  */
-asmlinkage void math_state_restore(void)
+asmlinkage void __math_state_restore(void)
 {
 	struct task_struct *me = current;
-	clts();			/* Allow maths ops (or we recurse) */
 
 	if (!used_math())
 		init_fpu(me);
 	restore_fpu_checking(&me->thread.i387.fxsave);
 	task_thread_info(me)->status |= TS_USEDFPU;
 	me->fpu_counter++;
+}
+
+asmlinkage void math_state_restore(void)
+{
+	clts();			/* Allow maths ops (or we recurse) */
+	__math_state_restore();
 }
 
 void __init trap_init(void)