[xen-unstable] [Mini-OS] Fix x86 arch_switch_thread

From: Xen patchbot-unstable
# HG changeset patch
# User Keir Fraser <[hidden email]>
# Date 1195835008 0
# Node ID d46265d21dc5148721a8cd252e72d870b38136e3
# Parent  2215f4f6f0f225c8b7d377f376ca3fa4d5d6d742
[Mini-OS] Fix x86 arch_switch_thread

Fix x86 arch_switch_thread by making it pure assembly.
The inline-assembly version was missing general-register clobbers on
x86_64, and BP should theoretically be clobbered too, but gcc cannot be
told that, so the only simple, safe solution is to use pure assembly.

Signed-off-by: Samuel Thibault <[hidden email]>
---
 extras/mini-os/arch/x86/x86_32.S        |   18 ++++++++++++++
 extras/mini-os/arch/x86/x86_64.S        |   20 ++++++++++++++++
 extras/mini-os/include/sched.h          |    1
 extras/mini-os/include/x86/arch_sched.h |   39 +-------------------------------
 4 files changed, 41 insertions(+), 37 deletions(-)
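
In the reworked interface, each thread's switch state is just two adjacent
machine words, sp followed by ip, handed to an assembly routine by address.
A minimal C sketch of that shape and of how the new macro uses it follows;
thread_ctx is a hypothetical stand-in for the relevant fields of struct
thread, and the real declarations are in the sched.h and arch_sched.h hunks
below:

    /* Sketch only -- not part of the patch. */
    struct thread_ctx {
        unsigned long sp;   /* saved stack pointer, at offset 0        */
        unsigned long ip;   /* saved resume address, one word after sp */
    };

    /* Implemented in pure assembly in x86_32.S / x86_64.S below. */
    extern void __arch_switch_threads(unsigned long *prevctx,
                                      unsigned long *nextctx);

    static inline void switch_threads(struct thread_ctx *prev,
                                      struct thread_ctx *next)
    {
        /* Equivalent of the new arch_switch_threads(prev, next) macro. */
        __arch_switch_threads(&prev->sp, &next->sp);
    }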

diff -r 2215f4f6f0f2 -r d46265d21dc5 extras/mini-os/arch/x86/x86_32.S
--- a/extras/mini-os/arch/x86/x86_32.S Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/arch/x86/x86_32.S Fri Nov 23 16:23:28 2007 +0000
@@ -288,3 +288,21 @@ ENTRY(thread_starter)
     call *%ebx
     call exit_thread
     
+ENTRY(__arch_switch_threads)
+    movl 4(%esp), %ecx /* prev */
+    movl 8(%esp), %edx /* next */
+    pushl %ebp
+    pushl %ebx
+    pushl %esi
+    pushl %edi
+    movl %esp, (%ecx) /* save ESP */
+    movl (%edx), %esp /* restore ESP */
+    movl $1f, 4(%ecx) /* save EIP */
+    pushl 4(%edx) /* restore EIP */
+    ret
+1:
+    popl %edi
+    popl %esi
+    popl %ebx
+    popl %ebp
+    ret
diff -r 2215f4f6f0f2 -r d46265d21dc5 extras/mini-os/arch/x86/x86_64.S
--- a/extras/mini-os/arch/x86/x86_64.S Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/arch/x86/x86_64.S Fri Nov 23 16:23:28 2007 +0000
@@ -386,3 +386,23 @@ ENTRY(thread_starter)
         call exit_thread
         
 
+ENTRY(__arch_switch_threads)
+        pushq %rbp
+        pushq %rbx
+        pushq %r12
+        pushq %r13
+        pushq %r14
+        pushq %r15
+        movq %rsp, (%rdi)         /* save RSP */
+        movq (%rsi), %rsp         /* restore RSP */
+        movq $1f, 8(%rdi)         /* save RIP */
+        pushq 8(%rsi)             /* restore RIP */
+        ret
+1:
+        popq %r15
+        popq %r14
+        popq %r13
+        popq %r12
+        popq %rbx
+        popq %rbp
+        ret
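
The assembly above stores the saved stack pointer at offset 0 of the context
it is given and the resume address one word later (4(%ecx) on x86_32, 8(%rdi)
on x86_64); the "keep in that order" comment added to sched.h below guards
exactly this assumption. A hedged sketch of a compile-time check that would
make it explicit (the _Static_assert and the thread_layout name are
illustrative only, not part of the patch):

    #include <stddef.h>

    struct thread_layout {       /* mirrors the sp/ip fields of struct thread */
        unsigned long sp;
        unsigned long ip;
    };

    /* __arch_switch_threads writes ip exactly one word after sp. */
    _Static_assert(offsetof(struct thread_layout, ip) ==
                   offsetof(struct thread_layout, sp) + sizeof(unsigned long),
                   "ip must immediately follow sp");
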
diff -r 2215f4f6f0f2 -r d46265d21dc5 extras/mini-os/include/sched.h
--- a/extras/mini-os/include/sched.h Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/include/sched.h Fri Nov 23 16:23:28 2007 +0000
@@ -10,6 +10,7 @@ struct thread
     char *name;
     char *stack;
 #if !defined(__ia64__)
+    /* keep in that order */
     unsigned long sp;  /* Stack pointer */
     unsigned long ip;  /* Instruction pointer */
 #else /* !defined(__ia64__) */
diff -r 2215f4f6f0f2 -r d46265d21dc5 extras/mini-os/include/x86/arch_sched.h
--- a/extras/mini-os/include/x86/arch_sched.h Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/include/x86/arch_sched.h Fri Nov 23 16:23:28 2007 +0000
@@ -15,44 +15,9 @@ static inline struct thread* get_current
     return *current;
 }
 
-#ifdef __i386__
-#define arch_switch_threads(prev, next) do {                            \
-    unsigned long esi,edi;                                              \
-    __asm__ __volatile__("pushfl\n\t"                                   \
-                         "pushl %%ebp\n\t"                              \
-                         "movl %%esp,%0\n\t"         /* save ESP */     \
-                         "movl %4,%%esp\n\t"        /* restore ESP */   \
-                         "movl $1f,%1\n\t"          /* save EIP */      \
-                         "pushl %5\n\t"             /* restore EIP */   \
-                         "ret\n\t"                                      \
-                         "1:\t"                                         \
-                         "popl %%ebp\n\t"                               \
-                         "popfl"                                        \
-                         :"=m" (prev->sp),"=m" (prev->ip),            \
-                          "=S" (esi),"=D" (edi)             \
-                         :"m" (next->sp),"m" (next->ip),              \
-                          "2" (prev), "d" (next));                      \
-} while (0)
-#elif __x86_64__
-#define arch_switch_threads(prev, next) do {                                 \
-    unsigned long rsi,rdi;                                              \
-    __asm__ __volatile__("pushfq\n\t"                                   \
-                         "pushq %%rbp\n\t"                              \
-                         "movq %%rsp,%0\n\t"         /* save RSP */     \
-                         "movq %4,%%rsp\n\t"        /* restore RSP */   \
-                         "movq $1f,%1\n\t"          /* save RIP */      \
-                         "pushq %5\n\t"             /* restore RIP */   \
-                         "ret\n\t"                                      \
-                         "1:\t"                                         \
-                         "popq %%rbp\n\t"                               \
-                         "popfq"                                        \
-                         :"=m" (prev->sp),"=m" (prev->ip),            \
-                          "=S" (rsi),"=D" (rdi)             \
-                         :"m" (next->sp),"m" (next->ip),              \
-                          "2" (prev), "d" (next));                      \
-} while (0)
-#endif
+extern void __arch_switch_threads(unsigned long *prevctx, unsigned long *nextctx);
 
+#define arch_switch_threads(prev,next) __arch_switch_threads(&(prev)->sp, &(next)->sp)
 
 
           
