author     Patrick Georgi <pgeorgi@chromium.org>  2016-01-22 12:26:52 +0100
committer  Patrick Georgi <pgeorgi@google.com>    2016-01-23 17:01:44 +0100
commit     0302b060b6e4751e4968ce46bf5cdaddfa15911b
tree       baf402ff7ef1c5fd4c5f9da802a5798db16704dd
parent     c7b2b7c67dc8afb52c3dc8e9297e5ed81fa22674
download   coreboot-0302b060b6e4751e4968ce46bf5cdaddfa15911b.tar.xz
arch/x86: remove .intel_syntax
Replace with the more familiar AT&T syntax.
Tested by sha1sum(1)ing the object files, and by checking in the objdump
output that the code in question was actually compiled.
Change-Id: Ie85b8ee5dad1794864c18683427e32f055745221
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Reviewed-on: https://review.coreboot.org/13132
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth <martinroth@google.com>
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
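
The conversion is purely syntactic: in AT&T syntax operands are written source first and destination second, registers carry a "%" prefix, immediates a "$" prefix, and memory operands use displacement(base) instead of [base+displacement]. A correct translation therefore assembles to byte-identical object code, which is what the sha1sum comparison above relies on. The sketch below is illustrative only (the file and label names are not part of this commit); it writes one of the converted sequences in both syntaxes, and disassembling the result with objdump -d shows the same encodings under both labels.

# syntax_demo.S -- illustrative only, not part of this commit.
# Assemble with: as --64 syntax_demo.S -o syntax_demo.o
# objdump -d syntax_demo.o shows identical encodings under both labels.

.code64

intel_variant:
	.intel_syntax noprefix
	xor	rax, rax		# destination first, no register prefix
	mov	ax, ss
	push	rax
	add	rax, 8			# bare immediate
	lea	rax, [rip+3]		# memory operand in brackets
	.att_syntax prefix		# back to the assembler default

att_variant:
	xor	%rax, %rax		# source first, destination second
	mov	%ss, %ax
	push	%rax
	add	$8, %rax		# '$' marks the immediate
	lea	3(%rip), %rax		# displacement(base) addressing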
-rw-r--r--  src/arch/x86/boot.c    | 53
-rw-r--r--  src/arch/x86/c_start.S | 19
-rw-r--r--  src/arch/x86/wakeup.S  | 30
3 files changed, 47 insertions(+), 55 deletions(-)
diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c
index 71b79fe12a..b23e322d6c 100644
--- a/src/arch/x86/boot.c
+++ b/src/arch/x86/boot.c
@@ -60,43 +60,40 @@ static void jmp_payload(void *entry, unsigned long buffer, unsigned long size)
 		"	push	%1\n\t"
 		"	push	%0\n\t"
-		".intel_syntax noprefix\n\t"
 		/* use iret to switch to 32-bit code segment */
-		"	xor rax,rax\n\t"
-		"	mov ax, ss\n\t"
-		"	push rax\n\t"
-		"	mov rax, rsp\n\t"
-		"	add rax, 8\n\t"
-		"	push rax\n\t"
+		"	xor %%rax,%%rax\n\t"
+		"	mov %%ss, %%ax\n\t"
+		"	push %%rax\n\t"
+		"	mov %%rsp, %%rax\n\t"
+		"	add $8, %%rax\n\t"
+		"	push %%rax\n\t"
 		"	pushfq\n\t"
-		"	push 0x10\n\t"
-		"	lea rax,[rip+3]\n\t"
-		"	push rax\n\t"
+		"	push $0x10\n\t"
+		"	lea 3(%%rip), %%rax\n\t"
+		"	push %%rax\n\t"
 		"	iretq\n\t"
 		".code32\n\t"
 		/* disable paging */
-		"	mov eax, cr0\n\t"
-		"	btc eax, 31\n\t"
-		"	mov cr0, eax\n\t"
+		"	mov %%cr0, %%eax\n\t"
+		"	btc $31, %%eax\n\t"
+		"	mov %%eax, %%cr0\n\t"
 		/* disable long mode */
-		"	mov ecx, 0xC0000080\n\t"
+		"	mov $0xC0000080, %%ecx\n\t"
 		"	rdmsr\n\t"
-		"	btc eax, 8\n\t"
+		"	btc $8, %%eax\n\t"
 		"	wrmsr\n\t"
-		"	pop eax\n\t"
-		"	add esp, 4\n\t"
-		"	pop ebx\n\t"
-		"	add esp, 4\n\t"
-		"	pop ecx\n\t"
-
-		"	add esp, 4\n\t"
-		"	pop edx\n\t"
-		"	add esp, 4\n\t"
-		"	pop esi\n\t"
-		"	add esp, 4\n\t"
-
-		".att_syntax prefix\n\t"
+		"	pop %%eax\n\t"
+		"	add $4, %%esp\n\t"
+		"	pop %%ebx\n\t"
+		"	add $4, %%esp\n\t"
+		"	pop %%ecx\n\t"
+
+		"	add $4, %%esp\n\t"
+		"	pop %%edx\n\t"
+		"	add $4, %%esp\n\t"
+		"	pop %%esi\n\t"
+		"	add $4, %%esp\n\t"
 #endif
 
 		/* Save the callee save registers... */
diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S
index 124bfa84bf..65ab0be042 100644
--- a/src/arch/x86/c_start.S
+++ b/src/arch/x86/c_start.S
@@ -400,29 +400,26 @@ _idt_end:
 
 #ifdef __x86_64__
 SetCodeSelector:
-.intel_syntax noprefix
-
 	# save rsp because iret will align it to a 16 byte boundary
-	mov	rdx, rsp
+	mov	%rsp, %rdx
 
 	# use iret to jump to a 64-bit offset in a new code segment
 	# iret will pop cs:rip, flags, then ss:rsp
-	mov	ax, ss		# need to push ss..
-	push	rax		# push ss instuction not valid in x64 mode, so use ax
-	push	rsp
+	mov	%ss, %ax	# need to push ss..
+	push	%rax		# push ss instuction not valid in x64 mode, so use ax
+	push	%rsp
 	pushfq
-	push	rcx		# cx is code segment selector from caller
-	mov	rax, offset setCodeSelectorLongJump
-	push	rax
+	push	%rcx		# cx is code segment selector from caller
+	mov	$setCodeSelectorLongJump, %rax
+	push	%rax
 
 	# the iret will continue at next instruction, with the new cs value loaded
 	iretq
 
 setCodeSelectorLongJump:
 	# restore rsp, it might not have been 16-byte aligned on entry
-	mov	rsp, rdx
+	mov	%rdx, %rsp
 	ret
-.att_syntax prefix
 
 .previous
 .code64
diff --git a/src/arch/x86/wakeup.S b/src/arch/x86/wakeup.S
index 2c0950e399..aad72c2d61 100644
--- a/src/arch/x86/wakeup.S
+++ b/src/arch/x86/wakeup.S
@@ -29,32 +29,30 @@
 	.globl __wakeup
 __wakeup:
 #ifdef __x86_64__
-	.intel_syntax noprefix
-	xor	rax,rax
-	mov	ax, ss
-	push	rax
-	mov	rax, rsp
-	add	rax, 8
-	push	rax
+	xor	%rax,%rax
+	mov	%ss, %ax
+	push	%rax
+	mov	%rsp, %rax
+	add	$8, %rax
+	push	%rax
 	pushfq
-	push	0x10
-	lea	rax,[rip+3]
-	push	rax
+	push	$0x10
+	lea	3(%rip), %rax
+	push	%rax
 	iretq
 .code32
 	/* disable paging */
-	mov	eax, cr0
-	btc	eax, 31
-	mov	cr0, eax
+	mov	%cr0, %eax
+	btc	$31, %eax
+	mov	%eax, %cr0
 	/* disable long mode */
-	mov	ecx, 0xC0000080
+	mov	$0xC0000080, %ecx
 	rdmsr
-	btc	eax, 8
+	btc	$8, %eax
 	wrmsr
-	.att_syntax prefix
 #endif
 	/* First prepare the jmp to the resume vector */
 	mov	0x4(%esp), %eax		/* vector */
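
For readers following the converted code: boot.c and wakeup.S share the same trick for leaving long mode. They build an interrupt-return frame by hand and execute iretq so that CS is reloaded with the 32-bit code segment selector (0x10), then clear CR0.PG and EFER.LME once running in the 32-bit segment. The sketch below only restates that shared sequence in AT&T syntax with comments on what each push supplies to iretq; the selector value and MSR number are taken from the diff above, nothing beyond it is introduced.

.code64
	/* iretq pops rip, cs, rflags, rsp and ss -- push them in reverse */
	xor	%rax, %rax
	mov	%ss, %ax
	push	%rax			/* ss: keep the current stack segment */
	mov	%rsp, %rax
	add	$8, %rax		/* rsp: its value before the ss push */
	push	%rax
	pushfq				/* rflags */
	push	$0x10			/* cs: 32-bit code segment selector */
	lea	3(%rip), %rax		/* rip: first byte after the iretq */
	push	%rax
	iretq				/* reloads cs, execution resumes below */
.code32
	mov	%cr0, %eax
	btc	$31, %eax		/* clear CR0.PG: disable paging */
	mov	%eax, %cr0
	mov	$0xC0000080, %ecx	/* IA32_EFER */
	rdmsr
	btc	$8, %eax		/* clear EFER.LME: leave long mode */
	wrmsr

The pop/add pairs that follow in boot.c exist because the payload arguments were pushed as 8-byte quantities while still in long mode but are popped as 4-byte values in 32-bit mode, so each pop is followed by add $4, %esp to skip the remaining four bytes.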