From ebdad7f865358e8153b769150d2918d4d333aad4 Mon Sep 17 00:00:00 2001
From: gikidy
Date: Wed, 10 Jun 2009 09:00:21 +0000
Subject: For X64 BASE_LIBRARY_JUMP_BUFFER is 64-bit aligned, not 128-bit
 aligned, replace movdqa with movdqu.

git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@8508 6f19259b-4bc3-4df7-8a09-765794883524
---
 MdePkg/Library/BaseLib/X64/LongJump.S | 20 ++++++++++----------
 MdePkg/Library/BaseLib/X64/SetJump.S  | 20 ++++++++++----------
 2 files changed, 20 insertions(+), 20 deletions(-)

(limited to 'MdePkg/Library')

diff --git a/MdePkg/Library/BaseLib/X64/LongJump.S b/MdePkg/Library/BaseLib/X64/LongJump.S
index a0fe61f8e7..1c82639e85 100644
--- a/MdePkg/Library/BaseLib/X64/LongJump.S
+++ b/MdePkg/Library/BaseLib/X64/LongJump.S
@@ -40,15 +40,15 @@ ASM_PFX(InternalLongJump):
     mov     0x40(%rcx), %r15
     # load non-volatile fp registers
     ldmxcsr 0x50(%rcx)
-    movdqa  0x58(%rcx), %xmm6
-    movdqa  0x68(%rcx), %xmm7
-    movdqa  0x78(%rcx), %xmm8
-    movdqa  0x88(%rcx), %xmm9
-    movdqa  0x98(%rcx), %xmm10
-    movdqa  0xA8(%rcx), %xmm11
-    movdqa  0xB8(%rcx), %xmm12
-    movdqa  0xC8(%rcx), %xmm13
-    movdqa  0xD8(%rcx), %xmm14
-    movdqa  0xE8(%rcx), %xmm15
+    movdqu  0x58(%rcx), %xmm6
+    movdqu  0x68(%rcx), %xmm7
+    movdqu  0x78(%rcx), %xmm8
+    movdqu  0x88(%rcx), %xmm9
+    movdqu  0x98(%rcx), %xmm10
+    movdqu  0xA8(%rcx), %xmm11
+    movdqu  0xB8(%rcx), %xmm12
+    movdqu  0xC8(%rcx), %xmm13
+    movdqu  0xD8(%rcx), %xmm14
+    movdqu  0xE8(%rcx), %xmm15
     mov     %rdx, %rax    # set return value
     jmp     *0x48(%rcx)
diff --git a/MdePkg/Library/BaseLib/X64/SetJump.S b/MdePkg/Library/BaseLib/X64/SetJump.S
index 3ba5be6f15..008332d5ff 100644
--- a/MdePkg/Library/BaseLib/X64/SetJump.S
+++ b/MdePkg/Library/BaseLib/X64/SetJump.S
@@ -39,15 +39,15 @@ ASM_PFX(SetJump):
     mov     %rdx,0x48(%rcx)
     # save non-volatile fp registers
     stmxcsr 0x50(%rcx)
-    movdqa  %xmm6, 0x58(%rcx)
-    movdqa  %xmm7, 0x68(%rcx)
-    movdqa  %xmm8, 0x78(%rcx)
-    movdqa  %xmm9, 0x88(%rcx)
-    movdqa  %xmm10, 0x98(%rcx)
-    movdqa  %xmm11, 0xA8(%rcx)
-    movdqa  %xmm12, 0xB8(%rcx)
-    movdqa  %xmm13, 0xC8(%rcx)
-    movdqa  %xmm14, 0xD8(%rcx)
-    movdqa  %xmm15, 0xE8(%rcx)
+    movdqu  %xmm6, 0x58(%rcx)
+    movdqu  %xmm7, 0x68(%rcx)
+    movdqu  %xmm8, 0x78(%rcx)
+    movdqu  %xmm9, 0x88(%rcx)
+    movdqu  %xmm10, 0x98(%rcx)
+    movdqu  %xmm11, 0xA8(%rcx)
+    movdqu  %xmm12, 0xB8(%rcx)
+    movdqu  %xmm13, 0xC8(%rcx)
+    movdqu  %xmm14, 0xD8(%rcx)
+    movdqu  %xmm15, 0xE8(%rcx)
     xor     %rax,%rax
     jmpq    *%rdx
--
cgit v1.2.3