author    Jimmy Huang <jimmy.huang@mediatek.com>  2015-08-13 10:48:49 +0800
committer Patrick Georgi <pgeorgi@google.com>     2015-08-28 06:43:53 +0000
commit    46502c9e374c30018773f09d0be78577cbcf3d0e (patch)
tree      462f82016c68807b4f205f85dfa776ded77c64da /src/arch/arm64
parent    4cd0d2f5693093f442a51b4ebb97d30ce4403abd (diff)
arm64: declare do_dcsw_op as function
do_dcsw_op is coded as a label, so it's possible that the linker will
place do_dcsw_op at an unaligned address. To avoid this situation, we
declare do_dcsw_op as a function. Also explicitly set the 2nd argument
of ENTRY_WITH_ALIGN(name, bits) to 2.

do_dcsw_op:
	cbz	x3, exit
   c103d:	b40003e3 	cbz	x3, c10b9 <exit>
	mov	x10, xzr
   c1041:	aa1f03ea 	mov	x10, xzr
	adr	x14, dcsw_loop_table	// compute inner loop address

BRANCH=none
BUG=none
TEST=build and check do_dcsw_op in elf file

Change-Id: Ieb5f4188d6126ac9f6ddb0bfcc67452f79de94ad
Signed-off-by: Patrick Georgi <patrick@georgi-clan.de>
Original-Commit-Id: 4ee26b76089fab82cf4fb9b21c9f15b29e57b453
Original-Change-Id: Id331e8ecab7ea8782e97c10b13e8810955747a51
Original-Signed-off-by: Jimmy Huang <jimmy.huang@mediatek.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/293660
Original-Reviewed-by: Julius Werner <jwerner@chromium.org>
Original-Commit-Queue: Yidi Lin <yidi.lin@mediatek.com>
Original-Tested-by: Yidi Lin <yidi.lin@mediatek.com>
Reviewed-on: http://review.coreboot.org/11395
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
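[Editorial note] For reference, this is roughly what ENTRY(do_dcsw_op)
expands to after the change. The .align directive and the label come
straight from the ENTRY_WITH_ALIGN fragment visible in the asm.h hunk
below; the .global line is an assumption about the rest of the macro body:

	.global	do_dcsw_op	/* assumed part of ENTRY_WITH_ALIGN's body */
	.align	2		/* AArch64 gas: .align n pads to 2^n bytes, so 4 here */
do_dcsw_op:			/* label now starts on an instruction-sized boundary */

With the old ENTRY_WITH_ALIGN(name, 0), the .align 0 was a no-op, so the
label could land wherever the preceding bytes left the location counter.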
Diffstat (limited to 'src/arch/arm64')
-rw-r--r--  src/arch/arm64/armv8/cache_helpers.S | 3
-rw-r--r--  src/arch/arm64/include/arch/asm.h    | 2
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/arch/arm64/armv8/cache_helpers.S b/src/arch/arm64/armv8/cache_helpers.S
index dc74dad04f..b94bc30781 100644
--- a/src/arch/arm64/armv8/cache_helpers.S
+++ b/src/arch/arm64/armv8/cache_helpers.S
@@ -54,7 +54,7 @@
 	b	do_dcsw_op
 .endm
 
-do_dcsw_op:
+ENTRY(do_dcsw_op)
 	cbz	x3, exit
 	mov	x10, xzr
 	adr	x14, dcsw_loop_table	// compute inner loop address
@@ -92,6 +92,7 @@ level_done:
 	isb
 exit:
 	ret
+ENDPROC(do_dcsw_op)
 
 .macro	dcsw_loop _op
 loop2_\_op:
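[Editorial note] ENTRY() provides the alignment; ENDPROC() closes the
symbol with an ELF type and size, which is what "declare as function"
means at the object-file level. A sketch of the conventional definition,
following the Linux-style asm.h that coreboot uses; the .type line is an
assumption, while END() is shown verbatim in the hunk below:

#define ENDPROC(name) \
	.type name, %function; \	/* mark name as a function symbol */
	END(name)			/* END() emits: .size name, .-name */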
diff --git a/src/arch/arm64/include/arch/asm.h b/src/arch/arm64/include/arch/asm.h
index 851f3f94c7..878509ec5a 100644
--- a/src/arch/arm64/include/arch/asm.h
+++ b/src/arch/arm64/include/arch/asm.h
@@ -30,7 +30,7 @@
 	.align bits; \
 	name:
 
-#define ENTRY(name) ENTRY_WITH_ALIGN(name, 0)
+#define ENTRY(name) ENTRY_WITH_ALIGN(name, 2)
 
 #define END(name) \
 	.size name, .-name
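[Editorial note] Per the TEST= line above, the result can be checked in
the ELF file. A minimal standalone demo of the same .align semantics
(hypothetical file name; any AArch64 binutils will do):

/* align_demo.S -- assemble: aarch64-elf-as align_demo.S -o align_demo.o
 * inspect:  aarch64-elf-nm align_demo.o
 * aligned_sym should land on a 4-byte boundary (e.g. address 0x4)
 * despite the stray byte placed before it. */
	.text
	.byte	0		/* deliberately knock the location counter off alignment */
	.align	2		/* on AArch64, .align n means 2^n bytes: pad up to 4 */
aligned_sym:
	ret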