path: root/ArmPkg/Library/ArmLib/ArmV7
author     andrewfish <andrewfish@6f19259b-4bc3-4df7-8a09-765794883524>   2010-03-03 04:14:16 +0000
committer  andrewfish <andrewfish@6f19259b-4bc3-4df7-8a09-765794883524>   2010-03-03 04:14:16 +0000
commit     548af3e7805cf6f1cdc75b54f685d43d9bb58d32 (patch)
tree       045b3e0bafdbe0ac92fbc22be7803fe28b528a65 /ArmPkg/Library/ArmLib/ArmV7
parent     c029854f209612caafa6dad6b0c8e6e639bc41e5 (diff)
download   edk2-platforms-548af3e7805cf6f1cdc75b54f685d43d9bb58d32.tar.xz
Syncing GCC and ARMASM assembly. Lowercased chunks of the ARMASM source to make diffing it against the GCC version easier.
git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@10163 6f19259b-4bc3-4df7-8a09-765794883524
Diffstat (limited to 'ArmPkg/Library/ArmLib/ArmV7')
-rw-r--r--  ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S    |  18
-rw-r--r--  ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm  | 203
2 files changed, 111 insertions(+), 110 deletions(-)
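
Besides the case normalization, the change adds dsb/isb barrier pairs after the MCR that writes the system control register when the MMU is enabled, so the write completes and the pipeline refetches with the new translation state before the next instruction runs. A minimal sketch of that pattern in GCC inline assembly (function name is hypothetical, not the EDK2 source):

    #include <stdint.h>

    /* Sketch only: write SCTLR, then DSB + ISB, as the patched ArmEnableMmu does. */
    static inline void EnableMmuSketch (void)
    {
      uint32_t Sctlr;
      __asm__ volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r"(Sctlr));    /* read SCTLR     */
      Sctlr |= 1u;                                                     /* set the M bit  */
      __asm__ volatile ("mcr p15, 0, %0, c1, c0, 0" :: "r"(Sctlr) : "memory");
      __asm__ volatile ("dsb" ::: "memory");   /* ensure the register write has completed */
      __asm__ volatile ("isb" ::: "memory");   /* refetch with the MMU now enabled        */
    }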
diff --git a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
index b4ec9b5122..43f9e8a7e8 100644
--- a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
+++ b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
@@ -12,8 +12,6 @@
#
#------------------------------------------------------------------------------
-.text
-.align 2
.globl ASM_PFX(ArmInvalidateInstructionCache)
.globl ASM_PFX(ArmInvalidateDataCacheEntryByMVA)
.globl ASM_PFX(ArmCleanDataCacheEntryByMVA)
@@ -29,8 +27,6 @@
.globl ASM_PFX(ArmDisableDataCache)
.globl ASM_PFX(ArmEnableInstructionCache)
.globl ASM_PFX(ArmDisableInstructionCache)
-.globl ASM_PFX(ArmEnableExtendPTConfig)
-.globl ASM_PFX(ArmDisableExtendPTConfig)
.globl ASM_PFX(ArmEnableBranchPrediction)
.globl ASM_PFX(ArmDisableBranchPrediction)
.globl ASM_PFX(ArmV7AllDataCachesOperation)
@@ -38,6 +34,8 @@
.globl ASM_PFX(ArmDataSyncronizationBarrier)
.globl ASM_PFX(ArmInstructionSynchronizationBarrier)
+.text
+.align 2
.set DC_ON, (0x1<<2)
.set IC_ON, (0x1<<12)
@@ -104,11 +102,14 @@ ASM_PFX(ArmEnableMmu):
mrc p15,0,R0,c1,c0,0
orr R0,R0,#1
mcr p15,0,R0,c1,c0,0
+ dsb
+ isb
bx LR
ASM_PFX(ArmMmuEnabled):
mrc p15,0,R0,c1,c0,0
and R0,R0,#1
+ isb
bx LR
@@ -118,7 +119,6 @@ ASM_PFX(ArmDisableMmu):
mrc p15,0,R0,c1,c0,0
bic R0,R0,#1
mcr p15,0,R0,c1,c0,0 @Disable MMU
- mov R0,#0
dsb
isb
bx LR
@@ -192,14 +192,16 @@ Loop1:
cmp R12, #2
blt L_Skip @ no cache or only instruction cache at this level
mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
- isb @ ISB to sync the change to the CacheSizeID reg
- mcr p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)
+ isb @ isb to sync the change to the CacheSizeID reg
+ mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)
and R2, R12, #0x7 @ extract the line length field
- and R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)
+ add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)
+@ ldr R4, =0x3FF
mov R4, #0x400
sub R4, R4, #1
ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)
clz R5, R4 @ R5 is the bit position of the way size increment
+@ ldr R7, =0x00007FFF
mov R7, #0x00008000
sub R7, R7, #1
ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)
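
The hunk above also fixes two real bugs in the set/way loop: the Cache Size ID register must be read with mrc (the old code issued mcr, a write), and the line-length field must be added to 4, not masked with it, to get log2 of the line length in bytes (minimum 2^4 = 16 bytes). A hedged C sketch of the corrected decode, using the ARMv7 CCSIDR field layout (helper name is hypothetical, not the EDK2 source):

    #include <stdint.h>

    /* Sketch of the corrected CCSIDR decode for the cache level
       previously selected through CSSELR.                        */
    static void DecodeCcsidrSketch (uint32_t *LineShift, uint32_t *MaxWay, uint32_t *MaxSet)
    {
      uint32_t Ccsidr;

      /* Read (MRC, not MCR) the Cache Size ID register. */
      __asm__ volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r"(Ccsidr));

      *LineShift = (Ccsidr & 0x7) + 4;       /* add 4: log2(line length in bytes) */
      *MaxWay    = (Ccsidr >> 3)  & 0x3FF;   /* associativity - 1                 */
      *MaxSet    = (Ccsidr >> 13) & 0x7FFF;  /* number of sets - 1                */
    }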
diff --git a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm
index e2676d4995..72f4ec2cf5 100644
--- a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm
+++ b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm
@@ -34,85 +34,83 @@
EXPORT ArmDataSyncronizationBarrier
EXPORT ArmInstructionSynchronizationBarrier
+ AREA ArmCacheLib, CODE, READONLY
+ PRESERVE8
DC_ON EQU ( 0x1:SHL:2 )
IC_ON EQU ( 0x1:SHL:12 )
- AREA ArmCacheLib, CODE, READONLY
- PRESERVE8
-
ArmInvalidateDataCacheEntryByMVA
- MCR p15, 0, r0, c7, c6, 1 ; invalidate single data cache line
- DSB
- ISB
- BX lr
+ mcr p15, 0, r0, c7, c6, 1 ; invalidate single data cache line
+ dsb
+ isb
+ bx lr
ArmCleanDataCacheEntryByMVA
- MCR p15, 0, r0, c7, c10, 1 ; clean single data cache line
- DSB
- ISB
- BX lr
+ mcr p15, 0, r0, c7, c10, 1 ; clean single data cache line
+ dsb
+ isb
+ bx lr
ArmCleanInvalidateDataCacheEntryByMVA
- MCR p15, 0, r0, c7, c14, 1 ; clean and invalidate single data cache line
- DSB
- ISB
- BX lr
+ mcr p15, 0, r0, c7, c14, 1 ; clean and invalidate single data cache line
+ dsb
+ isb
+ bx lr
ArmInvalidateDataCacheEntryBySetWay
mcr p15, 0, r0, c7, c6, 2 ; Invalidate this line
- DSB
- ISB
+ dsb
+ isb
bx lr
ArmCleanInvalidateDataCacheEntryBySetWay
mcr p15, 0, r0, c7, c14, 2 ; Clean and Invalidate this line
- DSB
- ISB
+ dsb
+ isb
bx lr
ArmCleanDataCacheEntryBySetWay
mcr p15, 0, r0, c7, c10, 2 ; Clean this line
- DSB
- ISB
+ dsb
+ isb
bx lr
ArmDrainWriteBuffer
mcr p15, 0, r0, c7, c10, 4 ; Drain write buffer for sync
- DSB
- ISB
+ dsb
+ isb
bx lr
ArmInvalidateInstructionCache
- MOV R0,#0
- MCR p15,0,R0,c7,c5,0 ;Invalidate entire instruction cache
- MOV R0,#0
- MCR p15,0,R0,c7,c5,4 ;Instruction synchronization barrier
- DSB
- ISB
- BX LR
+ mov R0,#0
+ mcr p15,0,R0,c7,c5,0 ;Invalidate entire instruction cache
+ mov R0,#0
+ dsb
+ isb
+ bx LR
ArmEnableMmu
mrc p15,0,R0,c1,c0,0
orr R0,R0,#1
mcr p15,0,R0,c1,c0,0
- DSB
- ISB
+ dsb
+ isb
bx LR
ArmMmuEnabled
mrc p15,0,R0,c1,c0,0
and R0,R0,#1
- ISB
+ isb
bx LR
ArmDisableMmu
@@ -121,117 +119,118 @@ ArmDisableMmu
mrc p15,0,R0,c1,c0,0
bic R0,R0,#1
mcr p15,0,R0,c1,c0,0 ;Disable MMU
- DSB
- ISB
+ dsb
+ isb
bx LR
ArmEnableDataCache
- LDR R1,=DC_ON
- MRC p15,0,R0,c1,c0,0 ;Read control register configuration data
- ORR R0,R0,R1 ;Set C bit
- MCR p15,0,r0,c1,c0,0 ;Write control register configuration data
- DSB
- ISB
- BX LR
+ ldr R1,=DC_ON
+ mrc p15,0,R0,c1,c0,0 ;Read control register configuration data
+ orr R0,R0,R1 ;Set C bit
+ mcr p15,0,r0,c1,c0,0 ;Write control register configuration data
+ dsb
+ isb
+ bx LR
ArmDisableDataCache
- LDR R1,=DC_ON
- MRC p15,0,R0,c1,c0,0 ;Read control register configuration data
- BIC R0,R0,R1 ;Clear C bit
- MCR p15,0,r0,c1,c0,0 ;Write control register configuration data
- ISB
- BX LR
+ ldr R1,=DC_ON
+ mrc p15,0,R0,c1,c0,0 ;Read control register configuration data
+ bic R0,R0,R1 ;Clear C bit
+ mcr p15,0,r0,c1,c0,0 ;Write control register configuration data
+ isb
+ bx LR
ArmEnableInstructionCache
- LDR R1,=IC_ON
- MRC p15,0,R0,c1,c0,0 ;Read control register configuration data
- ORR R0,R0,R1 ;Set I bit
- MCR p15,0,r0,c1,c0,0 ;Write control register configuration data
- ISB
- BX LR
+ ldr R1,=IC_ON
+ mrc p15,0,R0,c1,c0,0 ;Read control register configuration data
+ orr R0,R0,R1 ;Set I bit
+ mcr p15,0,r0,c1,c0,0 ;Write control register configuration data
+ dsb
+ isb
+ bx LR
ArmDisableInstructionCache
- LDR R1,=IC_ON
- MRC p15,0,R0,c1,c0,0 ;Read control register configuration data
+ ldr R1,=IC_ON
+ mrc p15,0,R0,c1,c0,0 ;Read control register configuration data
BIC R0,R0,R1 ;Clear I bit.
- MCR p15,0,r0,c1,c0,0 ;Write control register configuration data
- ISB
- BX LR
+ mcr p15,0,r0,c1,c0,0 ;Write control register configuration data
+ isb
+ bx LR
ArmEnableBranchPrediction
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #0x00000800
mcr p15, 0, r0, c1, c0, 0
- ISB
+ isb
bx LR
ArmDisableBranchPrediction
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x00000800
mcr p15, 0, r0, c1, c0, 0
- ISB
+ isb
bx LR
ArmV7AllDataCachesOperation
- STMFD SP!,{r4-r12, LR}
- MOV R1, R0 ; Save Function call in R1
- MRC p15, 1, R6, c0, c0, 1 ; Read CLIDR
- ANDS R3, R6, #&7000000 ; Mask out all but Level of Coherency (LoC)
- MOV R3, R3, LSR #23 ; Cache level value (naturally aligned)
- BEQ Finished
- MOV R10, #0
+ stmfd SP!,{r4-r12, LR}
+ mov R1, R0 ; Save Function call in R1
+ mrc p15, 1, R6, c0, c0, 1 ; Read CLIDR
+ ands R3, R6, #&7000000 ; Mask out all but Level of Coherency (LoC)
+ mov R3, R3, LSR #23 ; Cache level value (naturally aligned)
+ beq Finished
+ mov R10, #0
Loop1
- ADD R2, R10, R10, LSR #1 ; Work out 3xcachelevel
- MOV R12, R6, LSR R2 ; bottom 3 bits are the Cache type for this level
- AND R12, R12, #7 ; get those 3 bits alone
- CMP R12, #2
- BLT Skip ; no cache or only instruction cache at this level
- MCR p15, 2, R10, c0, c0, 0 ; write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
- ISB ; ISB to sync the change to the CacheSizeID reg
- MRC p15, 1, R12, c0, c0, 0 ; reads current Cache Size ID register (CCSIDR)
- AND R2, R12, #&7 ; extract the line length field
- ADD R2, R2, #4 ; add 4 for the line length offset (log2 16 bytes)
- LDR R4, =0x3FF
- ANDS R4, R4, R12, LSR #3 ; R4 is the max number on the way size (right aligned)
- CLZ R5, R4 ; R5 is the bit position of the way size increment
- LDR R7, =0x00007FFF
- ANDS R7, R7, R12, LSR #13 ; R7 is the max number of the index size (right aligned)
+ add R2, R10, R10, LSR #1 ; Work out 3xcachelevel
+ mov R12, R6, LSR R2 ; bottom 3 bits are the Cache type for this level
+ and R12, R12, #7 ; get those 3 bits alone
+ cmp R12, #2
+ blt Skip ; no cache or only instruction cache at this level
+ mcr p15, 2, R10, c0, c0, 0 ; write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
+ isb ; isb to sync the change to the CacheSizeID reg
+ mrc p15, 1, R12, c0, c0, 0 ; reads current Cache Size ID register (CCSIDR)
+ and R2, R12, #&7 ; extract the line length field
+ add R2, R2, #4 ; add 4 for the line length offset (log2 16 bytes)
+ ldr R4, =0x3FF
+ ands R4, R4, R12, LSR #3 ; R4 is the max number on the way size (right aligned)
+ clz R5, R4 ; R5 is the bit position of the way size increment
+ ldr R7, =0x00007FFF
+ ands R7, R7, R12, LSR #13 ; R7 is the max number of the index size (right aligned)
Loop2
- MOV R9, R4 ; R9 working copy of the max way size (right aligned)
+ mov R9, R4 ; R9 working copy of the max way size (right aligned)
Loop3
- ORR R0, R10, R9, LSL R5 ; factor in the way number and cache number into R11
- ORR R0, R0, R7, LSL R2 ; factor in the index number
+ orr R0, R10, R9, LSL R5 ; factor in the way number and cache number into R11
+ orr R0, R0, R7, LSL R2 ; factor in the index number
- BLX R1
+ blx R1
- SUBS R9, R9, #1 ; decrement the way number
- BGE Loop3
- SUBS R7, R7, #1 ; decrement the index
- BGE Loop2
+ subs R9, R9, #1 ; decrement the way number
+ bge Loop3
+ subs R7, R7, #1 ; decrement the index
+ bge Loop2
Skip
- ADD R10, R10, #2 ; increment the cache number
- CMP R3, R10
- BGT Loop1
+ add R10, R10, #2 ; increment the cache number
+ cmp R3, R10
+ bgt Loop1
Finished
- LDMFD SP!, {r4-r12, lr}
- BX LR
+ ldmfd SP!, {r4-r12, lr}
+ bx LR
ArmDataMemoryBarrier
- DMB
- BX LR
+ dmb
+ bx LR
ArmDataSyncronizationBarrier
- DSB
- BX LR
+ dsb
+ bx LR
ArmInstructionSynchronizationBarrier
- ISB
- BX LR
+ isb
+ bx LR
END
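
For reference, the walk that both versions of ArmV7AllDataCachesOperation perform (select each data/unified cache level from CLIDR, decode CCSIDR, then call the operation passed in R0 once per set/way operand) can be sketched in C roughly as follows. This is an illustration based on the ARMv7 CLIDR/CSSELR/CCSIDR field layout, not the EDK2 source, and the typedef and function names are hypothetical:

    #include <stdint.h>

    typedef void (*CACHE_LINE_OP)(uint32_t SetWayLevel);  /* e.g. ArmCleanInvalidateDataCacheEntryBySetWay */

    static void AllDataCachesOperationSketch (CACHE_LINE_OP LineOp)
    {
      uint32_t Clidr, Ccsidr;

      __asm__ volatile ("mrc p15, 1, %0, c0, c0, 1" : "=r"(Clidr));    /* read CLIDR         */
      uint32_t Loc = (Clidr >> 24) & 0x7;                              /* Level of Coherency */

      for (uint32_t Level = 0; Level < Loc; Level++) {
        if (((Clidr >> (3 * Level)) & 0x7) < 2) {
          continue;                            /* no cache, or instruction cache only        */
        }

        uint32_t Csselr = Level << 1;          /* select this level's data/unified cache     */
        __asm__ volatile ("mcr p15, 2, %0, c0, c0, 0" :: "r"(Csselr));
        __asm__ volatile ("isb");              /* sync the CSSELR change                     */
        __asm__ volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r"(Ccsidr)); /* read CCSIDR        */

        uint32_t LineShift = (Ccsidr & 0x7) + 4;           /* log2(bytes per line)           */
        uint32_t MaxWay    = (Ccsidr >> 3)  & 0x3FF;
        uint32_t MaxSet    = (Ccsidr >> 13) & 0x7FFF;
        uint32_t WayShift  = (MaxWay == 0) ? 31            /* direct mapped: way is always 0 */
                                           : (uint32_t)__builtin_clz (MaxWay);

        for (uint32_t Set = 0; Set <= MaxSet; Set++) {
          for (uint32_t Way = 0; Way <= MaxWay; Way++) {
            /* Level, way and set packed the same way the assembly builds the operand in R0  */
            LineOp ((Level << 1) | (Way << WayShift) | (Set << LineShift));
          }
        }
      }
    }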