author     Harry Liebel <Harry.Liebel@arm.com>  2013-07-18 19:06:52 +0000
committer  oliviermartin <oliviermartin@6f19259b-4bc3-4df7-8a09-765794883524>  2013-07-18 19:06:52 +0000
commit     1bc8326695a2181cb5934c3dfb01b0a26c4096a0 (patch)
tree       759dbcb62f7e0222f5962b9228a72a6d170b215b /ArmPlatformPkg/Sec
parent     93deac7e25a9adc43ea314e829ec50502ce5c39e (diff)
download   edk2-platforms-1bc8326695a2181cb5934c3dfb01b0a26c4096a0.tar.xz
ArmPlatformPkg: Added AArch64 support
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Harry Liebel <Harry.Liebel@arm.com>
Signed-off-by: Olivier Martin <olivier.martin@arm.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@14489 6f19259b-4bc3-4df7-8a09-765794883524
Diffstat (limited to 'ArmPlatformPkg/Sec')
-rw-r--r--  ArmPlatformPkg/Sec/AArch64/Helper.S         156
-rw-r--r--  ArmPlatformPkg/Sec/AArch64/SecEntryPoint.S  146
-rw-r--r--  ArmPlatformPkg/Sec/Sec.inf                    8
3 files changed, 308 insertions, 2 deletions
diff --git a/ArmPlatformPkg/Sec/AArch64/Helper.S b/ArmPlatformPkg/Sec/AArch64/Helper.S
new file mode 100644
index 0000000000..ff46255763
--- /dev/null
+++ b/ArmPlatformPkg/Sec/AArch64/Helper.S
@@ -0,0 +1,156 @@
+#========================================================================================
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#=======================================================================================
+
+#include <AsmMacroIoLibV8.h>
+#include <Chipset/AArch64.h>
+
+# Start of the code section
+.text
+.align 3
+
+ASM_GLOBAL ASM_PFX(SetupExceptionLevel3)
+ASM_GLOBAL ASM_PFX(SwitchToNSExceptionLevel1)
+ASM_GLOBAL ASM_PFX(enter_monitor_mode)
+ASM_GLOBAL ASM_PFX(return_from_exception)
+ASM_GLOBAL ASM_PFX(copy_cpsr_into_spsr)
+ASM_GLOBAL ASM_PFX(set_non_secure_mode)
+
+ASM_PFX(SetupExceptionLevel3):
+ mrs x0, scr_el3 // Read EL3 Secure Configuration Register
+ orr x0, x0, #1 // EL0 and EL1 cannot access secure memory
+
+ // Send all interrupts to their respective Exception levels for EL3
+ bic x0, x0, #(1 << 1) // IRQ
+ bic x0, x0, #(1 << 2) // FIQ
+ bic x0, x0, #(1 << 3) // SError and Abort
+ orr x0, x0, #(1 << 8) // Enable HVC
+ orr x0, x0, #(1 << 10) // Make next level down 64Bit. This is EL2 in the case of the Model.
+ // We need a nice way to detect this.
+ msr scr_el3, x0 // Write back our settings
+
+ msr cptr_el3, xzr // Disable copro traps to EL3
+
+ // Check for the primary CPU to avoid a race on the distributor registers.
+ mrs x0, mpidr_el1
+ tst x0, #15
+ b.ne 1f // secondary CPU
+
+ LoadConstantToReg (FixedPcdGet32(PcdGicInterruptInterfaceBase), x1)
+ mov w0, #3 // EnableGrp0 | EnableGrp1
+ str w0, [x1]
+
+1: LoadConstantToReg (FixedPcdGet32(PcdGicDistributorBase), x1)
+ add x1, x1, #0x80
+ mov w0, #~0 // Grp1 interrupts
+ str w0, [x1], #4
+ b.ne 2f // Only local interrupts for secondary CPUs
+ str w0, [x1], #4
+ str w0, [x1], #4
+
+2: LoadConstantToReg (FixedPcdGet32(PcdGicInterruptInterfaceBase), x1)
+ ldr w0, [x1]
+ mov w0, #3 // EnableGrp0 | EnableGrp1
+ str w0, [x1]
+
+ mov w0, #1 << 7 // allow NS access to GICC_PMR
+ str w0, [x1, #4] // GICC_PMR
+
+ ret
+
+// Switch from EL3 to NS-EL1
+ASM_PFX(SwitchToNSExceptionLevel1):
+ // Now setup our EL1. Controlled by EL2 config on Model
+ mrs x0, hcr_el2 // Read EL2 Hypervisor configuration Register
+ orr x0, x0, #(1 << 31) // Set EL1 to be 64bit
+
+ // Send all interrupts to their respective Exception levels for EL2
+ bic x0, x0, #(1 << 3) // Disable virtual FIQ
+ bic x0, x0, #(1 << 4) // Disable virtual IRQ
+ bic x0, x0, #(1 << 5) // Disable virtual SError and Abort
+ msr hcr_el2, x0 // Write back our settings
+
+ msr cptr_el2, xzr // Disable copro traps to EL2
+
+ msr sctlr_el2, xzr
+
+ // Enable architected timer access
+ mrs x0, cnthctl_el2
+ orr x0, x0, #3 // Enable EL1 access to timers
+ msr cnthctl_el2, x0
+
+ mrs x0, cntkctl_el1
+ orr x0, x0, #3 // EL0 access to counters
+ msr cntkctl_el1, x0
+
+ // Set ID regs
+ mrs x0, midr_el1
+ mrs x1, mpidr_el1
+ msr vpidr_el2, x0
+ msr vmpidr_el2, x1
+
+ ret
+
+
+// EL3 on AArch64 is Secure/Monitor, so this function is reduced compared to ARMv7:
+// we do not need a mode switch, just set up the arguments and jump.
+// x0: Monitor World EntryPoint
+// x1: MpId
+// x2: SecBootMode
+// x3: Secure Monitor mode stack
+ASM_PFX(enter_monitor_mode):
+ mov x4, x0 // Swap EntryPoint and MpId registers
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ br x4
+
+// Put the address in the correct ELR_ELx and do an eret.
+// We may need to do some config before we change to another Mode.
+ASM_PFX(return_from_exception):
+ msr elr_el3, x0
+
+ mrs x7, spsr_el3
+ ands w7, w7, #0xC
+ cmp w7, #0xC // EL3?
+ b.eq 3f
+ bl ASM_PFX(SetupExceptionLevel3)
+ cmp w7, #0x8 // EL2?
+ b.eq 2f
+ cmp w7, #0x4 // EL1?
+ b.eq 1f
+ b dead // We should never get here.
+
+1: bl ASM_PFX(SwitchToNSExceptionLevel1)
+2: // EL2: No more setup required.
+3: // EL3: Not sure why we would do this.
+ eret
+
+// For AArch64 we need to construct the spsr we want from individual bits and pieces.
+ASM_PFX(copy_cpsr_into_spsr):
+ mrs x0, CurrentEl // Get the current exception level we are running at.
+ mrs x1, SPSel // Which Stack are we using
+ orr x0, x0, x1
+ mrs x1, daif // Which interrupts are enabled
+ orr x0, x0, x1
+ msr spsr_el3, x0 // Write to spsr
+ ret
+
+// Get this from platform file.
+ASM_PFX(set_non_secure_mode):
+ msr spsr_el3, x0
+ ret
+
+dead:
+ b dead
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
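
The sketch below restates the SCR_EL3 configuration performed by SetupExceptionLevel3 in the Helper.S hunk above as plain C bit manipulation. It is an illustrative sketch only, not part of the patch: the macro names (SCR_NS, SCR_IRQ, ...) and the SetupScrEl3 helper are hypothetical, the bit positions are the ones named in the assembly comments, and the actual register access still has to be done with mrs/msr in assembly.

  #include <stdint.h>

  #define SCR_NS   (1u << 0)   /* EL0/EL1 are Non-secure                    */
  #define SCR_IRQ  (1u << 1)   /* IRQs taken to EL3 when set                */
  #define SCR_FIQ  (1u << 2)   /* FIQs taken to EL3 when set                */
  #define SCR_EA   (1u << 3)   /* SError/external aborts taken to EL3       */
  #define SCR_HCE  (1u << 8)   /* enable the HVC instruction                */
  #define SCR_RW   (1u << 10)  /* next lower EL (EL2 here) is AArch64       */

  /* Same bit operations as the mrs/orr/bic/msr sequence in SetupExceptionLevel3. */
  static inline uint32_t SetupScrEl3 (uint32_t Scr)
  {
    Scr |= SCR_NS;                          /* lower ELs cannot access secure memory  */
    Scr &= ~(SCR_IRQ | SCR_FIQ | SCR_EA);   /* leave interrupts at their own EL       */
    Scr |= SCR_HCE | SCR_RW;                /* allow HVC, make the next level 64-bit  */
    return Scr;
  }
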
diff --git a/ArmPlatformPkg/Sec/AArch64/SecEntryPoint.S b/ArmPlatformPkg/Sec/AArch64/SecEntryPoint.S
new file mode 100644
index 0000000000..e678f4c18c
--- /dev/null
+++ b/ArmPlatformPkg/Sec/AArch64/SecEntryPoint.S
@@ -0,0 +1,146 @@
+//
+// Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+//
+// This program and the accompanying materials
+// are licensed and made available under the terms and conditions of the BSD License
+// which accompanies this distribution. The full text of the license may be found at
+// http://opensource.org/licenses/bsd-license.php
+//
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+//
+//
+
+#include <AutoGen.h>
+#include <AsmMacroIoLibV8.h>
+#include "SecInternal.h"
+
+.text
+.align 3
+
+GCC_ASM_IMPORT(CEntryPoint)
+GCC_ASM_IMPORT(ArmPlatformIsPrimaryCore)
+GCC_ASM_IMPORT(ArmPlatformGetCorePosition)
+GCC_ASM_IMPORT(ArmPlatformSecBootAction)
+GCC_ASM_IMPORT(ArmPlatformSecBootMemoryInit)
+GCC_ASM_IMPORT(ArmDisableInterrupts)
+GCC_ASM_IMPORT(ArmDisableCachesAndMmu)
+GCC_ASM_IMPORT(ArmReadMpidr)
+GCC_ASM_IMPORT(ArmCallWFE)
+GCC_ASM_EXPORT(_ModuleEntryPoint)
+
+StartupAddr: .dword ASM_PFX(CEntryPoint)
+
+ASM_PFX(_ModuleEntryPoint):
+
+// NOTE: We could be booting from EL3, EL2 or EL1. Need to correctly detect
+// and configure the system accordingly. EL2 is default if possible.
+// If we started in EL3 we need to switch and run at EL2.
+// If we are running at EL2, stay in EL2.
+// If we are starting at EL1, stay in EL1.
+
+// Sec only runs in EL3. Otherwise we jump to PEI without changing anything.
+// If Sec runs, we change to EL2 before switching to PEI.
+
+// Which EL are we running at? Every EL needs some level of setup...
+ EL1_OR_EL2_OR_EL3(x0)
+1:// If we are at EL1 or EL2 leave SEC for PEI.
+2:b ASM_PFX(JumpToPEI)
+ // If we are at EL3 we need to configure it and switch to EL2
+3:b ASM_PFX(MainEntryPoint)
+
+ASM_PFX(MainEntryPoint):
+ // First ensure all interrupts are disabled
+ bl ASM_PFX(ArmDisableInterrupts)
+
+ // Ensure that the MMU and caches are off
+ bl ASM_PFX(ArmDisableCachesAndMmu)
+
+ // By default, we are doing a cold boot
+ mov x10, #ARM_SEC_COLD_BOOT
+
+ // Jump to Platform Specific Boot Action function
+ bl ASM_PFX(ArmPlatformSecBootAction)
+
+_IdentifyCpu:
+ // Identify CPU ID
+ bl ASM_PFX(ArmReadMpidr)
+ // Keep a copy of the MpId register value
+ mov x5, x0
+
+ // Is it the Primary Core?
+ bl ASM_PFX(ArmPlatformIsPrimaryCore)
+ cmp x0, #1
+ // Only the primary core initializes the memory (SMC)
+ b.eq _InitMem
+
+_WaitInitMem:
+ // If we are not doing a cold boot, assume the Initial Memory is already initialized.
+ // Otherwise we have to wait for the Primary Core to finish the initialization.
+ cmp x10, #ARM_SEC_COLD_BOOT
+ b.ne _SetupSecondaryCoreStack
+
+ // Wait for the primary core to initialize the initial memory (event: BOOT_MEM_INIT)
+ bl ASM_PFX(ArmCallWFE)
+ // Now the Init Mem is initialized, we setup the secondary core stacks
+ b _SetupSecondaryCoreStack
+
+_InitMem:
+ // If we are not doing a cold boot, assume the Initial Memory is already initialized.
+ cmp x10, #ARM_SEC_COLD_BOOT
+ b.ne _SetupPrimaryCoreStack
+
+ // Initialize Init Boot Memory
+ bl ASM_PFX(ArmPlatformSecBootMemoryInit)
+
+_SetupPrimaryCoreStack:
+ // Get the top of the primary stacks (and the base of the secondary stacks)
+ LoadConstantToReg (FixedPcdGet32(PcdCPUCoresSecStackBase), x1)
+ LoadConstantToReg (FixedPcdGet32(PcdCPUCoreSecPrimaryStackSize), x2)
+ add x1, x1, x2
+
+ LoadConstantToReg (FixedPcdGet32(PcdSecGlobalVariableSize), x2)
+
+ // The reserved space for global variables must be 8-byte aligned so that
+ // 64-bit variables can be pushed on the stack
+ SetPrimaryStack (x1, x2, x3, x4)
+ b _PrepareArguments
+
+_SetupSecondaryCoreStack:
+ // Get the top of the primary stacks (and the base of the secondary stacks)
+ LoadConstantToReg (FixedPcdGet32(PcdCPUCoresSecStackBase), x1)
+ LoadConstantToReg (FixedPcdGet32(PcdCPUCoreSecPrimaryStackSize), x2)
+ add x6, x1, x2
+
+ // Get the Core Position
+ mov x0, x5
+ bl ASM_PFX(ArmPlatformGetCorePosition)
+ // The stack starts at the top of the stack region. Add '1' to the Core Position to get the top of the stack
+ add x0, x0, #1
+
+ // StackOffset = CorePos * StackSize
+ LoadConstantToReg (FixedPcdGet32(PcdCPUCoreSecSecondaryStackSize), x2)
+ mul x0, x0, x2
+ // SP = StackBase + StackOffset
+ add sp, x6, x0
+
+_PrepareArguments:
+ // Move sec startup address into a data register
+ // Ensure we're jumping to FV version of the code (not boot remapped alias)
+ ldr x3, StartupAddr
+
+ // Jump to SEC C code
+ // x0 = MpId
+ // x1 = Boot Mode
+ mov x0, x5
+ mov x1, x10
+ blr x3
+
+ ret
+
+ASM_PFX(JumpToPEI):
+ LoadConstantToReg (FixedPcdGet32(PcdFvBaseAddress), x0)
+ blr x0
+
+dead:
+ b dead
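
As a companion to the _SetupSecondaryCoreStack path above, here is the stack-pointer arithmetic written out in C. This is a minimal sketch under the assumption that the PCD values are plain byte sizes, as the assembly suggests; the function name SecondaryStackPointer and its parameters are made up for illustration and do not exist in EDK2.

  #include <stdint.h>

  /* CorePos comes from ArmPlatformGetCorePosition(); the base and sizes come
     from the PCDs referenced in the assembly. All names below are local to
     this sketch. */
  static uint64_t
  SecondaryStackPointer (uint64_t SecStackBase,        /* PcdCPUCoresSecStackBase          */
                         uint64_t PrimaryStackSize,    /* PcdCPUCoreSecPrimaryStackSize    */
                         uint64_t SecondaryStackSize,  /* PcdCPUCoreSecSecondaryStackSize  */
                         uint64_t CorePos)
  {
    /* Secondary stacks start where the primary stack region ends. */
    uint64_t SecondaryStacksBase = SecStackBase + PrimaryStackSize;

    /* Each stack grows down from the top of its slot, hence CorePos + 1. */
    return SecondaryStacksBase + (CorePos + 1) * SecondaryStackSize;
  }
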
diff --git a/ArmPlatformPkg/Sec/Sec.inf b/ArmPlatformPkg/Sec/Sec.inf
index 8e64a73142..dc1c4f2bee 100644
--- a/ArmPlatformPkg/Sec/Sec.inf
+++ b/ArmPlatformPkg/Sec/Sec.inf
@@ -1,7 +1,7 @@
#/** @file
-# SEC - Reset vector code that jumps to C and loads DXE core
+# SEC - Reset vector code that jumps to C and starts the PEI phase
#
-# Copyright (c) 2011-2012, ARM Limited. All rights reserved.
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -29,6 +29,10 @@
Arm/SecEntryPoint.S | GCC
Arm/SecEntryPoint.asm | RVCT
+[Sources.AARCH64]
+ AArch64/Helper.S | GCC
+ AArch64/SecEntryPoint.S | GCC
+
[Packages]
MdePkg/MdePkg.dec
MdeModulePkg/MdeModulePkg.dec