author     ARM gem5 Developers <none@none>   2014-01-24 15:29:34 -0600
committer  ARM gem5 Developers <none@none>   2014-01-24 15:29:34 -0600
commit     612f8f074fa1099cf70faf495d46cc647762a031 (patch)
tree       bd1e99c43bf15292395eadd4b7ae3f5c823545c3 /src/arch/arm/faults.cc
parent     f3585c841e964c98911784a187fc4f081a02a0a6 (diff)
arm: Add support for ARMv8 (AArch64 & AArch32)
Note: AArch64 and AArch32 interworking is not supported. If you use an
AArch64 kernel you are restricted to AArch64 user-mode binaries. This will
be addressed in a later patch.

Note: Virtualization is only supported in AArch32 mode. This will also be
fixed in a later patch.

Contributors:
  Giacomo Gabrielli (TrustZone, LPAE, system-level AArch64, AArch64 NEON, validation)
  Thomas Grocutt (AArch32 Virtualization, AArch64 FP, validation)
  Mbou Eyole (AArch64 NEON, validation)
  Ali Saidi (AArch64 Linux support, code integration, validation)
  Edmund Grimley-Evans (AArch64 FP)
  William Wang (AArch64 Linux support)
  Rene De Jong (AArch64 Linux support, performance opt.)
  Matt Horsnell (AArch64 MP, validation)
  Matt Evans (device models, code integration, validation)
  Chris Adeniyi-Jones (AArch64 syscall-emulation)
  Prakash Ramrakhyani (validation)
  Dam Sunwoo (validation)
  Chander Sudanthi (validation)
  Stephan Diestelhorst (validation)
  Andreas Hansson (code integration, performance opt.)
  Eric Van Hensbergen (performance opt.)
  Gabe Black
Diffstat (limited to 'src/arch/arm/faults.cc')
-rw-r--r--  src/arch/arm/faults.cc | 1286
1 file changed, 1228 insertions, 58 deletions
diff --git a/src/arch/arm/faults.cc b/src/arch/arm/faults.cc
index be1c7ecc2..f8313efd2 100644
--- a/src/arch/arm/faults.cc
+++ b/src/arch/arm/faults.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010 ARM Limited
+ * Copyright (c) 2010, 2012-2013 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -40,9 +40,15 @@
*
* Authors: Ali Saidi
* Gabe Black
+ * Giacomo Gabrielli
+ * Thomas Grocutt
*/
#include "arch/arm/faults.hh"
+#include "arch/arm/system.hh"
+#include "arch/arm/utility.hh"
+#include "arch/arm/insts/static_inst.hh"
+#include "base/compiler.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
@@ -52,61 +58,413 @@
namespace ArmISA
{
-template<> ArmFault::FaultVals ArmFaultVals<Reset>::vals =
-{"reset", 0x00, MODE_SVC, 0, 0, true, true, FaultStat()};
+uint8_t ArmFault::shortDescFaultSources[] = {
+ 0x01, // AlignmentFault
+ 0x04, // InstructionCacheMaintenance
+ 0xff, // SynchExtAbtOnTranslTableWalkL0 (INVALID)
+ 0x0c, // SynchExtAbtOnTranslTableWalkL1
+ 0x0e, // SynchExtAbtOnTranslTableWalkL2
+ 0xff, // SynchExtAbtOnTranslTableWalkL3 (INVALID)
+ 0xff, // SynchPtyErrOnTranslTableWalkL0 (INVALID)
+ 0x1c, // SynchPtyErrOnTranslTableWalkL1
+ 0x1e, // SynchPtyErrOnTranslTableWalkL2
+ 0xff, // SynchPtyErrOnTranslTableWalkL3 (INVALID)
+ 0xff, // TranslationL0 (INVALID)
+ 0x05, // TranslationL1
+ 0x07, // TranslationL2
+ 0xff, // TranslationL3 (INVALID)
+ 0xff, // AccessFlagL0 (INVALID)
+ 0x03, // AccessFlagL1
+ 0x06, // AccessFlagL2
+ 0xff, // AccessFlagL3 (INVALID)
+ 0xff, // DomainL0 (INVALID)
+ 0x09, // DomainL1
+ 0x0b, // DomainL2
+ 0xff, // DomainL3 (INVALID)
+ 0xff, // PermissionL0 (INVALID)
+ 0x0d, // PermissionL1
+ 0x0f, // PermissionL2
+ 0xff, // PermissionL3 (INVALID)
+ 0x02, // DebugEvent
+ 0x08, // SynchronousExternalAbort
+ 0x10, // TLBConflictAbort
+ 0x19, // SynchPtyErrOnMemoryAccess
+ 0x16, // AsynchronousExternalAbort
+ 0x18, // AsynchPtyErrOnMemoryAccess
+ 0xff, // AddressSizeL0 (INVALID)
+ 0xff, // AddressSizeL1 (INVALID)
+ 0xff, // AddressSizeL2 (INVALID)
+ 0xff, // AddressSizeL3 (INVALID)
+ 0x40, // PrefetchTLBMiss
+ 0x80 // PrefetchUncacheable
+};
-template<> ArmFault::FaultVals ArmFaultVals<UndefinedInstruction>::vals =
-{"Undefined Instruction", 0x04, MODE_UNDEFINED, 4 ,2, false, false,
- FaultStat()} ;
+static_assert(sizeof(ArmFault::shortDescFaultSources) ==
+ ArmFault::NumFaultSources,
+ "Invalid size of ArmFault::shortDescFaultSources[]");
-template<> ArmFault::FaultVals ArmFaultVals<SupervisorCall>::vals =
-{"Supervisor Call", 0x08, MODE_SVC, 4, 2, false, false, FaultStat()};
+uint8_t ArmFault::longDescFaultSources[] = {
+ 0x21, // AlignmentFault
+ 0xff, // InstructionCacheMaintenance (INVALID)
+ 0xff, // SynchExtAbtOnTranslTableWalkL0 (INVALID)
+ 0x15, // SynchExtAbtOnTranslTableWalkL1
+ 0x16, // SynchExtAbtOnTranslTableWalkL2
+ 0x17, // SynchExtAbtOnTranslTableWalkL3
+ 0xff, // SynchPtyErrOnTranslTableWalkL0 (INVALID)
+ 0x1d, // SynchPtyErrOnTranslTableWalkL1
+ 0x1e, // SynchPtyErrOnTranslTableWalkL2
+ 0x1f, // SynchPtyErrOnTranslTableWalkL3
+ 0xff, // TranslationL0 (INVALID)
+ 0x05, // TranslationL1
+ 0x06, // TranslationL2
+ 0x07, // TranslationL3
+ 0xff, // AccessFlagL0 (INVALID)
+ 0x09, // AccessFlagL1
+ 0x0a, // AccessFlagL2
+ 0x0b, // AccessFlagL3
+ 0xff, // DomainL0 (INVALID)
+ 0x3d, // DomainL1
+ 0x3e, // DomainL2
+ 0xff, // DomainL3 (RESERVED)
+ 0xff, // PermissionL0 (INVALID)
+ 0x0d, // PermissionL1
+ 0x0e, // PermissionL2
+ 0x0f, // PermissionL3
+ 0x22, // DebugEvent
+ 0x10, // SynchronousExternalAbort
+ 0x30, // TLBConflictAbort
+ 0x18, // SynchPtyErrOnMemoryAccess
+ 0x11, // AsynchronousExternalAbort
+ 0x19, // AsynchPtyErrOnMemoryAccess
+ 0xff, // AddressSizeL0 (INVALID)
+ 0xff, // AddressSizeL1 (INVALID)
+ 0xff, // AddressSizeL2 (INVALID)
+ 0xff, // AddressSizeL3 (INVALID)
+ 0x40, // PrefetchTLBMiss
+ 0x80 // PrefetchUncacheable
+};
-template<> ArmFault::FaultVals ArmFaultVals<PrefetchAbort>::vals =
-{"Prefetch Abort", 0x0C, MODE_ABORT, 4, 4, true, false, FaultStat()};
+static_assert(sizeof(ArmFault::longDescFaultSources) ==
+ ArmFault::NumFaultSources,
+ "Invalid size of ArmFault::longDescFaultSources[]");
-template<> ArmFault::FaultVals ArmFaultVals<DataAbort>::vals =
-{"Data Abort", 0x10, MODE_ABORT, 8, 8, true, false, FaultStat()};
+uint8_t ArmFault::aarch64FaultSources[] = {
+ 0x21, // AlignmentFault
+ 0xff, // InstructionCacheMaintenance (INVALID)
+ 0x14, // SynchExtAbtOnTranslTableWalkL0
+ 0x15, // SynchExtAbtOnTranslTableWalkL1
+ 0x16, // SynchExtAbtOnTranslTableWalkL2
+ 0x17, // SynchExtAbtOnTranslTableWalkL3
+ 0x1c, // SynchPtyErrOnTranslTableWalkL0
+ 0x1d, // SynchPtyErrOnTranslTableWalkL1
+ 0x1e, // SynchPtyErrOnTranslTableWalkL2
+ 0x1f, // SynchPtyErrOnTranslTableWalkL3
+ 0x04, // TranslationL0
+ 0x05, // TranslationL1
+ 0x06, // TranslationL2
+ 0x07, // TranslationL3
+ 0x08, // AccessFlagL0
+ 0x09, // AccessFlagL1
+ 0x0a, // AccessFlagL2
+ 0x0b, // AccessFlagL3
+ // @todo: Section & Page Domain Fault in AArch64?
+ 0xff, // DomainL0 (INVALID)
+ 0xff, // DomainL1 (INVALID)
+ 0xff, // DomainL2 (INVALID)
+ 0xff, // DomainL3 (INVALID)
+ 0x0c, // PermissionL0
+ 0x0d, // PermissionL1
+ 0x0e, // PermissionL2
+ 0x0f, // PermissionL3
+ 0xff, // DebugEvent (INVALID)
+ 0x10, // SynchronousExternalAbort
+ 0x30, // TLBConflictAbort
+ 0x18, // SynchPtyErrOnMemoryAccess
+ 0xff, // AsynchronousExternalAbort (INVALID)
+ 0xff, // AsynchPtyErrOnMemoryAccess (INVALID)
+ 0x00, // AddressSizeL0
+ 0x01, // AddressSizeL1
+ 0x02, // AddressSizeL2
+ 0x03, // AddressSizeL3
+ 0x40, // PrefetchTLBMiss
+ 0x80 // PrefetchUncacheable
+};
-template<> ArmFault::FaultVals ArmFaultVals<Interrupt>::vals =
-{"IRQ", 0x18, MODE_IRQ, 4, 4, true, false, FaultStat()};
+static_assert(sizeof(ArmFault::aarch64FaultSources) ==
+ ArmFault::NumFaultSources,
+ "Invalid size of ArmFault::aarch64FaultSources[]");
-template<> ArmFault::FaultVals ArmFaultVals<FastInterrupt>::vals =
-{"FIQ", 0x1C, MODE_FIQ, 4, 4, true, true, FaultStat()};
+// Fields: name, offset, cur{ELT,ELH}Offset, lowerEL{64,32}Offset, next mode,
+// {ARM, Thumb, ARM_ELR, Thumb_ELR} PC offset, hyp trap,
+// {A, F} disable, class, stat
+template<> ArmFault::FaultVals ArmFaultVals<Reset>::vals = {
+ // Some dummy values (the reset vector has an IMPLEMENTATION DEFINED
+ // location in AArch64)
+ "Reset", 0x000, 0x000, 0x000, 0x000, 0x000, MODE_SVC,
+ 0, 0, 0, 0, false, true, true, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<UndefinedInstruction>::vals = {
+ "Undefined Instruction", 0x004, 0x000, 0x200, 0x400, 0x600, MODE_UNDEFINED,
+ 4, 2, 0, 0, true, false, false, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<SupervisorCall>::vals = {
+ "Supervisor Call", 0x008, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
+ 4, 2, 4, 2, true, false, false, EC_SVC_TO_HYP, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<SecureMonitorCall>::vals = {
+ "Secure Monitor Call", 0x008, 0x000, 0x200, 0x400, 0x600, MODE_MON,
+ 4, 4, 4, 4, false, true, true, EC_SMC_TO_HYP, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<HypervisorCall>::vals = {
+ "Hypervisor Call", 0x008, 0x000, 0x200, 0x400, 0x600, MODE_HYP,
+ 4, 4, 4, 4, true, false, false, EC_HVC, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<PrefetchAbort>::vals = {
+ "Prefetch Abort", 0x00C, 0x000, 0x200, 0x400, 0x600, MODE_ABORT,
+ 4, 4, 0, 0, true, true, false, EC_PREFETCH_ABORT_TO_HYP, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<DataAbort>::vals = {
+ "Data Abort", 0x010, 0x000, 0x200, 0x400, 0x600, MODE_ABORT,
+ 8, 8, 0, 0, true, true, false, EC_DATA_ABORT_TO_HYP, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<VirtualDataAbort>::vals = {
+ "Virtual Data Abort", 0x010, 0x000, 0x200, 0x400, 0x600, MODE_ABORT,
+ 8, 8, 0, 0, true, true, false, EC_INVALID, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<HypervisorTrap>::vals = {
+ // @todo: double check these values
+ "Hypervisor Trap", 0x014, 0x000, 0x200, 0x400, 0x600, MODE_HYP,
+ 0, 0, 0, 0, false, false, false, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<Interrupt>::vals = {
+ "IRQ", 0x018, 0x080, 0x280, 0x480, 0x680, MODE_IRQ,
+ 4, 4, 0, 0, false, true, false, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<VirtualInterrupt>::vals = {
+ "Virtual IRQ", 0x018, 0x080, 0x280, 0x480, 0x680, MODE_IRQ,
+ 4, 4, 0, 0, false, true, false, EC_INVALID, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<FastInterrupt>::vals = {
+ "FIQ", 0x01C, 0x100, 0x300, 0x500, 0x700, MODE_FIQ,
+ 4, 4, 0, 0, false, true, true, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<VirtualFastInterrupt>::vals = {
+ "Virtual FIQ", 0x01C, 0x100, 0x300, 0x500, 0x700, MODE_FIQ,
+ 4, 4, 0, 0, false, true, true, EC_INVALID, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<SupervisorTrap>::vals = {
+ // Some dummy values (SupervisorTrap is AArch64-only)
+ "Supervisor Trap", 0x014, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
+ 0, 0, 0, 0, false, false, false, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<SecureMonitorTrap>::vals = {
+ // Some dummy values (SecureMonitorTrap is AArch64-only)
+ "Secure Monitor Trap", 0x014, 0x000, 0x200, 0x400, 0x600, MODE_MON,
+ 0, 0, 0, 0, false, false, false, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<PCAlignmentFault>::vals = {
+ // Some dummy values (PCAlignmentFault is AArch64-only)
+ "PC Alignment Fault", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
+ 0, 0, 0, 0, true, false, false, EC_PC_ALIGNMENT, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<SPAlignmentFault>::vals = {
+ // Some dummy values (SPAlignmentFault is AArch64-only)
+ "SP Alignment Fault", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
+ 0, 0, 0, 0, true, false, false, EC_STACK_PTR_ALIGNMENT, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<SystemError>::vals = {
+ // Some dummy values (SError is AArch64-only)
+ "SError", 0x000, 0x180, 0x380, 0x580, 0x780, MODE_SVC,
+ 0, 0, 0, 0, false, true, true, EC_SERROR, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<FlushPipe>::vals = {
+ // Some dummy values
+ "Pipe Flush", 0x000, 0x000, 0x000, 0x000, 0x000, MODE_SVC,
+ 0, 0, 0, 0, false, true, true, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<ArmSev>::vals = {
+ // Some dummy values
+ "ArmSev Flush", 0x000, 0x000, 0x000, 0x000, 0x000, MODE_SVC,
+ 0, 0, 0, 0, false, true, true, EC_UNKNOWN, FaultStat()
+};
+template<> ArmFault::FaultVals ArmFaultVals<IllegalInstSetStateFault>::vals = {
+    // Some dummy values (IllegalInstSetStateFault is AArch64-only)
+ "Illegal Inst Set State Fault", 0x000, 0x000, 0x200, 0x400, 0x600, MODE_SVC,
+ 0, 0, 0, 0, true, false, false, EC_ILLEGAL_INST, FaultStat()
+};
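
The four new AArch64 offset columns follow the ARMv8 vector-table layout: quadrants at 0x000/0x200/0x400/0x600 for current-EL-with-SP0, current-EL-with-SPx, lower-EL-AArch64 and lower-EL-AArch32, each with per-type slots (synchronous +0x000, IRQ +0x080, FIQ +0x100, SError +0x180), which is why the IRQ row reads 0x080/0x280/0x480/0x680. A sketch of the column selection (the real offset64() lives in faults.hh; the names here are illustrative):

    #include <cstdint>

    // Pick a vector offset from the four AArch64 columns of FaultVals.
    static uint64_t vectorOffset64(bool sameEL, bool useSPx, bool from64,
                                   uint64_t curELT, uint64_t curELH,
                                   uint64_t lower64, uint64_t lower32)
    {
        if (sameEL)
            return useSPx ? curELH : curELT;  // current EL: SP_ELx vs SP_EL0
        return from64 ? lower64 : lower32;    // exception from a lower EL
    }

    int main()
    {
        // IRQ taken from a lower AArch64 EL: expect 0x480 per the IRQ row
        return vectorOffset64(false, true, true,
                              0x080, 0x280, 0x480, 0x680) == 0x480 ? 0 : 1;
    }
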
-template<> ArmFault::FaultVals ArmFaultVals<FlushPipe>::vals =
-{"Pipe Flush", 0x00, MODE_SVC, 0, 0, true, true, FaultStat()}; // dummy values
-
-template<> ArmFault::FaultVals ArmFaultVals<ArmSev>::vals =
-{"ArmSev Flush", 0x00, MODE_SVC, 0, 0, true, true, FaultStat()}; // dummy values
-Addr
+Addr
ArmFault::getVector(ThreadContext *tc)
{
- // ARM ARM B1-3
+ Addr base;
- SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR);
+ // ARM ARM issue C B1.8.1
+ bool haveSecurity = ArmSystem::haveSecurity(tc);
// panic if SCTLR.VE because I have no idea what to do with vectored
// interrupts
+ SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR);
assert(!sctlr.ve);
+ // Check for invalid modes
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+ assert(haveSecurity || cpsr.mode != MODE_MON);
+ assert(ArmSystem::haveVirtualization(tc) || cpsr.mode != MODE_HYP);
- if (!sctlr.v)
- return offset();
- return offset() + HighVecs;
+ switch (cpsr.mode)
+ {
+ case MODE_MON:
+ base = tc->readMiscReg(MISCREG_MVBAR);
+ break;
+ case MODE_HYP:
+ base = tc->readMiscReg(MISCREG_HVBAR);
+ break;
+ default:
+ if (sctlr.v) {
+ base = HighVecs;
+ } else {
+ base = haveSecurity ? tc->readMiscReg(MISCREG_VBAR) : 0;
+ }
+ break;
+ }
+ return base + offset(tc);
+}
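
As a concrete reading of the default branch: with SCTLR.V set ("hivecs"), an Undefined Instruction exception vectors to HighVecs + 0x04. A trivial sketch, assuming HighVecs is the usual ARM high-vector base of 0xFFFF0000:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t HighVecs = 0xFFFF0000;  // assumed high-vector base
        const uint64_t undefOffset = 0x004;    // from the FaultVals table
        std::printf("vector = %#llx\n",        // prints 0xffff0004
                    (unsigned long long)(HighVecs + undefOffset));
        return 0;
    }
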
+Addr
+ArmFault::getVector64(ThreadContext *tc)
+{
+ Addr vbar;
+ switch (toEL) {
+ case EL3:
+ assert(ArmSystem::haveSecurity(tc));
+ vbar = tc->readMiscReg(MISCREG_VBAR_EL3);
+ break;
+ // @todo: uncomment this to enable Virtualization
+ // case EL2:
+ // assert(ArmSystem::haveVirtualization(tc));
+ // vbar = tc->readMiscReg(MISCREG_VBAR_EL2);
+ // break;
+ case EL1:
+ vbar = tc->readMiscReg(MISCREG_VBAR_EL1);
+ break;
+ default:
+ panic("Invalid target exception level");
+ break;
+ }
+ return vbar + offset64();
}
-void
+MiscRegIndex
+ArmFault::getSyndromeReg64() const
+{
+ switch (toEL) {
+ case EL1:
+ return MISCREG_ESR_EL1;
+ case EL2:
+ return MISCREG_ESR_EL2;
+ case EL3:
+ return MISCREG_ESR_EL3;
+ default:
+ panic("Invalid exception level");
+ break;
+ }
+}
+
+MiscRegIndex
+ArmFault::getFaultAddrReg64() const
+{
+ switch (toEL) {
+ case EL1:
+ return MISCREG_FAR_EL1;
+ case EL2:
+ return MISCREG_FAR_EL2;
+ case EL3:
+ return MISCREG_FAR_EL3;
+ default:
+ panic("Invalid exception level");
+ break;
+ }
+}
+
+void
+ArmFault::setSyndrome(ThreadContext *tc, MiscRegIndex syndrome_reg)
+{
+ uint32_t value;
+ uint32_t exc_class = (uint32_t) ec(tc);
+ uint32_t issVal = iss();
+ assert(!from64 || ArmSystem::highestELIs64(tc));
+
+ value = exc_class << 26;
+
+ // HSR.IL not valid for Prefetch Aborts (0x20, 0x21) and Data Aborts (0x24,
+ // 0x25) for which the ISS information is not valid (ARMv7).
+ // @todo: ARMv8 revises AArch32 functionality: when HSR.IL is not
+ // valid it is treated as RES1.
+ if (to64) {
+ value |= 1 << 25;
+ } else if ((bits(exc_class, 5, 3) != 4) ||
+ (bits(exc_class, 2) && bits(issVal, 24))) {
+ if (!machInst.thumb || machInst.bigThumb)
+ value |= 1 << 25;
+ }
+    // Condition code valid only when EC[5:4] is zero and EC[3:0] nonzero
+ if (!from64 && ((bits(exc_class, 5, 4) == 0) &&
+ (bits(exc_class, 3, 0) != 0))) {
+ if (!machInst.thumb) {
+ uint32_t cond;
+ ConditionCode condCode = (ConditionCode) (uint32_t) machInst.condCode;
+            // If it's an unconditional instruction, report it with a cond
+            // code of 0xE, i.e. the "always" code
+ cond = (condCode == COND_UC) ? COND_AL : condCode;
+ value |= cond << 20;
+ value |= 1 << 24;
+ }
+ value |= bits(issVal, 19, 0);
+ } else {
+ value |= issVal;
+ }
+ tc->setMiscReg(syndrome_reg, value);
+}
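
The resulting syndrome layout is EC in bits [31:26], IL in bit 25 and the ISS in bits [24:0]. A standalone sketch of the packing with example values (EC 0x15 is an SVC taken from AArch64; the imm16 here is arbitrary):

    #include <cstdint>
    #include <cstdio>

    static uint32_t packSyndrome(uint32_t ec, bool il, uint32_t iss)
    {
        return (ec << 26) | (uint32_t(il) << 25) | (iss & 0x01FFFFFF);
    }

    int main()
    {
        // SVC from AArch64 (EC 0x15), 32-bit instruction (IL=1), imm16 0x1234
        std::printf("ESR = %#010x\n", packSyndrome(0x15, true, 0x1234));
        return 0;  // prints 0x56001234
    }
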
+
+void
ArmFault::invoke(ThreadContext *tc, StaticInstPtr inst)
{
- // ARM ARM B1.6.3
+ CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+
+ if (ArmSystem::highestELIs64(tc)) { // ARMv8
+ // Determine source exception level and mode
+ fromMode = (OperatingMode) (uint8_t) cpsr.mode;
+ fromEL = opModeToEL(fromMode);
+ if (opModeIs64(fromMode))
+ from64 = true;
+
+ // Determine target exception level
+ if (ArmSystem::haveSecurity(tc) && routeToMonitor(tc))
+ toEL = EL3;
+ else
+ toEL = opModeToEL(nextMode());
+ if (fromEL > toEL)
+ toEL = fromEL;
+
+ if (toEL == ArmSystem::highestEL(tc) || ELIs64(tc, toEL)) {
+ // Invoke exception handler in AArch64 state
+ to64 = true;
+ invoke64(tc, inst);
+ return;
+ }
+ }
+
+ // ARMv7 (ARM ARM issue C B1.9)
+
+ bool have_security = ArmSystem::haveSecurity(tc);
+ bool have_virtualization = ArmSystem::haveVirtualization(tc);
+
FaultBase::invoke(tc);
if (!FullSystem)
return;
countStat()++;
SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR);
- CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ SCR scr = tc->readMiscReg(MISCREG_SCR);
CPSR saved_cpsr = tc->readMiscReg(MISCREG_CPSR);
saved_cpsr.nz = tc->readIntReg(INTREG_CONDCODES_NZ);
saved_cpsr.c = tc->readIntReg(INTREG_CONDCODES_C);
@@ -118,22 +476,73 @@ ArmFault::invoke(ThreadContext *tc, StaticInstPtr inst)
saved_cpsr.it2 = it.top6;
saved_cpsr.it1 = it.bottom2;
- cpsr.mode = nextMode();
+    // If we have a valid instruction then use it to annotate this fault with
+    // extra information. This is used to generate the correct fault syndrome
+    // information
+ if (inst) {
+ ArmStaticInst *armInst = reinterpret_cast<ArmStaticInst *>(inst.get());
+ armInst->annotateFault(this);
+ }
+
+ if (have_security && routeToMonitor(tc))
+ cpsr.mode = MODE_MON;
+ else if (have_virtualization && routeToHyp(tc))
+ cpsr.mode = MODE_HYP;
+ else
+ cpsr.mode = nextMode();
+
+ // Ensure Secure state if initially in Monitor mode
+ if (have_security && saved_cpsr.mode == MODE_MON) {
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ if (scr.ns) {
+ scr.ns = 0;
+ tc->setMiscRegNoEffect(MISCREG_SCR, scr);
+ }
+ }
+
+ // some bits are set differently if we have been routed to hyp mode
+ if (cpsr.mode == MODE_HYP) {
+ SCTLR hsctlr = tc->readMiscReg(MISCREG_HSCTLR);
+ cpsr.t = hsctlr.te;
+ cpsr.e = hsctlr.ee;
+ if (!scr.ea) {cpsr.a = 1;}
+ if (!scr.fiq) {cpsr.f = 1;}
+ if (!scr.irq) {cpsr.i = 1;}
+ } else if (cpsr.mode == MODE_MON) {
+ // Special case handling when entering monitor mode
+ cpsr.t = sctlr.te;
+ cpsr.e = sctlr.ee;
+ cpsr.a = 1;
+ cpsr.f = 1;
+ cpsr.i = 1;
+ } else {
+ cpsr.t = sctlr.te;
+ cpsr.e = sctlr.ee;
+
+ // The *Disable functions are virtual and different per fault
+ cpsr.a = cpsr.a | abortDisable(tc);
+ cpsr.f = cpsr.f | fiqDisable(tc);
+ cpsr.i = 1;
+ }
cpsr.it1 = cpsr.it2 = 0;
cpsr.j = 0;
-
- cpsr.t = sctlr.te;
- cpsr.a = cpsr.a | abortDisable();
- cpsr.f = cpsr.f | fiqDisable();
- cpsr.i = 1;
- cpsr.e = sctlr.ee;
tc->setMiscReg(MISCREG_CPSR, cpsr);
+
// Make sure mailbox sets to one always
tc->setMiscReg(MISCREG_SEV_MAILBOX, 1);
- tc->setIntReg(INTREG_LR, curPc +
- (saved_cpsr.t ? thumbPcOffset() : armPcOffset()));
- switch (nextMode()) {
+ // Clear the exclusive monitor
+ tc->setMiscReg(MISCREG_LOCKFLAG, 0);
+
+ if (cpsr.mode == MODE_HYP) {
+ tc->setMiscReg(MISCREG_ELR_HYP, curPc +
+ (saved_cpsr.t ? thumbPcOffset(true) : armPcOffset(true)));
+ } else {
+ tc->setIntReg(INTREG_LR, curPc +
+ (saved_cpsr.t ? thumbPcOffset(false) : armPcOffset(false)));
+ }
+
+ switch (cpsr.mode) {
case MODE_FIQ:
tc->setMiscReg(MISCREG_SPSR_FIQ, saved_cpsr);
break;
@@ -143,12 +552,23 @@ ArmFault::invoke(ThreadContext *tc, StaticInstPtr inst)
case MODE_SVC:
tc->setMiscReg(MISCREG_SPSR_SVC, saved_cpsr);
break;
- case MODE_UNDEFINED:
- tc->setMiscReg(MISCREG_SPSR_UND, saved_cpsr);
+ case MODE_MON:
+ assert(have_security);
+ tc->setMiscReg(MISCREG_SPSR_MON, saved_cpsr);
break;
case MODE_ABORT:
tc->setMiscReg(MISCREG_SPSR_ABT, saved_cpsr);
break;
+ case MODE_UNDEFINED:
+ tc->setMiscReg(MISCREG_SPSR_UND, saved_cpsr);
+ if (ec(tc) != EC_UNKNOWN)
+ setSyndrome(tc, MISCREG_HSR);
+ break;
+ case MODE_HYP:
+ assert(have_virtualization);
+ tc->setMiscReg(MISCREG_SPSR_HYP, saved_cpsr);
+ setSyndrome(tc, MISCREG_HSR);
+ break;
default:
panic("unknown Mode\n");
}
@@ -161,7 +581,100 @@ ArmFault::invoke(ThreadContext *tc, StaticInstPtr inst)
pc.nextThumb(pc.thumb());
pc.jazelle(cpsr.j);
pc.nextJazelle(pc.jazelle());
+ pc.aarch64(!cpsr.width);
+ pc.nextAArch64(!cpsr.width);
+ tc->pcState(pc);
+}
+
+void
+ArmFault::invoke64(ThreadContext *tc, StaticInstPtr inst)
+{
+ // Determine actual misc. register indices for ELR_ELx and SPSR_ELx
+ MiscRegIndex elr_idx, spsr_idx;
+ switch (toEL) {
+ case EL1:
+ elr_idx = MISCREG_ELR_EL1;
+ spsr_idx = MISCREG_SPSR_EL1;
+ break;
+ // @todo: uncomment this to enable Virtualization
+ // case EL2:
+ // assert(ArmSystem::haveVirtualization());
+ // elr_idx = MISCREG_ELR_EL2;
+ // spsr_idx = MISCREG_SPSR_EL2;
+ // break;
+ case EL3:
+ assert(ArmSystem::haveSecurity(tc));
+ elr_idx = MISCREG_ELR_EL3;
+ spsr_idx = MISCREG_SPSR_EL3;
+ break;
+ default:
+ panic("Invalid target exception level");
+ break;
+ }
+
+ // Save process state into SPSR_ELx
+ CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ CPSR spsr = cpsr;
+ spsr.nz = tc->readIntReg(INTREG_CONDCODES_NZ);
+ spsr.c = tc->readIntReg(INTREG_CONDCODES_C);
+ spsr.v = tc->readIntReg(INTREG_CONDCODES_V);
+ if (from64) {
+ // Force some bitfields to 0
+ spsr.q = 0;
+ spsr.it1 = 0;
+ spsr.j = 0;
+ spsr.res0_23_22 = 0;
+ spsr.ge = 0;
+ spsr.it2 = 0;
+ spsr.t = 0;
+ } else {
+ spsr.ge = tc->readIntReg(INTREG_CONDCODES_GE);
+ ITSTATE it = tc->pcState().itstate();
+ spsr.it2 = it.top6;
+ spsr.it1 = it.bottom2;
+ // Force some bitfields to 0
+ spsr.res0_23_22 = 0;
+ spsr.ss = 0;
+ }
+ tc->setMiscReg(spsr_idx, spsr);
+
+ // Save preferred return address into ELR_ELx
+ Addr curr_pc = tc->pcState().pc();
+ Addr ret_addr = curr_pc;
+ if (from64)
+ ret_addr += armPcElrOffset();
+ else
+ ret_addr += spsr.t ? thumbPcElrOffset() : armPcElrOffset();
+ tc->setMiscReg(elr_idx, ret_addr);
+
+ // Update process state
+ OperatingMode64 mode = 0;
+ mode.spX = 1;
+ mode.el = toEL;
+ mode.width = 0;
+ cpsr.mode = mode;
+ cpsr.daif = 0xf;
+ cpsr.il = 0;
+ cpsr.ss = 0;
+ tc->setMiscReg(MISCREG_CPSR, cpsr);
+
+ // Set PC to start of exception handler
+ Addr new_pc = purifyTaggedAddr(getVector64(tc), tc, toEL);
+ DPRINTF(Faults, "Invoking Fault (AArch64 target EL):%s cpsr:%#x PC:%#x "
+ "elr:%#x newVec: %#x\n", name(), cpsr, curr_pc, ret_addr, new_pc);
+ PCState pc(new_pc);
+ pc.aarch64(!cpsr.width);
+ pc.nextAArch64(!cpsr.width);
tc->pcState(pc);
+
+ // If we have a valid instruction then use it to annotate this fault with
+ // extra information. This is used to generate the correct fault syndrome
+ // information
+ if (inst)
+ reinterpret_cast<ArmStaticInst *>(inst.get())->annotateFault(this);
+ // Save exception syndrome
+ if ((nextMode() != MODE_IRQ) && (nextMode() != MODE_FIQ))
+ setSyndrome(tc, getSyndromeReg64());
}
void
@@ -171,7 +684,25 @@ Reset::invoke(ThreadContext *tc, StaticInstPtr inst)
tc->getCpuPtr()->clearInterrupts();
tc->clearArchRegs();
}
- ArmFault::invoke(tc, inst);
+ if (!ArmSystem::highestELIs64(tc)) {
+ ArmFault::invoke(tc, inst);
+ tc->setMiscReg(MISCREG_VMPIDR,
+ getMPIDR(dynamic_cast<ArmSystem*>(tc->getSystemPtr()), tc));
+
+ // Unless we have SMC code to get us there, boot in HYP!
+ if (ArmSystem::haveVirtualization(tc) &&
+ !ArmSystem::haveSecurity(tc)) {
+ CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ cpsr.mode = MODE_HYP;
+ tc->setMiscReg(MISCREG_CPSR, cpsr);
+ }
+ } else {
+ // Advance the PC to the IMPLEMENTATION DEFINED reset value
+ PCState pc = ArmSystem::resetAddr64(tc);
+ pc.aarch64(true);
+ pc.nextAArch64(true);
+ tc->pcState(pc);
+ }
}
void
@@ -196,6 +727,45 @@ UndefinedInstruction::invoke(ThreadContext *tc, StaticInstPtr inst)
}
}
+bool
+UndefinedInstruction::routeToHyp(ThreadContext *tc) const
+{
+ bool toHyp;
+
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+
+ // if in Hyp mode then stay in Hyp mode
+ toHyp = scr.ns && (cpsr.mode == MODE_HYP);
+ // if HCR.TGE is set to 1, take to Hyp mode through Hyp Trap vector
+ toHyp |= !inSecureState(scr, cpsr) && hcr.tge && (cpsr.mode == MODE_USER);
+ return toHyp;
+}
+
+uint32_t
+UndefinedInstruction::iss() const
+{
+ if (overrideEc == EC_INVALID)
+ return issRaw;
+
+ uint32_t new_iss = 0;
+ uint32_t op0, op1, op2, CRn, CRm, Rt, dir;
+
+ dir = bits(machInst, 21, 21);
+ op0 = bits(machInst, 20, 19);
+ op1 = bits(machInst, 18, 16);
+ CRn = bits(machInst, 15, 12);
+ CRm = bits(machInst, 11, 8);
+ op2 = bits(machInst, 7, 5);
+ Rt = bits(machInst, 4, 0);
+
+ new_iss = op0 << 20 | op2 << 17 | op1 << 14 | CRn << 10 |
+ Rt << 5 | CRm << 1 | dir;
+
+ return new_iss;
+}
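
The bit ranges pulled from machInst above are the system-register fields of a trapped AArch64 MSR/MRS encoding (op0, op1, op2, CRn, CRm, Rt and the read/write direction bit), shifted into their ISS positions. The same packing as a free-standing sketch:

    #include <cstdint>
    #include <cstdio>

    // dir: 1 = read (MRS), 0 = write (MSR), per the ISS Direction bit.
    static uint32_t msrMrsIss(uint32_t op0, uint32_t op1, uint32_t op2,
                              uint32_t crn, uint32_t crm,
                              uint32_t rt, uint32_t dir)
    {
        return op0 << 20 | op2 << 17 | op1 << 14 | crn << 10 |
               rt << 5 | crm << 1 | dir;
    }

    int main()
    {
        // e.g. an MRS into x0 with op0=3, op1=0, CRn=14, CRm=0, op2=1
        std::printf("ISS = %#x\n", msrMrsIss(3, 0, 1, 14, 0, 0, 1));
        return 0;
    }
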
+
void
SupervisorCall::invoke(ThreadContext *tc, StaticInstPtr inst)
{
@@ -207,7 +777,12 @@ SupervisorCall::invoke(ThreadContext *tc, StaticInstPtr inst)
// As of now, there isn't a 32 bit thumb version of this instruction.
assert(!machInst.bigThumb);
uint32_t callNum;
- callNum = tc->readIntReg(INTREG_R7);
+ CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ OperatingMode mode = (OperatingMode)(uint8_t)cpsr.mode;
+ if (opModeIs64(mode))
+ callNum = tc->readIntReg(INTREG_X8);
+ else
+ callNum = tc->readIntReg(INTREG_R7);
tc->syscall(callNum);
// Advance the PC since that won't happen automatically.
@@ -217,21 +792,593 @@ SupervisorCall::invoke(ThreadContext *tc, StaticInstPtr inst)
tc->pcState(pc);
}
+bool
+SupervisorCall::routeToHyp(ThreadContext *tc) const
+{
+ bool toHyp;
+
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+
+ // if in Hyp mode then stay in Hyp mode
+ toHyp = scr.ns && (cpsr.mode == MODE_HYP);
+ // if HCR.TGE is set to 1, take to Hyp mode through Hyp Trap vector
+ toHyp |= !inSecureState(scr, cpsr) && hcr.tge && (cpsr.mode == MODE_USER);
+ return toHyp;
+}
+
+ExceptionClass
+SupervisorCall::ec(ThreadContext *tc) const
+{
+ return (overrideEc != EC_INVALID) ? overrideEc :
+ (from64 ? EC_SVC_64 : vals.ec);
+}
+
+uint32_t
+SupervisorCall::iss() const
+{
+    // Even if we have a 24-bit imm from an ARM32 instruction, we only use
+    // the bottom 16 bits for the ISS value (it doesn't hurt for AArch64 SVC).
+ return issRaw & 0xFFFF;
+}
+
+uint32_t
+SecureMonitorCall::iss() const
+{
+ if (from64)
+ return bits(machInst, 20, 5);
+ return 0;
+}
+
+ExceptionClass
+UndefinedInstruction::ec(ThreadContext *tc) const
+{
+ return (overrideEc != EC_INVALID) ? overrideEc : vals.ec;
+}
+
+
+HypervisorCall::HypervisorCall(ExtMachInst _machInst, uint32_t _imm) :
+ ArmFaultVals<HypervisorCall>(_machInst, _imm)
+{}
+
+ExceptionClass
+HypervisorTrap::ec(ThreadContext *tc) const
+{
+ return (overrideEc != EC_INVALID) ? overrideEc : vals.ec;
+}
+
+template<class T>
+FaultOffset
+ArmFaultVals<T>::offset(ThreadContext *tc)
+{
+ bool isHypTrap = false;
+
+    // Normally we just use the exception vector from the table at the top of
+    // this file. However, if this exception has caused a transition to hyp
+    // mode, and it's an exception type that would only do this if it has
+    // been trapped, then we use the hyp trap vector instead.
+ if (vals.hypTrappable) {
+ CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ if (cpsr.mode == MODE_HYP) {
+ CPSR spsr = tc->readMiscReg(MISCREG_SPSR_HYP);
+ isHypTrap = spsr.mode != MODE_HYP;
+ }
+ }
+ return isHypTrap ? 0x14 : vals.offset;
+}
+
+// void
+// SupervisorCall::setSyndrome64(ThreadContext *tc, MiscRegIndex esr_idx)
+// {
+// ESR esr = 0;
+// esr.ec = machInst.aarch64 ? SvcAArch64 : SvcAArch32;
+// esr.il = !machInst.thumb;
+// if (machInst.aarch64)
+// esr.imm16 = bits(machInst.instBits, 20, 5);
+// else if (machInst.thumb)
+// esr.imm16 = bits(machInst.instBits, 7, 0);
+// else
+// esr.imm16 = bits(machInst.instBits, 15, 0);
+// tc->setMiscReg(esr_idx, esr);
+// }
+
+void
+SecureMonitorCall::invoke(ThreadContext *tc, StaticInstPtr inst)
+{
+ if (FullSystem) {
+ ArmFault::invoke(tc, inst);
+ return;
+ }
+}
+
+ExceptionClass
+SecureMonitorCall::ec(ThreadContext *tc) const
+{
+ return (from64 ? EC_SMC_64 : vals.ec);
+}
+
+ExceptionClass
+SupervisorTrap::ec(ThreadContext *tc) const
+{
+ return (overrideEc != EC_INVALID) ? overrideEc : vals.ec;
+}
+
+ExceptionClass
+SecureMonitorTrap::ec(ThreadContext *tc) const
+{
+ return (overrideEc != EC_INVALID) ? overrideEc :
+ (from64 ? EC_SMC_64 : vals.ec);
+}
+
template<class T>
void
AbortFault<T>::invoke(ThreadContext *tc, StaticInstPtr inst)
{
+ if (tranMethod == ArmFault::UnknownTran) {
+ tranMethod = longDescFormatInUse(tc) ? ArmFault::LpaeTran
+ : ArmFault::VmsaTran;
+
+ if ((tranMethod == ArmFault::VmsaTran) && this->routeToMonitor(tc)) {
+ // See ARM ARM B3-1416
+ bool override_LPAE = false;
+ TTBCR ttbcr_s = tc->readMiscReg(MISCREG_TTBCR_S);
+ TTBCR M5_VAR_USED ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
+ if (ttbcr_s.eae) {
+ override_LPAE = true;
+ } else {
+            // Unimplemented code option, not seen in testing. May need
+            // extension according to the manual excerpt referenced above.
+ DPRINTF(Faults, "Warning: Incomplete translation method "
+ "override detected.\n");
+ }
+ if (override_LPAE)
+ tranMethod = ArmFault::LpaeTran;
+ }
+ }
+
+ if (source == ArmFault::AsynchronousExternalAbort) {
+ tc->getCpuPtr()->clearInterrupt(INT_ABT, 0);
+ }
+ // Get effective fault source encoding
+ CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+ FSR fsr = getFsr(tc);
+
+ // source must be determined BEFORE invoking generic routines which will
+ // try to set hsr etc. and are based upon source!
ArmFaultVals<T>::invoke(tc, inst);
+
+ if (cpsr.width) { // AArch32
+ if (cpsr.mode == MODE_HYP) {
+ tc->setMiscReg(T::HFarIndex, faultAddr);
+ } else if (stage2) {
+ tc->setMiscReg(MISCREG_HPFAR, (faultAddr >> 8) & ~0xf);
+ tc->setMiscReg(T::HFarIndex, OVAddr);
+ } else {
+ tc->setMiscReg(T::FsrIndex, fsr);
+ tc->setMiscReg(T::FarIndex, faultAddr);
+ }
+ DPRINTF(Faults, "Abort Fault source=%#x fsr=%#x faultAddr=%#x "\
+ "tranMethod=%#x\n", source, fsr, faultAddr, tranMethod);
+ } else { // AArch64
+ // Set the FAR register. Nothing else to do if we are in AArch64 state
+ // because the syndrome register has already been set inside invoke64()
+ tc->setMiscReg(AbortFault<T>::getFaultAddrReg64(), faultAddr);
+ }
+}
+
+template<class T>
+FSR
+AbortFault<T>::getFsr(ThreadContext *tc)
+{
FSR fsr = 0;
- fsr.fsLow = bits(status, 3, 0);
- fsr.fsHigh = bits(status, 4);
- fsr.domain = domain;
- fsr.wnr = (write ? 1 : 0);
- fsr.ext = 0;
- tc->setMiscReg(T::FsrIndex, fsr);
- tc->setMiscReg(T::FarIndex, faultAddr);
- DPRINTF(Faults, "Abort Fault fsr=%#x faultAddr=%#x\n", fsr, faultAddr);
+ if (((CPSR) tc->readMiscRegNoEffect(MISCREG_CPSR)).width) {
+ // AArch32
+ assert(tranMethod != ArmFault::UnknownTran);
+ if (tranMethod == ArmFault::LpaeTran) {
+ srcEncoded = ArmFault::longDescFaultSources[source];
+ fsr.status = srcEncoded;
+ fsr.lpae = 1;
+ } else {
+ srcEncoded = ArmFault::shortDescFaultSources[source];
+ fsr.fsLow = bits(srcEncoded, 3, 0);
+ fsr.fsHigh = bits(srcEncoded, 4);
+ fsr.domain = static_cast<uint8_t>(domain);
+ }
+ fsr.wnr = (write ? 1 : 0);
+ fsr.ext = 0;
+ } else {
+ // AArch64
+ srcEncoded = ArmFault::aarch64FaultSources[source];
+ }
+ if (srcEncoded == ArmFault::FaultSourceInvalid) {
+ panic("Invalid fault source\n");
+ }
+ return fsr;
+}
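
In the short-descriptor format the 5-bit fault status is split across FS[4] (FSR bit 10) and FS[3:0] (bits [3:0]), next to Domain in bits [7:4], the LPAE flag in bit 9 and WnR in bit 11; that split is what the fsHigh/fsLow assignments implement. A sketch of the packing with explicit bit positions (plain integer code, not gem5's FSR bitunion):

    #include <cstdint>
    #include <cstdio>

    static uint32_t packShortFsr(uint8_t src, uint8_t domain, bool wnr)
    {
        return (uint32_t)(src & 0xF)               // FS[3:0]
             | ((uint32_t)domain << 4)             // Domain
             | ((((uint32_t)src >> 4) & 1) << 10)  // FS[4]
             | ((uint32_t)wnr << 11);              // WnR
    }

    int main()
    {
        // TranslationL2 (0x07) on a write to domain 0 -> FSR = 0x807
        std::printf("FSR = %#x\n", packShortFsr(0x07, 0, true));
        return 0;
    }
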
+
+template<class T>
+bool
+AbortFault<T>::abortDisable(ThreadContext *tc)
+{
+ if (ArmSystem::haveSecurity(tc)) {
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ return (!scr.ns || scr.aw);
+ }
+ return true;
+}
+
+template<class T>
+void
+AbortFault<T>::annotate(ArmFault::AnnotationIDs id, uint64_t val)
+{
+ switch (id)
+ {
+ case ArmFault::S1PTW:
+ s1ptw = val;
+ break;
+ case ArmFault::OVA:
+ OVAddr = val;
+ break;
+
+    // Just ignore unknown IDs
+ default:
+ break;
+ }
+}
+
+template<class T>
+uint32_t
+AbortFault<T>::iss() const
+{
+ uint32_t val;
+
+ val = srcEncoded & 0x3F;
+ val |= write << 6;
+ val |= s1ptw << 7;
+ return (val);
+}
+
+template<class T>
+bool
+AbortFault<T>::isMMUFault() const
+{
+ // NOTE: Not relying on LL information being aligned to lowest bits here
+ return
+ (source == ArmFault::AlignmentFault) ||
+ ((source >= ArmFault::TranslationLL) &&
+ (source < ArmFault::TranslationLL + 4)) ||
+ ((source >= ArmFault::AccessFlagLL) &&
+ (source < ArmFault::AccessFlagLL + 4)) ||
+ ((source >= ArmFault::DomainLL) &&
+ (source < ArmFault::DomainLL + 4)) ||
+ ((source >= ArmFault::PermissionLL) &&
+ (source < ArmFault::PermissionLL + 4));
+}
+
+ExceptionClass
+PrefetchAbort::ec(ThreadContext *tc) const
+{
+ if (to64) {
+ // AArch64
+ if (toEL == fromEL)
+ return EC_PREFETCH_ABORT_CURR_EL;
+ else
+ return EC_PREFETCH_ABORT_LOWER_EL;
+ } else {
+ // AArch32
+ // Abort faults have different EC codes depending on whether
+ // the fault originated within HYP mode, or not. So override
+ // the method and add the extra adjustment of the EC value.
+
+ ExceptionClass ec = ArmFaultVals<PrefetchAbort>::vals.ec;
+
+ CPSR spsr = tc->readMiscReg(MISCREG_SPSR_HYP);
+ if (spsr.mode == MODE_HYP) {
+ ec = ((ExceptionClass) (((uint32_t) ec) + 1));
+ }
+ return ec;
+ }
+}
+
+bool
+PrefetchAbort::routeToMonitor(ThreadContext *tc) const
+{
+ SCR scr = 0;
+ if (from64)
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
+ else
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+
+ return scr.ea && !isMMUFault();
+}
+
+bool
+PrefetchAbort::routeToHyp(ThreadContext *tc) const
+{
+ bool toHyp;
+
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+ HDCR hdcr = tc->readMiscRegNoEffect(MISCREG_HDCR);
+
+ // if in Hyp mode then stay in Hyp mode
+ toHyp = scr.ns && (cpsr.mode == MODE_HYP);
+ // otherwise, check whether to take to Hyp mode through Hyp Trap vector
+ toHyp |= (stage2 ||
+ ( (source == DebugEvent) && hdcr.tde && (cpsr.mode != MODE_HYP)) ||
+ ( (source == SynchronousExternalAbort) && hcr.tge && (cpsr.mode == MODE_USER))
+ ) && !inSecureState(scr, cpsr);
+ return toHyp;
+}
+
+ExceptionClass
+DataAbort::ec(ThreadContext *tc) const
+{
+ if (to64) {
+ // AArch64
+ if (source == ArmFault::AsynchronousExternalAbort) {
+ panic("Asynchronous External Abort should be handled with \
+ SystemErrors (SErrors)!");
+ }
+ if (toEL == fromEL)
+ return EC_DATA_ABORT_CURR_EL;
+ else
+ return EC_DATA_ABORT_LOWER_EL;
+ } else {
+ // AArch32
+ // Abort faults have different EC codes depending on whether
+ // the fault originated within HYP mode, or not. So override
+ // the method and add the extra adjustment of the EC value.
+
+ ExceptionClass ec = ArmFaultVals<DataAbort>::vals.ec;
+
+ CPSR spsr = tc->readMiscReg(MISCREG_SPSR_HYP);
+ if (spsr.mode == MODE_HYP) {
+ ec = ((ExceptionClass) (((uint32_t) ec) + 1));
+ }
+ return ec;
+ }
+}
+
+bool
+DataAbort::routeToMonitor(ThreadContext *tc) const
+{
+ SCR scr = 0;
+ if (from64)
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
+ else
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+
+ return scr.ea && !isMMUFault();
+}
+
+bool
+DataAbort::routeToHyp(ThreadContext *tc) const
+{
+ bool toHyp;
+
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+ HDCR hdcr = tc->readMiscRegNoEffect(MISCREG_HDCR);
+
+ // if in Hyp mode then stay in Hyp mode
+ toHyp = scr.ns && (cpsr.mode == MODE_HYP);
+ // otherwise, check whether to take to Hyp mode through Hyp Trap vector
+ toHyp |= (stage2 ||
+ ( (cpsr.mode != MODE_HYP) && ( ((source == AsynchronousExternalAbort) && hcr.amo) ||
+ ((source == DebugEvent) && hdcr.tde) )
+ ) ||
+ ( (cpsr.mode == MODE_USER) && hcr.tge &&
+ ((source == AlignmentFault) ||
+ (source == SynchronousExternalAbort))
+ )
+ ) && !inSecureState(scr, cpsr);
+ return toHyp;
+}
+
+uint32_t
+DataAbort::iss() const
+{
+ uint32_t val;
+
+ // Add on the data abort specific fields to the generic abort ISS value
+ val = AbortFault<DataAbort>::iss();
+    // ISS is valid only if not caused by a stage 1 page table walk, and,
+    // when taken to AArch64, only when directed to EL2
+ if (!s1ptw && (!to64 || toEL == EL2)) {
+ val |= isv << 24;
+ if (isv) {
+ val |= sas << 22;
+ val |= sse << 21;
+ val |= srt << 16;
+ // AArch64 only. These assignments are safe on AArch32 as well
+ // because these vars are initialized to false
+ val |= sf << 15;
+ val |= ar << 14;
+ }
+ }
+ return (val);
+}
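
A free-standing sketch of the composed data-abort ISS, using the field positions the code above relies on (ISV bit 24, SAS [23:22], SSE 21, SRT [20:16], SF 15, AR 14, S1PTW 7, WnR 6, fault status [5:0]); the s1ptw/to64 validity gating is omitted for brevity:

    #include <cstdint>

    static uint32_t dataAbortIss(uint32_t fsc, bool wnr, bool s1ptw,
                                 bool isv, uint32_t sas, bool sse,
                                 uint32_t srt, bool sf, bool ar)
    {
        uint32_t val = (fsc & 0x3F) | ((uint32_t)wnr << 6)
                     | ((uint32_t)s1ptw << 7) | ((uint32_t)isv << 24);
        if (isv) {  // instruction-syndrome fields only valid with ISV set
            val |= (sas << 22) | ((uint32_t)sse << 21) | (srt << 16)
                 | ((uint32_t)sf << 15) | ((uint32_t)ar << 14);
        }
        return val;
    }

    int main()
    {
        // L2 translation fault (0x07) on a write, no instruction syndrome
        return dataAbortIss(0x07, true, false, false, 0, false, 0,
                            false, false) == 0x47 ? 0 : 1;
    }
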
+
+void
+DataAbort::annotate(AnnotationIDs id, uint64_t val)
+{
+ AbortFault<DataAbort>::annotate(id, val);
+ switch (id)
+ {
+ case SAS:
+ isv = true;
+ sas = val;
+ break;
+ case SSE:
+ isv = true;
+ sse = val;
+ break;
+ case SRT:
+ isv = true;
+ srt = val;
+ break;
+ case SF:
+ isv = true;
+ sf = val;
+ break;
+ case AR:
+ isv = true;
+ ar = val;
+ break;
+    // Just ignore unknown IDs
+ default:
+ break;
+ }
+}
+
+void
+VirtualDataAbort::invoke(ThreadContext *tc, StaticInstPtr inst)
+{
+ AbortFault<VirtualDataAbort>::invoke(tc, inst);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ hcr.va = 0;
+ tc->setMiscRegNoEffect(MISCREG_HCR, hcr);
+}
+
+bool
+Interrupt::routeToMonitor(ThreadContext *tc) const
+{
+ assert(ArmSystem::haveSecurity(tc));
+ SCR scr = 0;
+ if (from64)
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
+ else
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ return scr.irq;
+}
+
+bool
+Interrupt::routeToHyp(ThreadContext *tc) const
+{
+ bool toHyp;
+
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+ // Determine whether IRQs are routed to Hyp mode.
+ toHyp = (!scr.irq && hcr.imo && !inSecureState(scr, cpsr)) ||
+ (cpsr.mode == MODE_HYP);
+ return toHyp;
+}
+
+bool
+Interrupt::abortDisable(ThreadContext *tc)
+{
+ if (ArmSystem::haveSecurity(tc)) {
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ return (!scr.ns || scr.aw);
+ }
+ return true;
+}
+
+VirtualInterrupt::VirtualInterrupt()
+{}
+
+bool
+FastInterrupt::routeToMonitor(ThreadContext *tc) const
+{
+ assert(ArmSystem::haveSecurity(tc));
+ SCR scr = 0;
+ if (from64)
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
+ else
+ scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ return scr.fiq;
+}
+
+bool
+FastInterrupt::routeToHyp(ThreadContext *tc) const
+{
+ bool toHyp;
+
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+    // Determine whether FIQs are routed to Hyp mode.
+ toHyp = (!scr.fiq && hcr.fmo && !inSecureState(scr, cpsr)) ||
+ (cpsr.mode == MODE_HYP);
+ return toHyp;
+}
+
+bool
+FastInterrupt::abortDisable(ThreadContext *tc)
+{
+ if (ArmSystem::haveSecurity(tc)) {
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ return (!scr.ns || scr.aw);
+ }
+ return true;
+}
+
+bool
+FastInterrupt::fiqDisable(ThreadContext *tc)
+{
+ if (ArmSystem::haveVirtualization(tc)) {
+ return true;
+ } else if (ArmSystem::haveSecurity(tc)) {
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR);
+ return (!scr.ns || scr.fw);
+ }
+ return true;
+}
+
+VirtualFastInterrupt::VirtualFastInterrupt()
+{}
+
+void
+PCAlignmentFault::invoke(ThreadContext *tc, StaticInstPtr inst)
+{
+ ArmFaultVals<PCAlignmentFault>::invoke(tc, inst);
+ assert(from64);
+ // Set the FAR
+ tc->setMiscReg(getFaultAddrReg64(), faultPC);
+}
+
+SPAlignmentFault::SPAlignmentFault()
+{}
+
+SystemError::SystemError()
+{}
+
+void
+SystemError::invoke(ThreadContext *tc, StaticInstPtr inst)
+{
+ tc->getCpuPtr()->clearInterrupt(INT_ABT, 0);
+ ArmFault::invoke(tc, inst);
+}
+
+bool
+SystemError::routeToMonitor(ThreadContext *tc) const
+{
+ assert(ArmSystem::haveSecurity(tc));
+ assert(from64);
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
+ return scr.ea;
+}
+
+bool
+SystemError::routeToHyp(ThreadContext *tc) const
+{
+ bool toHyp;
+ assert(from64);
+
+ SCR scr = tc->readMiscRegNoEffect(MISCREG_SCR_EL3);
+ HCR hcr = tc->readMiscRegNoEffect(MISCREG_HCR);
+ CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+
+ toHyp = (!scr.ea && hcr.amo && !inSecureState(scr, cpsr)) ||
+ (!scr.ea && !scr.rw && !hcr.amo && !inSecureState(scr,cpsr));
+ return toHyp;
}
void
@@ -247,11 +1394,6 @@ FlushPipe::invoke(ThreadContext *tc, StaticInstPtr inst) {
tc->pcState(pc);
}
-template void AbortFault<PrefetchAbort>::invoke(ThreadContext *tc,
- StaticInstPtr inst);
-template void AbortFault<DataAbort>::invoke(ThreadContext *tc,
- StaticInstPtr inst);
-
void
ArmSev::invoke(ThreadContext *tc, StaticInstPtr inst) {
DPRINTF(Faults, "Invoking ArmSev Fault\n");
@@ -265,6 +1407,34 @@ ArmSev::invoke(ThreadContext *tc, StaticInstPtr inst) {
tc->getCpuPtr()->clearInterrupt(INT_SEV, 0);
}
-// return via SUBS pc, lr, xxx; rfe, movs, ldm
+// Instantiate all the templates to make the linker happy
+template class ArmFaultVals<Reset>;
+template class ArmFaultVals<UndefinedInstruction>;
+template class ArmFaultVals<SupervisorCall>;
+template class ArmFaultVals<SecureMonitorCall>;
+template class ArmFaultVals<HypervisorCall>;
+template class ArmFaultVals<PrefetchAbort>;
+template class ArmFaultVals<DataAbort>;
+template class ArmFaultVals<VirtualDataAbort>;
+template class ArmFaultVals<HypervisorTrap>;
+template class ArmFaultVals<Interrupt>;
+template class ArmFaultVals<VirtualInterrupt>;
+template class ArmFaultVals<FastInterrupt>;
+template class ArmFaultVals<VirtualFastInterrupt>;
+template class ArmFaultVals<SupervisorTrap>;
+template class ArmFaultVals<SecureMonitorTrap>;
+template class ArmFaultVals<PCAlignmentFault>;
+template class ArmFaultVals<SPAlignmentFault>;
+template class ArmFaultVals<SystemError>;
+template class ArmFaultVals<FlushPipe>;
+template class ArmFaultVals<ArmSev>;
+template class AbortFault<PrefetchAbort>;
+template class AbortFault<DataAbort>;
+template class AbortFault<VirtualDataAbort>;
+
+
+IllegalInstSetStateFault::IllegalInstSetStateFault()
+{}
+
} // namespace ArmISA