author     Giacomo Travaglini <giacomo.travaglini@arm.com>    2019-07-31 09:41:48 +0100
committer  Giacomo Travaglini <giacomo.travaglini@arm.com>    2019-09-18 13:30:01 +0000
commit     a4bf7c3c20756f01f39e644d65f97fd9813502f0 (patch)
tree       f9488099d2ccdddce14473881d82d59be82b0605
parent     dc70987e470d66f584e0ddf606e9f07da994ba75 (diff)
download   gem5-a4bf7c3c20756f01f39e644d65f97fd9813502f0.tar.xz
arch-arm: Fix Data Abort ISS when caused by Atomic operation
Data Aborts caused by an atomic instruction have a special rule for
their syndrome: from an ISS point of view they count as reads if a read
to that address would generate a fault; they count as writes otherwise
(ISS.WnR bit).

This patch implements this in the TLB. For permission faults we need to
explicitly check whether a read would trigger a fault (e.g. by checking
the AP bits), since permissions can allow read-only accesses. For other
MMU exceptions (like translation faults) we are confident the nature of
the access doesn't affect the generation of a fault. This means that if
the access is atomic, we treat it as a read from an ISS.WnR point of
view.

Change-Id: Ia524aa6ae07f81513cdc26c516b5fd9b01a931c3
Signed-off-by: Giacomo Travaglini <giacomo.travaglini@arm.com>
Reviewed-by: Andreas Sandberg <andreas.sandberg@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/20981
Maintainer: Andreas Sandberg <andreas.sandberg@arm.com>
Tested-by: kokoro <noreply+kokoro@google.com>
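To make the rule above concrete, the following is a minimal sketch of how the
reported ISS.WnR value is chosen. It is not part of the patch; the helper name
dataAbortWnR and its parameters are illustrative only. It mirrors the
"is_atomic ? false : isWrite" expressions added in table_walker.cc and the
"(is_atomic && !grant_read) ? false : is_write" expression added in tlb.cc
below.

// Sketch only: dataAbortWnR and its parameters are illustrative, not gem5 API.
// Returns the value that would be reported in ISS.WnR for a Data Abort.
bool
dataAbortWnR(bool is_write, bool is_atomic, bool is_permission_fault,
             bool grant_read)
{
    if (!is_atomic)
        return is_write;    // Non-atomic accesses report their own direction.

    if (is_permission_fault) {
        // Atomics count as reads if a read to the address would also have
        // faulted (grant_read == false); otherwise the write half is to blame.
        return grant_read ? is_write : false;
    }

    // Translation faults and other MMU exceptions don't depend on the access
    // direction, so an atomic is reported as a read (WnR = 0).
    return false;
}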
-rw-r--r--  src/arch/arm/table_walker.cc | 47
-rw-r--r--  src/arch/arm/tlb.cc          | 19
2 files changed, 46 insertions(+), 20 deletions(-)
diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc
index 508170a55..bfa5ba7bb 100644
--- a/src/arch/arm/table_walker.cc
+++ b/src/arch/arm/table_walker.cc
@@ -457,6 +457,7 @@ TableWalker::processWalk()
// If translation isn't enabled, we shouldn't be here
assert(currState->sctlr.m || isStage2);
+ const bool is_atomic = currState->req->isAtomic();
DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
@@ -478,7 +479,8 @@ TableWalker::processWalk()
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess, currState->isWrite,
+ TlbEntry::DomainType::NoAccess,
+ is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + L1, isStage2,
ArmFault::VmsaTran);
}
@@ -497,7 +499,8 @@ TableWalker::processWalk()
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess, currState->isWrite,
+ TlbEntry::DomainType::NoAccess,
+ is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + L1, isStage2,
ArmFault::VmsaTran);
}
@@ -591,6 +594,8 @@ TableWalker::processWalkLPAE()
else
ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
+ const bool is_atomic = currState->req->isAtomic();
+
// The following code snippet selects the appropriate translation table base
// address (TTBR0 or TTBR1) and the appropriate starting lookup level
// depending on the address range supported by the translation table (ARM
@@ -609,7 +614,7 @@ TableWalker::processWalkLPAE()
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
- currState->isWrite,
+ is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + L1,
isStage2,
ArmFault::LpaeTran);
@@ -633,7 +638,7 @@ TableWalker::processWalkLPAE()
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
- currState->isWrite,
+ is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + L1,
isStage2,
ArmFault::LpaeTran);
@@ -655,7 +660,8 @@ TableWalker::processWalkLPAE()
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
- currState->isWrite, ArmFault::TranslationLL + L1,
+ is_atomic ? false : currState->isWrite,
+ ArmFault::TranslationLL + L1,
isStage2, ArmFault::LpaeTran);
}
@@ -849,6 +855,8 @@ TableWalker::processWalkAArch64()
break;
}
+ const bool is_atomic = currState->req->isAtomic();
+
if (fault) {
Fault f;
if (currState->isFetch)
@@ -860,7 +868,7 @@ TableWalker::processWalkAArch64()
f = std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
- currState->isWrite,
+ is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + L0,
isStage2, ArmFault::LpaeTran);
@@ -946,7 +954,7 @@ TableWalker::processWalkAArch64()
f = std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
- currState->isWrite,
+ is_atomic ? false : currState->isWrite,
ArmFault::AddressSizeLL + start_lookup_level,
isStage2,
ArmFault::LpaeTran);
@@ -1458,6 +1466,8 @@ TableWalker::doL1Descriptor()
currState->vaddr_tainted, currState->l1Desc.data);
TlbEntry te;
+ const bool is_atomic = currState->req->isAtomic();
+
switch (currState->l1Desc.type()) {
case L1Descriptor::Ignore:
case L1Descriptor::Reserved:
@@ -1478,7 +1488,7 @@ TableWalker::doL1Descriptor()
std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
- currState->isWrite,
+ is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + L1, isStage2,
ArmFault::VmsaTran);
return;
@@ -1492,7 +1502,7 @@ TableWalker::doL1Descriptor()
currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted,
currState->l1Desc.domain(),
- currState->isWrite,
+ is_atomic ? false : currState->isWrite,
ArmFault::AccessFlagLL + L1,
isStage2,
ArmFault::VmsaTran);
@@ -1555,7 +1565,7 @@ TableWalker::generateLongDescFault(ArmFault::FaultSource src)
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
- currState->isWrite,
+ currState->req->isAtomic() ? false : currState->isWrite,
src + currState->longDesc.lookupLevel,
isStage2,
ArmFault::LpaeTran);
@@ -1599,16 +1609,15 @@ TableWalker::doLongDescriptor()
switch (currState->longDesc.type()) {
case LongDescriptor::Invalid:
- if (!currState->timing) {
- currState->tc = NULL;
- currState->req = NULL;
- }
-
DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
currState->longDesc.lookupLevel,
ArmFault::TranslationLL + currState->longDesc.lookupLevel);
currState->fault = generateLongDescFault(ArmFault::TranslationLL);
+ if (!currState->timing) {
+ currState->tc = NULL;
+ currState->req = NULL;
+ }
return;
case LongDescriptor::Block:
@@ -1735,6 +1744,8 @@ TableWalker::doL2Descriptor()
currState->vaddr_tainted, currState->l2Desc.data);
TlbEntry te;
+ const bool is_atomic = currState->req->isAtomic();
+
if (currState->l2Desc.invalid()) {
DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
if (!currState->timing) {
@@ -1750,7 +1761,8 @@ TableWalker::doL2Descriptor()
else
currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted, currState->l1Desc.domain(),
- currState->isWrite, ArmFault::TranslationLL + L2,
+ is_atomic ? false : currState->isWrite,
+ ArmFault::TranslationLL + L2,
isStage2,
ArmFault::VmsaTran);
return;
@@ -1765,7 +1777,8 @@ TableWalker::doL2Descriptor()
currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted,
- TlbEntry::DomainType::NoAccess, currState->isWrite,
+ TlbEntry::DomainType::NoAccess,
+ is_atomic ? false : currState->isWrite,
ArmFault::AccessFlagLL + L2, isStage2,
ArmFault::VmsaTran);
}
diff --git a/src/arch/arm/tlb.cc b/src/arch/arm/tlb.cc
index 1e8003c21..1e4904c71 100644
--- a/src/arch/arm/tlb.cc
+++ b/src/arch/arm/tlb.cc
@@ -801,6 +801,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
bool is_fetch = (mode == Execute);
// Cache clean operations require read permissions to the specified VA
bool is_write = !req->isCacheClean() && mode == Write;
+ bool is_atomic = req->isAtomic();
bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
updateMiscReg(tc, curTranType);
@@ -825,7 +826,8 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
alignFaults++;
return std::make_shared<DataAbort>(
vaddr_tainted,
- TlbEntry::DomainType::NoAccess, is_write,
+ TlbEntry::DomainType::NoAccess,
+ is_atomic ? false : is_write,
ArmFault::AlignmentFault, isStage2,
ArmFault::LpaeTran);
}
@@ -852,6 +854,12 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
bool r = !is_write && !is_fetch;
bool w = is_write;
bool x = is_fetch;
+
+ // grant_read is used for faults from an atomic instruction that
+ // both reads from and writes to a memory location. From an ISS point
+ // of view they count as read if a read to that address would have
+ // generated the fault; they count as writes otherwise
+ bool grant_read = true;
DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
"w:%d, x:%d\n", ap, xn, pxn, r, w, x);
@@ -861,18 +869,20 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
// The following permissions are described in ARM DDI 0487A.f
// D4-1802
uint8_t hap = 0x3 & te->hap;
+ grant_read = hap & 0x1;
if (is_fetch) {
// sctlr.wxn overrides the xn bit
grant = !sctlr.wxn && !xn;
} else if (is_write) {
grant = hap & 0x2;
} else { // is_read
- grant = hap & 0x1;
+ grant = grant_read;
}
} else {
switch (aarch64EL) {
case EL0:
{
+ grant_read = ap & 0x1;
uint8_t perm = (ap << 2) | (xn << 1) | pxn;
switch (perm) {
case 0:
@@ -906,6 +916,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
{
if (checkPAN(tc, ap, req, mode)) {
grant = false;
+ grant_read = false;
break;
}
@@ -945,6 +956,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
case EL2:
if (hcr.e2h && checkPAN(tc, ap, req, mode)) {
grant = false;
+ grant_read = false;
break;
}
M5_FALLTHROUGH;
@@ -990,7 +1002,8 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
"priv:%d write:%d\n", ap, is_priv, is_write);
return std::make_shared<DataAbort>(
- vaddr_tainted, te->domain, is_write,
+ vaddr_tainted, te->domain,
+ (is_atomic && !grant_read) ? false : is_write,
ArmFault::PermissionLL + te->lookupLevel,
isStage2, ArmFault::LpaeTran);
}