author    Andreas Sandberg <Andreas.Sandberg@ARM.com>  2015-03-02 04:00:28 -0500
committer Andreas Sandberg <Andreas.Sandberg@ARM.com>  2015-03-02 04:00:28 -0500
commit    3b4ae7debb58e4f3fc4a5f2ad9f7faf896f854d8 (patch)
tree      734b55f33bf18354e6d5f73a14be4d7c7f9b96fb /src/arch/arm/isa.cc
parent    804b11a3ed37dd429de2e4ecf3a8892843d57e12 (diff)
download  gem5-3b4ae7debb58e4f3fc4a5f2ad9f7faf896f854d8.tar.xz
arm: Don't truncate 16-bit ASIDs to 8 bits
The ISA code sometimes stores 16-bit ASIDs as 8-bit unsigned integers, and it has a couple of inverted checks that mask out the high 8 bits of an ASID when 16-bit ASIDs have been *enabled*. This changeset fixes both of those issues.
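In short, the ASID carried in a TLBI operand is a 16-bit field (bits 63:48), and it should only be truncated to 8 bits when the implementation does *not* support 16-bit ASIDs. The standalone sketch below illustrates the corrected check; `mask()` and `bits()` imitate gem5's bitfield helpers, and `tlbiAsid()`/`haveLargeAsid64` are stand-in names used here for illustration only, not part of the patch itself.

// Minimal sketch of the corrected ASID truncation logic (assumed helper
// names; mask() and bits() imitate gem5's bitfield utilities).
#include <cassert>
#include <cstdint>

static uint64_t mask(int nbits) { return (1ULL << nbits) - 1; }

static uint64_t bits(uint64_t val, int hi, int lo)
{
    return (val >> lo) & mask(hi - lo + 1);
}

// Extract the ASID from a TLBI operand (bits 63:48) and truncate it to
// 8 bits only when 16-bit ASIDs are NOT supported.
static uint16_t tlbiAsid(uint64_t newVal, bool haveLargeAsid64)
{
    uint16_t asid = bits(newVal, 63, 48);  // 16-bit field, so uint16_t, not uint8_t
    if (!haveLargeAsid64)                  // inverted relative to the old, buggy check
        asid &= mask(8);
    return asid;
}

int main()
{
    const uint64_t op = 0xABCDULL << 48;    // operand carrying ASID 0xABCD
    assert(tlbiAsid(op, true)  == 0xABCD);  // 16-bit ASIDs supported: keep all bits
    assert(tlbiAsid(op, false) == 0xCD);    // 8-bit ASIDs only: truncate
    return 0;
}

In isa.cc the same `!haveLargeAsid64` guard appears in both places the patch touches: the TLBI handling in ISA::setMiscReg and the ISA::tlbiVA helper, as the diff below shows.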
Diffstat (limited to 'src/arch/arm/isa.cc')
-rw-r--r--  src/arch/arm/isa.cc | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/arch/arm/isa.cc b/src/arch/arm/isa.cc
index 1198f852f..9e760fbdf 100644
--- a/src/arch/arm/isa.cc
+++ b/src/arch/arm/isa.cc
@@ -1386,7 +1386,7 @@ ISA::setMiscReg(int misc_reg, const MiscReg &val, ThreadContext *tc)
                     oc = sys->getThreadContext(x);
                     assert(oc->getITBPtr() && oc->getDTBPtr());
                     asid = bits(newVal, 63, 48);
-                    if (haveLargeAsid64)
+                    if (!haveLargeAsid64)
                         asid &= mask(8);
                     oc->getITBPtr()->flushAsid(asid, secure_lookup, target_el);
                     oc->getDTBPtr()->flushAsid(asid, secure_lookup, target_el);
@@ -1941,10 +1941,10 @@ ISA::updateBootUncacheable(int sctlr_idx, ThreadContext *tc)
 }
 
 void
-ISA::tlbiVA(ThreadContext *tc, MiscReg newVal, uint8_t asid, bool secure_lookup,
-            uint8_t target_el)
+ISA::tlbiVA(ThreadContext *tc, MiscReg newVal, uint16_t asid,
+            bool secure_lookup, uint8_t target_el)
 {
-    if (haveLargeAsid64)
+    if (!haveLargeAsid64)
         asid &= mask(8);
     Addr va = ((Addr) bits(newVal, 43, 0)) << 12;
     System *sys = tc->getSystemPtr();