author     Gabe Black <gblack@eecs.umich.edu>  2010-10-16 00:00:54 -0700
committer  Gabe Black <gblack@eecs.umich.edu>  2010-10-16 00:00:54 -0700
commit     b289966a7817487d05bdf9722786a1216032978a
tree       0b32cca3c61ee957b40b1127ffe16e724d065eba
parent     ab9f062166085c9750eeee2318c25efeb2ec5948
download   gem5-b289966a7817487d05bdf9722786a1216032978a.tar.xz
Mem: Reclaim some request flags used by MIPS for alignment checking.
These flags were being used to identify what alignment a request needed, but the same information is available from the request size. This change also eliminates the isMisaligned function. If more complicated alignment checks are needed, they can be signaled using the ASI_BITS space in the flags vector, as is currently done for ARM.
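Note: the size-based check relies on MIPS memory access sizes being powers of two. An access of size N is aligned iff the low log2(N) bits of the address are clear, which is exactly what (N - 1) & vaddr tests. A minimal standalone sketch of the idiom (illustrative only, not gem5 code):

    #include <cassert>
    #include <cstdint>

    // True if a power-of-two-sized access is misaligned; mirrors the
    // expression this patch introduces: (req->getSize() - 1) & vaddr.
    static bool misaligned(uint64_t vaddr, unsigned size)
    {
        return ((size - 1) & vaddr) != 0;
    }

    int main()
    {
        assert(!misaligned(0x1001, 1)); // byte accesses are never misaligned
        assert( misaligned(0x1001, 2)); // halfword: bit 0 must be clear
        assert(!misaligned(0x1002, 2));
        assert( misaligned(0x1002, 4)); // word: bits 1:0 must be clear
        assert(!misaligned(0x1004, 4));
        return 0;
    }

This is why the per-opcode flags in decoder.isa below become redundant: lb/lbu/sb access one byte and lh/lhu/sh two, so the request size alone implies the required check.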
-rw-r--r--  src/arch/mips/isa/decoder.isa | 14
-rw-r--r--  src/arch/mips/tlb.cc          | 66
-rw-r--r--  src/mem/request.hh            | 22
3 files changed, 43 insertions(+), 59 deletions(-)
diff --git a/src/arch/mips/isa/decoder.isa b/src/arch/mips/isa/decoder.isa
index 36533e076..9832937b5 100644
--- a/src/arch/mips/isa/decoder.isa
+++ b/src/arch/mips/isa/decoder.isa
@@ -2491,12 +2491,11 @@ decode OPCODE_HI default Unknown::unknown() {
0x4: decode OPCODE_LO {
format LoadMemory {
- 0x0: lb({{ Rt.sw = Mem.sb; }}, mem_flags = NO_ALIGN_FAULT);
- 0x1: lh({{ Rt.sw = Mem.sh; }}, mem_flags = NO_HALF_WORD_ALIGN_FAULT);
+ 0x0: lb({{ Rt.sw = Mem.sb; }});
+ 0x1: lh({{ Rt.sw = Mem.sh; }});
0x3: lw({{ Rt.sw = Mem.sw; }});
- 0x4: lbu({{ Rt.uw = Mem.ub;}}, mem_flags = NO_ALIGN_FAULT);
- 0x5: lhu({{ Rt.uw = Mem.uh; }},
- mem_flags = NO_HALF_WORD_ALIGN_FAULT);
+ 0x4: lbu({{ Rt.uw = Mem.ub;}});
+ 0x5: lhu({{ Rt.uw = Mem.uh; }});
}
format LoadUnalignedMemory {
@@ -2514,9 +2513,8 @@ decode OPCODE_HI default Unknown::unknown() {
0x5: decode OPCODE_LO {
format StoreMemory {
- 0x0: sb({{ Mem.ub = Rt<7:0>; }}, mem_flags = NO_ALIGN_FAULT);
- 0x1: sh({{ Mem.uh = Rt<15:0>; }},
- mem_flags = NO_HALF_WORD_ALIGN_FAULT);
+ 0x0: sb({{ Mem.ub = Rt<7:0>; }});
+ 0x1: sh({{ Mem.uh = Rt<15:0>; }});
0x3: sw({{ Mem.uw = Rt<31:0>; }});
}
diff --git a/src/arch/mips/tlb.cc b/src/arch/mips/tlb.cc
index e104b0bd2..288c8e88e 100644
--- a/src/arch/mips/tlb.cc
+++ b/src/arch/mips/tlb.cc
@@ -302,19 +302,23 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
return NoFault;
#else
- if (IsKSeg0(req->getVaddr())) {
+ Addr vaddr = req->getVaddr();
+
+ bool misaligned = (req->getSize() - 1) & vaddr;
+
+ if (IsKSeg0(vaddr)) {
// Address will not be translated through TLB, set response, and go!
- req->setPaddr(KSeg02Phys(req->getVaddr()));
+ req->setPaddr(KSeg02Phys(vaddr));
if (getOperatingMode(tc->readMiscReg(MISCREG_STATUS)) != mode_kernel ||
- req->isMisaligned()) {
+ misaligned) {
AddressErrorFault *Flt = new AddressErrorFault();
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
return Flt;
}
- } else if(IsKSeg1(req->getVaddr())) {
+ } else if(IsKSeg1(vaddr)) {
// Address will not be translated through TLB, set response, and go!
- req->setPaddr(KSeg02Phys(req->getVaddr()));
+ req->setPaddr(KSeg02Phys(vaddr));
} else {
/*
* This is an optimization - smallPages is updated every time a TLB
@@ -323,16 +327,16 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
*/
Addr VPN;
if (smallPages == 1) {
- VPN = ((req->getVaddr() >> 11));
+ VPN = (vaddr >> 11);
} else {
- VPN = ((req->getVaddr() >> 11) & 0xFFFFFFFC);
+ VPN = ((vaddr >> 11) & 0xFFFFFFFC);
}
uint8_t Asid = req->getAsid();
- if (req->isMisaligned()) {
+ if (misaligned) {
// Unaligned address!
AddressErrorFault *Flt = new AddressErrorFault();
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
return Flt;
}
PTE *pte = lookup(VPN,Asid);
@@ -341,7 +345,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
/* Check for valid bits */
int EvenOdd;
bool Valid;
- if ((((req->getVaddr()) >> pte->AddrShiftAmount) & 1) == 0) {
+ if ((((vaddr) >> pte->AddrShiftAmount) & 1) == 0) {
// Check even bits
Valid = pte->V0;
EvenOdd = 0;
@@ -360,7 +364,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
Flt->entryHiVPN2X = (VPN & 0x3);
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
/* Context must be set */
Flt->contextBadVPN2 = (VPN >> 2);
@@ -375,7 +379,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
}
PAddr >>= (pte->AddrShiftAmount - 12);
PAddr <<= pte->AddrShiftAmount;
- PAddr |= ((req->getVaddr()) & pte->OffsetMask);
+ PAddr |= (vaddr & pte->OffsetMask);
req->setPaddr(PAddr);
}
} else {
@@ -387,7 +391,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
Flt->entryHiVPN2X = (VPN & 0x3);
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
/* Context must be set */
Flt->contextBadVPN2 = (VPN >> 2);
@@ -422,37 +426,41 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
return NoFault;
#else
- if (IsKSeg0(req->getVaddr())) {
+ Addr vaddr = req->getVaddr();
+
+ bool misaligned = (req->getSize() - 1) & vaddr;
+
+ if (IsKSeg0(vaddr)) {
// Address will not be translated through TLB, set response, and go!
- req->setPaddr(KSeg02Phys(req->getVaddr()));
+ req->setPaddr(KSeg02Phys(vaddr));
if (getOperatingMode(tc->readMiscReg(MISCREG_STATUS)) != mode_kernel ||
- req->isMisaligned()) {
+ misaligned) {
StoreAddressErrorFault *Flt = new StoreAddressErrorFault();
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
return Flt;
}
- } else if(IsKSeg1(req->getVaddr())) {
+ } else if(IsKSeg1(vaddr)) {
// Address will not be translated through TLB, set response, and go!
- req->setPaddr(KSeg02Phys(req->getVaddr()));
+ req->setPaddr(KSeg02Phys(vaddr));
} else {
/*
* This is an optimization - smallPages is updated every time a TLB
* operation is performed. That way, we don't need to look at
* Config3 _ SP and PageGrain _ ESP every time we do a TLB lookup
*/
- Addr VPN = ((req->getVaddr() >> 11) & 0xFFFFFFFC);
+ Addr VPN = (vaddr >> 11) & 0xFFFFFFFC;
if (smallPages == 1) {
- VPN = ((req->getVaddr() >> 11));
+ VPN = vaddr >> 11;
}
uint8_t Asid = req->getAsid();
PTE *pte = lookup(VPN, Asid);
- if (req->isMisaligned()) {
+ if (misaligned) {
// Unaligned address!
StoreAddressErrorFault *Flt = new StoreAddressErrorFault();
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
return Flt;
}
if (pte != NULL) {
@@ -461,7 +469,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
int EvenOdd;
bool Valid;
bool Dirty;
- if (((((req->getVaddr()) >> pte->AddrShiftAmount) & 1)) == 0) {
+ if ((((vaddr >> pte->AddrShiftAmount) & 1)) == 0) {
// Check even bits
Valid = pte->V0;
Dirty = pte->D0;
@@ -482,7 +490,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
Flt->entryHiVPN2X = (VPN & 0x3);
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
/* Context must be set */
Flt->contextBadVPN2 = (VPN >> 2);
@@ -498,7 +506,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
Flt->entryHiVPN2X = (VPN & 0x3);
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
/* Context must be set */
Flt->contextBadVPN2 = (VPN >> 2);
@@ -512,7 +520,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
}
PAddr >>= (pte->AddrShiftAmount - 12);
PAddr <<= pte->AddrShiftAmount;
- PAddr |= ((req->getVaddr()) & pte->OffsetMask);
+ PAddr |= (vaddr & pte->OffsetMask);
req->setPaddr(PAddr);
}
} else {
@@ -524,7 +532,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
Flt->entryHiVPN2X = (VPN & 0x3);
/* BadVAddr must be set */
- Flt->badVAddr = req->getVaddr();
+ Flt->badVAddr = vaddr;
/* Context must be set */
Flt->contextBadVPN2 = (VPN >> 2);
diff --git a/src/mem/request.hh b/src/mem/request.hh
index 45551dd03..38daea266 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -74,10 +74,6 @@ class Request : public FastAlloc
/** This request is a clear exclusive. */
static const FlagsType CLEAR_LL = 0x00004000;
- /** The request should ignore unaligned access faults */
- static const FlagsType NO_ALIGN_FAULT = 0x00020000;
- /** The request should ignore unaligned access faults */
- static const FlagsType NO_HALF_WORD_ALIGN_FAULT = 0x00040000;
/** The request should not cause a memory access. */
static const FlagsType NO_ACCESS = 0x00080000;
/** This request will lock or unlock the accessed memory. When used with
@@ -459,24 +455,6 @@ class Request : public FastAlloc
bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
bool isMmapedIpr() const { return _flags.isSet(MMAPED_IPR); }
bool isClearLL() const { return _flags.isSet(CLEAR_LL); }
-
- bool
- isMisaligned() const
- {
- if (_flags.isSet(NO_ALIGN_FAULT))
- return false;
-
- if ((_vaddr & 0x1))
- return true;
-
- if (_flags.isSet(NO_HALF_WORD_ALIGN_FAULT))
- return false;
-
- if ((_vaddr & 0x2))
- return true;
-
- return false;
- }
};
#endif // __MEM_REQUEST_HH__
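For reference, the removed isMisaligned() hard-coded a word-alignment check, with the two flags opting out of it for byte and halfword accesses; the size-based expression subsumes all three cases. A sketch of the equivalence, assuming the flags were set exactly as the MIPS decoder did (illustrative only, not gem5 code):

    #include <cassert>
    #include <cstdint>

    // Reconstruction of the removed flag-based check.
    static bool oldCheck(uint64_t vaddr, bool noAlign, bool noHalfWord)
    {
        if (noAlign)                 // NO_ALIGN_FAULT: skip all checks
            return false;
        if (vaddr & 0x1)
            return true;
        if (noHalfWord)              // NO_HALF_WORD_ALIGN_FAULT: bit 0 only
            return false;
        return (vaddr & 0x2) != 0;   // default: full word alignment
    }

    // The replacement: alignment derived from the access size alone.
    static bool newCheck(uint64_t vaddr, unsigned size)
    {
        return ((size - 1) & vaddr) != 0;
    }

    int main()
    {
        for (uint64_t vaddr = 0; vaddr < 16; vaddr++) {
            assert(oldCheck(vaddr, true,  false) == newCheck(vaddr, 1)); // lb/sb
            assert(oldCheck(vaddr, false, true)  == newCheck(vaddr, 2)); // lh/sh
            assert(oldCheck(vaddr, false, false) == newCheck(vaddr, 4)); // lw/sw
        }
        return 0;
    }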