author      Gabe Black <gblack@eecs.umich.edu>      2010-10-16 00:00:54 -0700
committer   Gabe Black <gblack@eecs.umich.edu>      2010-10-16 00:00:54 -0700
commit      b289966a7817487d05bdf9722786a1216032978a (patch)
tree        0b32cca3c61ee957b40b1127ffe16e724d065eba /src/arch
parent      ab9f062166085c9750eeee2318c25efeb2ec5948 (diff)
download    gem5-b289966a7817487d05bdf9722786a1216032978a.tar.xz
Mem: Reclaim some request flags used by MIPS for alignment checking.
These flags were being used to identify what alignment a request needed, but
the same information is available from the request size. This change also
eliminates the isMisaligned function. If more complicated alignment checks
are needed, they can be signaled using the ASI_BITS space in the flags
vector, as is currently done for ARM.
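
For reference, the size-based test that replaces these flags can be written as a standalone predicate. This is only an illustrative sketch (the Addr alias, the isMisaligned name, and the test values are placeholders, not gem5 code); it mirrors the expression added to tlb.cc in the diff below, which relies on the access size being a power of two:

    #include <cassert>
    #include <cstdint>

    using Addr = uint64_t;  // stand-in for gem5's Addr typedef

    // An access of size N (a power of two) is misaligned when any of the
    // low log2(N) bits of the virtual address are set.
    static bool
    isMisaligned(Addr vaddr, unsigned size)
    {
        return ((size - 1) & vaddr) != 0;
    }

    int main()
    {
        assert(!isMisaligned(0x1000, 4));  // word access, word aligned
        assert( isMisaligned(0x1002, 4));  // word access, only halfword aligned
        assert(!isMisaligned(0x1002, 2));  // halfword access, halfword aligned
        assert(!isMisaligned(0x1003, 1));  // byte accesses are never misaligned
        return 0;
    }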
Diffstat (limited to 'src/arch')
-rw-r--r--   src/arch/mips/isa/decoder.isa   14
-rw-r--r--   src/arch/mips/tlb.cc            66
2 files changed, 43 insertions, 37 deletions
diff --git a/src/arch/mips/isa/decoder.isa b/src/arch/mips/isa/decoder.isa
index 36533e076..9832937b5 100644
--- a/src/arch/mips/isa/decoder.isa
+++ b/src/arch/mips/isa/decoder.isa
@@ -2491,12 +2491,11 @@ decode OPCODE_HI default Unknown::unknown() {
 
     0x4: decode OPCODE_LO {
         format LoadMemory {
-            0x0: lb({{ Rt.sw = Mem.sb; }}, mem_flags = NO_ALIGN_FAULT);
-            0x1: lh({{ Rt.sw = Mem.sh; }}, mem_flags = NO_HALF_WORD_ALIGN_FAULT);
+            0x0: lb({{ Rt.sw = Mem.sb; }});
+            0x1: lh({{ Rt.sw = Mem.sh; }});
             0x3: lw({{ Rt.sw = Mem.sw; }});
-            0x4: lbu({{ Rt.uw = Mem.ub;}}, mem_flags = NO_ALIGN_FAULT);
-            0x5: lhu({{ Rt.uw = Mem.uh; }},
-                     mem_flags = NO_HALF_WORD_ALIGN_FAULT);
+            0x4: lbu({{ Rt.uw = Mem.ub;}});
+            0x5: lhu({{ Rt.uw = Mem.uh; }});
         }
 
         format LoadUnalignedMemory {
@@ -2514,9 +2513,8 @@ decode OPCODE_HI default Unknown::unknown() {
 
     0x5: decode OPCODE_LO {
         format StoreMemory {
-            0x0: sb({{ Mem.ub = Rt<7:0>; }}, mem_flags = NO_ALIGN_FAULT);
-            0x1: sh({{ Mem.uh = Rt<15:0>; }},
-                    mem_flags = NO_HALF_WORD_ALIGN_FAULT);
+            0x0: sb({{ Mem.ub = Rt<7:0>; }});
+            0x1: sh({{ Mem.uh = Rt<15:0>; }});
             0x3: sw({{ Mem.uw = Rt<31:0>; }});
         }
 
diff --git a/src/arch/mips/tlb.cc b/src/arch/mips/tlb.cc
index e104b0bd2..288c8e88e 100644
--- a/src/arch/mips/tlb.cc
+++ b/src/arch/mips/tlb.cc
@@ -302,19 +302,23 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
 
     return NoFault;
 #else
-    if (IsKSeg0(req->getVaddr())) {
+    Addr vaddr = req->getVaddr();
+
+    bool misaligned = (req->getSize() - 1) & vaddr;
+
+    if (IsKSeg0(vaddr)) {
         // Address will not be translated through TLB, set response, and go!
-        req->setPaddr(KSeg02Phys(req->getVaddr()));
+        req->setPaddr(KSeg02Phys(vaddr));
         if (getOperatingMode(tc->readMiscReg(MISCREG_STATUS)) != mode_kernel ||
-            req->isMisaligned()) {
+            misaligned) {
             AddressErrorFault *Flt = new AddressErrorFault();
             /* BadVAddr must be set */
-            Flt->badVAddr = req->getVaddr();
+            Flt->badVAddr = vaddr;
             return Flt;
         }
-    } else if(IsKSeg1(req->getVaddr())) {
+    } else if(IsKSeg1(vaddr)) {
         // Address will not be translated through TLB, set response, and go!
-        req->setPaddr(KSeg02Phys(req->getVaddr()));
+        req->setPaddr(KSeg02Phys(vaddr));
     } else {
         /*
          * This is an optimization - smallPages is updated every time a TLB
@@ -323,16 +327,16 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
          */
         Addr VPN;
         if (smallPages == 1) {
-            VPN = ((req->getVaddr() >> 11));
+            VPN = (vaddr >> 11);
         } else {
-            VPN = ((req->getVaddr() >> 11) & 0xFFFFFFFC);
+            VPN = ((vaddr >> 11) & 0xFFFFFFFC);
         }
         uint8_t Asid = req->getAsid();
-        if (req->isMisaligned()) {
+        if (misaligned) {
             // Unaligned address!
             AddressErrorFault *Flt = new AddressErrorFault();
             /* BadVAddr must be set */
-            Flt->badVAddr = req->getVaddr();
+            Flt->badVAddr = vaddr;
             return Flt;
         }
         PTE *pte = lookup(VPN,Asid);
@@ -341,7 +345,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
             /* Check for valid bits */
             int EvenOdd;
             bool Valid;
-            if ((((req->getVaddr()) >> pte->AddrShiftAmount) & 1) == 0) {
+            if ((((vaddr) >> pte->AddrShiftAmount) & 1) == 0) {
                 // Check even bits
                 Valid = pte->V0;
                 EvenOdd = 0;
@@ -360,7 +364,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
                 Flt->entryHiVPN2X = (VPN & 0x3);
 
                 /* BadVAddr must be set */
-                Flt->badVAddr = req->getVaddr();
+                Flt->badVAddr = vaddr;
 
                 /* Context must be set */
                 Flt->contextBadVPN2 = (VPN >> 2);
@@ -375,7 +379,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
                 }
                 PAddr >>= (pte->AddrShiftAmount - 12);
                 PAddr <<= pte->AddrShiftAmount;
-                PAddr |= ((req->getVaddr()) & pte->OffsetMask);
+                PAddr |= (vaddr & pte->OffsetMask);
                 req->setPaddr(PAddr);
             }
         } else {
@@ -387,7 +391,7 @@ TLB::translateInst(RequestPtr req, ThreadContext *tc)
             Flt->entryHiVPN2X = (VPN & 0x3);
 
             /* BadVAddr must be set */
-            Flt->badVAddr = req->getVaddr();
+            Flt->badVAddr = vaddr;
 
             /* Context must be set */
             Flt->contextBadVPN2 = (VPN >> 2);
@@ -422,37 +426,41 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
 
     return NoFault;
 #else
-    if (IsKSeg0(req->getVaddr())) {
+    Addr vaddr = req->getVaddr();
+
+    bool misaligned = (req->getSize() - 1) & vaddr;
+
+    if (IsKSeg0(vaddr)) {
         // Address will not be translated through TLB, set response, and go!
-        req->setPaddr(KSeg02Phys(req->getVaddr()));
+        req->setPaddr(KSeg02Phys(vaddr));
         if (getOperatingMode(tc->readMiscReg(MISCREG_STATUS)) != mode_kernel ||
-            req->isMisaligned()) {
+            misaligned) {
             StoreAddressErrorFault *Flt = new StoreAddressErrorFault();
             /* BadVAddr must be set */
-            Flt->badVAddr = req->getVaddr();
+            Flt->badVAddr = vaddr;
             return Flt;
         }
-    } else if(IsKSeg1(req->getVaddr())) {
+    } else if(IsKSeg1(vaddr)) {
         // Address will not be translated through TLB, set response, and go!
-        req->setPaddr(KSeg02Phys(req->getVaddr()));
+        req->setPaddr(KSeg02Phys(vaddr));
     } else {
         /*
          * This is an optimization - smallPages is updated every time a TLB
         * operation is performed. That way, we don't need to look at
         * Config3 _ SP and PageGrain _ ESP every time we do a TLB lookup
         */
-        Addr VPN = ((req->getVaddr() >> 11) & 0xFFFFFFFC);
+        Addr VPN = (vaddr >> 11) & 0xFFFFFFFC;
         if (smallPages == 1) {
-            VPN = ((req->getVaddr() >> 11));
+            VPN = vaddr >> 11;
         }
         uint8_t Asid = req->getAsid();
 
         PTE *pte = lookup(VPN, Asid);
-        if (req->isMisaligned()) {
+        if (misaligned) {
            // Unaligned address!
            StoreAddressErrorFault *Flt = new StoreAddressErrorFault();
            /* BadVAddr must be set */
-           Flt->badVAddr = req->getVaddr();
+           Flt->badVAddr = vaddr;
            return Flt;
         }
         if (pte != NULL) {
@@ -461,7 +469,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
             int EvenOdd;
             bool Valid;
             bool Dirty;
-            if (((((req->getVaddr()) >> pte->AddrShiftAmount) & 1)) == 0) {
+            if ((((vaddr >> pte->AddrShiftAmount) & 1)) == 0) {
                 // Check even bits
                 Valid = pte->V0;
                 Dirty = pte->D0;
@@ -482,7 +490,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
                 Flt->entryHiVPN2X = (VPN & 0x3);
 
                 /* BadVAddr must be set */
-                Flt->badVAddr = req->getVaddr();
+                Flt->badVAddr = vaddr;
 
                 /* Context must be set */
                 Flt->contextBadVPN2 = (VPN >> 2);
@@ -498,7 +506,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
                 Flt->entryHiVPN2X = (VPN & 0x3);
 
                 /* BadVAddr must be set */
-                Flt->badVAddr = req->getVaddr();
+                Flt->badVAddr = vaddr;
 
                 /* Context must be set */
                 Flt->contextBadVPN2 = (VPN >> 2);
@@ -512,7 +520,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
                 }
                 PAddr >>= (pte->AddrShiftAmount - 12);
                 PAddr <<= pte->AddrShiftAmount;
-                PAddr |= ((req->getVaddr()) & pte->OffsetMask);
+                PAddr |= (vaddr & pte->OffsetMask);
                 req->setPaddr(PAddr);
             }
         } else {
@@ -524,7 +532,7 @@ TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
             Flt->entryHiVPN2X = (VPN & 0x3);
 
             /* BadVAddr must be set */
-            Flt->badVAddr = req->getVaddr();
+            Flt->badVAddr = vaddr;
 
             /* Context must be set */
             Flt->contextBadVPN2 = (VPN >> 2);
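
A side note on the design (an observation, not something stated in the change itself): the (size - 1) & vaddr test only expresses natural alignment when the access size is a power of two, which is the case for ordinary MIPS loads and stores. The short, self-contained check below, again just a hypothetical sketch rather than gem5 code, enumerates which size/address pairs the test rejects:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const unsigned sizes[] = {1, 2, 4, 8};          // power-of-two access sizes
        const uint64_t vaddrs[] = {0x1000, 0x1001, 0x1002, 0x1004, 0x1006};

        // Print which (size, address) pairs the size-based test flags as
        // misaligned; only offsets within the access size trigger it.
        for (unsigned size : sizes) {
            for (uint64_t vaddr : vaddrs) {
                bool misaligned = ((uint64_t(size) - 1) & vaddr) != 0;
                std::printf("size %u @ 0x%04llx : %s\n", size,
                            (unsigned long long)vaddr,
                            misaligned ? "misaligned" : "aligned");
            }
        }
        return 0;
    }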