path: root/arch/alpha/alpha_memory.cc
author	Steve Reinhardt <stever@eecs.umich.edu>	2004-06-15 10:48:08 -0700
committer	Steve Reinhardt <stever@eecs.umich.edu>	2004-06-15 10:48:08 -0700
commit	d53c6c168afa7eafdf8c4fa10a10f835db25e3da (patch)
tree	c001ef81c09fb8e52afb64df5b2a5a231fe7a352 /arch/alpha/alpha_memory.cc
parent	7b24ae00dc2b3f503a15c28a7728cfc9a3e9299f (diff)
download	gem5-d53c6c168afa7eafdf8c4fa10a10f835db25e3da.tar.xz
Get software prefetching to work in full-system mode.
Mostly a matter of keeping prefetches to invalid addrs from messing
up VM IPRs. Also discovered that wh64s were not being treated as
prefetches, when they really should be (for the most part, anyway).

arch/alpha/alpha_memory.cc:
arch/alpha/alpha_memory.hh:
    - Get rid of intrlock flag for locking VM fault regs (a la EV5);
      instead, just don't update regs on VPTE loads (a la EV6).
    - Add NO_FAULT MemReq flag to indicate references that should not
      cause page faults (i.e., prefetches).
arch/alpha/ev5.cc:
    - Get rid of intrlock flag for locking VM fault regs (a la EV5);
      instead, just don't update regs on VPTE loads (a la EV6).
    - Add Fault trace flag.
arch/alpha/isa_desc:
    - Add NO_FAULT MemReq flag to indicate references that should not
      cause page faults (i.e., prefetches).
    - Mark wh64 as a "data prefetch" instruction so it gets controlled
      properly by the FullCPU data prefetch control switch.
    - Align wh64 EA in decoder so issue stage doesn't need to worry
      about it.
arch/alpha/isa_traits.hh:
    - Get rid of intrlock flag for locking VM fault regs (a la EV5);
      instead, just don't update regs on VPTE loads (a la EV6).
base/traceflags.py:
    - Add Fault trace flag.
cpu/simple_cpu/simple_cpu.hh:
    - Pass MemReq flags to writeHint() operation.
cpu/static_inst.hh:
    - Update comment re: prefetches.

--HG--
extra : convert_revision : 62e466b0f4c0ff9961796270fa2e371ec24bcbb6
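For readers skimming the diff below, here is a minimal, self-contained sketch of
the guard this patch adds to AlphaDTB::fault(). The VPTE and NO_FAULT flag names
and the misspeculating check are taken from the diff; the SimpleReq struct, the
particular flag bit values, and updateFaultRegs() are hypothetical scaffolding
for illustration, not the real MemReq API.

    // Sketch of the EV6-style fault-register policy: latch the VM fault
    // IPRs only for normal, non-speculative references. VPTE (page-table)
    // loads and NO_FAULT prefetches leave the IPRs untouched, which is
    // what lets the old intrlock bit be removed.
    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-ins for the MemReq flag bits named in the diff.
    const std::uint64_t VPTE     = 1 << 0;  // virtual PTE load
    const std::uint64_t NO_FAULT = 1 << 1;  // prefetch: must not page-fault

    struct SimpleReq {                      // hypothetical, not MemReq
        std::uint64_t vaddr;
        std::uint64_t flags;
        bool          misspeculating;
    };

    // Mirrors the condition in the rewritten AlphaDTB::fault().
    bool updateFaultRegs(const SimpleReq &req)
    {
        return !req.misspeculating
            && !(req.flags & VPTE)
            && !(req.flags & NO_FAULT);
    }

    int main()
    {
        SimpleReq normal   = {0x1000, 0,        false};
        SimpleReq prefetch = {0x2000, NO_FAULT, false};
        SimpleReq vpte     = {0x3000, VPTE,     false};

        std::cout << "normal:   " << updateFaultRegs(normal)   << '\n'   // 1
                  << "prefetch: " << updateFaultRegs(prefetch) << '\n'   // 0
                  << "vpte:     " << updateFaultRegs(vpte)     << '\n';  // 0
    }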
Diffstat (limited to 'arch/alpha/alpha_memory.cc')
-rw-r--r--	arch/alpha/alpha_memory.cc	47
1 file changed, 21 insertions, 26 deletions
diff --git a/arch/alpha/alpha_memory.cc b/arch/alpha/alpha_memory.cc
index 23815bf01..58aa13b8f 100644
--- a/arch/alpha/alpha_memory.cc
+++ b/arch/alpha/alpha_memory.cc
@@ -415,12 +415,19 @@ AlphaDTB::regStats()
}
void
-AlphaDTB::fault(Addr vaddr, uint64_t flags, ExecContext *xc) const
+AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
{
+ ExecContext *xc = req->xc;
+ Addr vaddr = req->vaddr;
uint64_t *ipr = xc->regs.ipr;
- // set fault address and flags
- if (!xc->misspeculating() && !xc->regs.intrlock) {
+ // Set fault address and flags. Even though we're modeling an
+ // EV5, we use the EV6 technique of not latching fault registers
+ // on VPTE loads (instead of locking the registers until IPR_VA is
+ // read, like the EV5). The EV6 approach is cleaner and seems to
+ // work with EV5 PAL code, but not the other way around.
+ if (!xc->misspeculating()
+ && !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
// set VA register with faulting address
ipr[AlphaISA::IPR_VA] = vaddr;
@@ -432,9 +439,6 @@ AlphaDTB::fault(Addr vaddr, uint64_t flags, ExecContext *xc) const
// set VA_FORM register with faulting formatted address
ipr[AlphaISA::IPR_VA_FORM] =
ipr[AlphaISA::IPR_MVPTBR] | (VA_VPN(vaddr) << 3);
-
- // lock these registers until the VA register is read
- xc->regs.intrlock = true;
}
}
@@ -459,10 +463,8 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
} else {
// verify that this is a good virtual address
if (!validVirtualAddress(req->vaddr)) {
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
- MM_STAT_ACV_MASK),
- req->xc);
+ fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
+ MM_STAT_ACV_MASK));
if (write) { write_acv++; } else { read_acv++; }
return DTB_Fault_Fault;
@@ -476,9 +478,7 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
// only valid in kernel mode
if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
AlphaISA::mode_kernel) {
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK),
- req->xc);
+ fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK));
if (write) { write_acv++; } else { read_acv++; }
return DTB_Acv_Fault;
}
@@ -496,9 +496,8 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
if (!pte) {
// page fault
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK),
- req->xc);
+ fault(req,
+ (write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK);
if (write) { write_misses++; } else { read_misses++; }
return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
}
@@ -508,29 +507,25 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
if (write) {
if (!(pte->xwe & MODE2MASK(mode))) {
// declare the instruction access fault
- fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
- (pte->fonw ? MM_STAT_FONW_MASK : 0),
- req->xc);
+ fault(req, (MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
+ (pte->fonw ? MM_STAT_FONW_MASK : 0)));
write_acv++;
return DTB_Fault_Fault;
}
if (pte->fonw) {
- fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_FONW_MASK,
- req->xc);
+ fault(req, MM_STAT_WR_MASK | MM_STAT_FONW_MASK);
write_acv++;
return DTB_Fault_Fault;
}
} else {
if (!(pte->xre & MODE2MASK(mode))) {
- fault(req->vaddr,
- MM_STAT_ACV_MASK |
- (pte->fonr ? MM_STAT_FONR_MASK : 0),
- req->xc);
+ fault(req, (MM_STAT_ACV_MASK |
+ (pte->fonr ? MM_STAT_FONR_MASK : 0)));
read_acv++;
return DTB_Acv_Fault;
}
if (pte->fonr) {
- fault(req->vaddr, MM_STAT_FONR_MASK, req->xc);
+ fault(req, MM_STAT_FONR_MASK);
read_acv++;
return DTB_Fault_Fault;
}