author     Steve Reinhardt <stever@eecs.umich.edu>   2004-06-15 10:48:08 -0700
committer  Steve Reinhardt <stever@eecs.umich.edu>   2004-06-15 10:48:08 -0700
commit     d53c6c168afa7eafdf8c4fa10a10f835db25e3da
tree       c001ef81c09fb8e52afb64df5b2a5a231fe7a352
parent     7b24ae00dc2b3f503a15c28a7728cfc9a3e9299f
Get software prefetching to work in full-system mode.
Mostly a matter of keeping prefetches to invalid addresses from
corrupting the VM fault IPRs (sketched just below).  Also discovered
that wh64 instructions were not being treated as prefetches, when they
really should be (for the most part, anyway).
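
In outline, the fix is to keep a no-fault reference from ever latching
the VM fault IPRs.  A condensed sketch of the guard, simplified from the
alpha_memory.cc hunk in the diff below (not the literal code):

    // Only ordinary, non-speculative accesses latch the faulting address.
    // VPTE loads and NO_FAULT references (software prefetches, wh64) skip
    // the update, so a prefetch to a bad address can't clobber the IPRs.
    if (!xc->misspeculating() &&
        !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
        ipr[AlphaISA::IPR_VA] = req->vaddr;  // faulting virtual address
        // ... the MM_STAT-style fault flags and VA_FORM are filled in here too ...
    }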
arch/alpha/alpha_memory.cc:
arch/alpha/alpha_memory.hh:
- Get rid of intrlock flag for locking VM fault regs (a la EV5);
instead, just don't update regs on VPTE loads (a la EV6).
- Add NO_FAULT MemReq flag to indicate references that should not
cause page faults (i.e., prefetches).
arch/alpha/ev5.cc:
- Get rid of intrlock flag for locking VM fault regs (a la EV5);
instead, just don't update regs on VPTE loads (a la EV6).
- Add Fault trace flag.
arch/alpha/isa_desc:
- Add NO_FAULT MemReq flag to indicate references that should not
cause page faults (i.e., prefetches).
- Mark wh64 as a "data prefetch" instruction so it gets controlled
properly by the FullCPU data prefetch control switch.
- Align wh64 EA in decoder so issue stage doesn't need to worry about it
  (see the alignment sketch after these notes).
arch/alpha/isa_traits.hh:
- Get rid of intrlock flag for locking VM fault regs (a la EV5);
instead, just don't update regs on VPTE loads (a la EV6).
base/traceflags.py:
- Add Fault trace flag.
cpu/simple_cpu/simple_cpu.hh:
- Pass MemReq flags to writeHint() operation.
cpu/static_inst.hh:
- Update comment re: prefetches.
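
The wh64 alignment note above amounts to masking the effective address
down to its 64-byte cache block in the decoder, mirroring the
EA = Rb & ~ULL(63) change in the isa_desc hunk below.  A minimal
standalone illustration (hypothetical register value):

    #include <cinttypes>
    #include <cstdio>

    int main()
    {
        // Same mask the decoder applies: EA = Rb & ~ULL(63).
        uint64_t Rb = 0x1000002a;                 // hypothetical source register
        uint64_t EA = Rb & ~UINT64_C(63);         // round down to the 64-byte block
        std::printf("EA = 0x%" PRIx64 "\n", EA);  // prints EA = 0x10000000
        return 0;
    }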
--HG--
extra : convert_revision : 62e466b0f4c0ff9961796270fa2e371ec24bcbb6
-rw-r--r--  arch/alpha/alpha_memory.cc    | 47
-rw-r--r--  arch/alpha/alpha_memory.hh    |  2
-rw-r--r--  arch/alpha/ev5.cc             |  5
-rw-r--r--  arch/alpha/isa_desc           | 11
-rw-r--r--  arch/alpha/isa_traits.hh      |  1
-rw-r--r--  base/traceflags.py            |  1
-rw-r--r--  cpu/simple_cpu/simple_cpu.hh  |  2
-rw-r--r--  cpu/static_inst.hh            |  3
8 files changed, 32 insertions(+), 40 deletions(-)
diff --git a/arch/alpha/alpha_memory.cc b/arch/alpha/alpha_memory.cc
index 23815bf01..58aa13b8f 100644
--- a/arch/alpha/alpha_memory.cc
+++ b/arch/alpha/alpha_memory.cc
@@ -415,12 +415,19 @@ AlphaDTB::regStats()
 }
 
 void
-AlphaDTB::fault(Addr vaddr, uint64_t flags, ExecContext *xc) const
+AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
 {
+    ExecContext *xc = req->xc;
+    Addr vaddr = req->vaddr;
     uint64_t *ipr = xc->regs.ipr;
 
-    // set fault address and flags
-    if (!xc->misspeculating() && !xc->regs.intrlock) {
+    // Set fault address and flags.  Even though we're modeling an
+    // EV5, we use the EV6 technique of not latching fault registers
+    // on VPTE loads (instead of locking the registers until IPR_VA is
+    // read, like the EV5).  The EV6 approach is cleaner and seems to
+    // work with EV5 PAL code, but not the other way around.
+    if (!xc->misspeculating()
+        && !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
         // set VA register with faulting address
         ipr[AlphaISA::IPR_VA] = vaddr;
 
@@ -432,9 +439,6 @@ AlphaDTB::fault(Addr vaddr, uint64_t flags, ExecContext *xc) const
 
         // set VA_FORM register with faulting formatted address
         ipr[AlphaISA::IPR_VA_FORM] = ipr[AlphaISA::IPR_MVPTBR] |
            (VA_VPN(vaddr) << 3);
-
-        // lock these registers until the VA register is read
-        xc->regs.intrlock = true;
     }
 }
 
@@ -459,10 +463,8 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
     } else {
         // verify that this is a good virtual address
         if (!validVirtualAddress(req->vaddr)) {
-            fault(req->vaddr,
-                  ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
-                   MM_STAT_ACV_MASK),
-                  req->xc);
+            fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
+                        MM_STAT_ACV_MASK));
 
             if (write) { write_acv++; } else { read_acv++; }
             return DTB_Fault_Fault;
@@ -476,9 +478,7 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
             // only valid in kernel mode
             if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
                 AlphaISA::mode_kernel) {
-                fault(req->vaddr,
-                      ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK),
-                      req->xc);
+                fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK));
 
                 if (write) { write_acv++; } else { read_acv++; }
                 return DTB_Acv_Fault;
             }
@@ -496,9 +496,8 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
 
             if (!pte) {
                 // page fault
-                fault(req->vaddr,
-                      ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK),
-                      req->xc);
+                fault(req,
+                      (write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK);
 
                 if (write) { write_misses++; } else { read_misses++; }
                 return (req->flags & VPTE) ?
                     Pdtb_Miss_Fault : Ndtb_Miss_Fault;
             }
@@ -508,29 +507,25 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
         if (write) {
             if (!(pte->xwe & MODE2MASK(mode))) {
                 // declare the instruction access fault
-                fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
-                      (pte->fonw ? MM_STAT_FONW_MASK : 0),
-                      req->xc);
+                fault(req, (MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
+                            (pte->fonw ? MM_STAT_FONW_MASK : 0)));
 
                 write_acv++;
                 return DTB_Fault_Fault;
             }
             if (pte->fonw) {
-                fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_FONW_MASK,
-                      req->xc);
+                fault(req, MM_STAT_WR_MASK | MM_STAT_FONW_MASK);
 
                 write_acv++;
                 return DTB_Fault_Fault;
             }
         } else {
             if (!(pte->xre & MODE2MASK(mode))) {
-                fault(req->vaddr,
-                      MM_STAT_ACV_MASK |
-                      (pte->fonr ? MM_STAT_FONR_MASK : 0),
-                      req->xc);
+                fault(req, (MM_STAT_ACV_MASK |
+                            (pte->fonr ? MM_STAT_FONR_MASK : 0)));
 
                 read_acv++;
                 return DTB_Acv_Fault;
             }
             if (pte->fonr) {
-                fault(req->vaddr, MM_STAT_FONR_MASK, req->xc);
+                fault(req, MM_STAT_FONR_MASK);
 
                 read_acv++;
                 return DTB_Fault_Fault;
             }
diff --git a/arch/alpha/alpha_memory.hh b/arch/alpha/alpha_memory.hh
index b5fc18255..fbd6ecf15 100644
--- a/arch/alpha/alpha_memory.hh
+++ b/arch/alpha/alpha_memory.hh
@@ -112,7 +112,7 @@ class AlphaDTB : public AlphaTLB
     Stats::Formula accesses;
 
   protected:
-    void fault(Addr pc, uint64_t flags, ExecContext *xc) const;
+    void fault(MemReqPtr &req, uint64_t flags) const;
 
   public:
     AlphaDTB(const std::string &name, int size);
diff --git a/arch/alpha/ev5.cc b/arch/alpha/ev5.cc
index ecf66f4f5..d2ca71b3a 100644
--- a/arch/alpha/ev5.cc
+++ b/arch/alpha/ev5.cc
@@ -162,6 +162,7 @@ AlphaISA::zeroRegisters(XC *xc)
 void
 ExecContext::ev5_trap(Fault fault)
 {
+    DPRINTF(Fault, "Fault %s\n", FaultName(fault));
     Stats::recordEvent(csprintf("Fault %s", FaultName(fault)));
 
     assert(!misspeculating());
@@ -302,11 +303,7 @@ ExecContext::readIpr(int idx, Fault &fault)
         break;
 
       case AlphaISA::IPR_VA:
-        // SFX: unlocks interrupt status registers
         retval = ipr[idx];
-
-        if (!misspeculating())
-            regs.intrlock = false;
         break;
 
       case AlphaISA::IPR_VA_FORM:
diff --git a/arch/alpha/isa_desc b/arch/alpha/isa_desc
index 9fee12485..6c5912c52 100644
--- a/arch/alpha/isa_desc
+++ b/arch/alpha/isa_desc
@@ -1023,7 +1023,7 @@ def LoadStoreBase(name, Name, ea_code, memacc_code, postacc_code = '',
     # and memory access flags (handled here).
 
     # Would be nice to autogenerate this list, but oh well.
-    valid_mem_flags = ['LOCKED', 'EVICT_NEXT', 'PF_EXCLUSIVE']
+    valid_mem_flags = ['LOCKED', 'NO_FAULT', 'EVICT_NEXT', 'PF_EXCLUSIVE']
     inst_flags = []
     mem_flags = []
     for f in flags:
@@ -1072,7 +1072,7 @@ def format LoadOrPrefetch(ea_code, memacc_code, *pf_flags) {{
     # Declare the prefetch instruction object.
 
     # convert flags from tuple to list to make them mutable
-    pf_flags = list(pf_flags) + ['IsMemRef', 'IsLoad', 'IsDataPrefetch', 'MemReadOp']
+    pf_flags = list(pf_flags) + ['IsMemRef', 'IsLoad', 'IsDataPrefetch', 'MemReadOp', 'NO_FAULT']
 
     (pf_header_output, pf_decoder_output, _, pf_exec_output) = \
         LoadStoreBase(name, Name + 'Prefetch', ea_code, '',
@@ -2369,9 +2369,10 @@ decode OPCODE default Unknown::unknown() {
     }
 
     format MiscPrefetch {
-        0xf800: wh64({{ EA = Rb; }},
-                     {{ xc->writeHint(EA, 64); }},
-                     IsMemRef, IsStore, MemWriteOp);
+        0xf800: wh64({{ EA = Rb & ~ULL(63); }},
+                     {{ xc->writeHint(EA, 64, memAccessFlags); }},
+                     IsMemRef, IsDataPrefetch, IsStore, MemWriteOp,
+                     NO_FAULT);
     }
 
     format BasicOperate {
diff --git a/arch/alpha/isa_traits.hh b/arch/alpha/isa_traits.hh
index 37ba77192..1a8ff663b 100644
--- a/arch/alpha/isa_traits.hh
+++ b/arch/alpha/isa_traits.hh
@@ -153,7 +153,6 @@ class AlphaISA
 #ifdef FULL_SYSTEM
         IntReg palregs[NumIntRegs];     // PAL shadow registers
         InternalProcReg ipr[NumInternalProcRegs]; // internal processor regs
-        int intrlock;                   // interrupt register lock flag
         int intrflag;                   // interrupt flag
         bool pal_shadow;                // using pal_shadow registers
 #endif // FULL_SYSTEM
diff --git a/base/traceflags.py b/base/traceflags.py
index 69f4e7ab8..a01eae3eb 100644
--- a/base/traceflags.py
+++ b/base/traceflags.py
@@ -66,6 +66,7 @@ baseFlags = [
     'AlphaConsole',
     'Flow',
     'Interrupt',
+    'Fault',
     'Cycle',
     'Loader',
     'MMU',
diff --git a/cpu/simple_cpu/simple_cpu.hh b/cpu/simple_cpu/simple_cpu.hh
index 1c6b18d03..545c753f0 100644
--- a/cpu/simple_cpu/simple_cpu.hh
+++ b/cpu/simple_cpu/simple_cpu.hh
@@ -253,7 +253,7 @@ class SimpleCPU : public BaseCPU
         // need to do this...
     }
 
-    void writeHint(Addr addr, int size)
+    void writeHint(Addr addr, int size, unsigned flags)
     {
         // need to do this...
     }
diff --git a/cpu/static_inst.hh b/cpu/static_inst.hh
index 3eeefb675..68c30df2f 100644
--- a/cpu/static_inst.hh
+++ b/cpu/static_inst.hh
@@ -72,8 +72,7 @@ class StaticInstBase : public RefCounted
     ///   unconditional branches, memory barriers) or both (e.g., an
     ///   FP/int conversion).
     /// - If IsMemRef is set, then exactly one of IsLoad or IsStore
-    ///   will be set.  Prefetches are marked as IsLoad, even if they
-    ///   prefetch exclusive copies.
+    ///   will be set.
     /// - If IsControl is set, then exactly one of IsDirectControl or
     ///   IsIndirect Control will be set, and exactly one of
     ///   IsCondControl or IsUncondControl will be set.
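
For context, a rough sketch of how a CPU model built on these interfaces
could mark a software prefetch so the DTB treats it as no-fault.  This is
a hypothetical call site under assumed surrounding code (the req and dtb
objects and the helper are not from this change), not code in the
repository:

    // Tag the request before translation so AlphaDTB::fault() leaves the
    // VM fault IPRs untouched if the prefetch address turns out to be bad.
    req->flags |= NO_FAULT;
    Fault fault = dtb->translate(req, false);   // false: not a write
    if (fault == No_Fault)
        issuePrefetchAccess(req);               // hypothetical helper
    // otherwise drop the prefetch silently; no architectural fault is raised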