Diffstat (limited to 'arch/alpha/alpha_memory.cc')
-rw-r--r--  arch/alpha/alpha_memory.cc  96
1 file changed, 61 insertions, 35 deletions
diff --git a/arch/alpha/alpha_memory.cc b/arch/alpha/alpha_memory.cc
index d7004b461..a40ad7a5c 100644
--- a/arch/alpha/alpha_memory.cc
+++ b/arch/alpha/alpha_memory.cc
@@ -101,18 +101,34 @@ AlphaTLB::checkCacheability(MemReqPtr &req)
* to catch a weird case where both are used, which shouldn't happen.
*/
+
+#ifdef ALPHA_TLASER
+ if (req->paddr & PA_UNCACHED_BIT_39) {
+#else
if (req->paddr & PA_UNCACHED_BIT_43) {
+#endif
// IPR memory space not implemented
- if (PA_IPR_SPACE(req->paddr))
- if (!req->xc->misspeculating())
- panic("IPR memory space not implemented! PA=%x\n",
- req->paddr);
-
- // mark request as uncacheable
- req->flags |= UNCACHEABLE;
+ if (PA_IPR_SPACE(req->paddr)) {
+ if (!req->xc->misspeculating()) {
+ switch (req->paddr) {
+ case ULL(0xFFFFF00188):
+ req->data = 0;
+ break;
+
+ default:
+ panic("IPR memory space not implemented! PA=%x\n",
+ req->paddr);
+ }
+ }
+ } else {
+ // mark request as uncacheable
+ req->flags |= UNCACHEABLE;
- // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
- req->paddr &= PA_UNCACHED_MASK;
+#ifndef ALPHA_TLASER
+ // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
+ req->paddr &= PA_UNCACHED_MASK;
+#endif
+ }
}
}
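
In isolation, the platform split above amounts to testing a different uncached-address bit and only masking the address on the Tsunami path. A minimal standalone sketch of that selection, using simplified stand-ins for MemReq and the PA_* constants (the values below are illustrative, not the simulator's definitions) and covering only the non-IPR branch:

    #include <cstdint>

    typedef uint64_t Addr;

    // Illustrative stand-ins for the simulator's constants and request type.
    const Addr PA_UNCACHED_BIT_39 = 1ULL << 39;        // Turbolaser (ALPHA_TLASER)
    const Addr PA_UNCACHED_BIT_43 = 1ULL << 43;        // Tsunami / EV6
    const Addr PA_UNCACHED_MASK   = ~(0xffULL << 35);  // clears bits 42:35
    const uint32_t UNCACHEABLE    = 0x1;

    struct MemReq { Addr paddr; uint32_t flags; };

    // Mark a request uncacheable when the platform's uncached bit is set,
    // mirroring the non-IPR branch of AlphaTLB::checkCacheability() above.
    void markUncacheable(MemReq &req, bool tlaser)
    {
        const Addr uncachedBit = tlaser ? PA_UNCACHED_BIT_39 : PA_UNCACHED_BIT_43;
        if (req.paddr & uncachedBit) {
            req.flags |= UNCACHEABLE;
            if (!tlaser)
                req.paddr &= PA_UNCACHED_MASK;  // bits 42:35, per the Tsunami manual
        }
    }
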
@@ -301,7 +317,13 @@ AlphaITB::translate(MemReqPtr &req) const
// VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
// VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
+#ifdef ALPHA_TLASER
+ if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
+ VA_SPACE_EV5(req->vaddr) == 2) {
+#else
if (VA_SPACE_EV6(req->vaddr) == 0x7e) {
+#endif
+
// only valid in kernel mode
if (ICM_CM(ipr[AlphaISA::IPR_ICM]) != AlphaISA::mode_kernel) {
@@ -312,11 +334,13 @@ AlphaITB::translate(MemReqPtr &req) const
req->paddr = req->vaddr & PA_IMPL_MASK;
+#ifndef ALPHA_TLASER
// sign extend the physical address properly
if (req->paddr & PA_UNCACHED_BIT_40)
req->paddr |= ULL(0xf0000000000);
else
req->paddr &= ULL(0xffffffffff);
+#endif
} else {
// not a physical address: need to look up pte
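
On the non-Turbolaser superpage path, the physical address taken directly from the virtual address is then sign-extended from bit 40: bits 43:41 are filled in when bit 40 is set, otherwise the address is truncated to 40 bits. A small sketch of just that step (the constant name mirrors the hunk; its value is an assumption):

    #include <cstdint>

    typedef uint64_t Addr;

    const Addr PA_UNCACHED_BIT_40 = 1ULL << 40;  // assumed value, name from the hunk

    // Sign-extend a superpage physical address from bit 40, as in the
    // #ifndef ALPHA_TLASER branches of AlphaITB/AlphaDTB::translate().
    Addr signExtendSuperpagePA(Addr paddr)
    {
        if (paddr & PA_UNCACHED_BIT_40)
            paddr |= 0xf0000000000ULL;   // set bits 43:40
        else
            paddr &= 0xffffffffffULL;    // keep bits 39:0
        return paddr;
    }
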
@@ -430,12 +454,19 @@ AlphaDTB::regStats()
}
void
-AlphaDTB::fault(Addr vaddr, uint64_t flags, ExecContext *xc) const
+AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
{
+ ExecContext *xc = req->xc;
+ Addr vaddr = req->vaddr;
uint64_t *ipr = xc->regs.ipr;
- // set fault address and flags
- if (!xc->misspeculating() && !xc->regs.intrlock) {
+ // Set fault address and flags. Even though we're modeling an
+ // EV5, we use the EV6 technique of not latching fault registers
+ // on VPTE loads (instead of locking the registers until IPR_VA is
+ // read, like the EV5). The EV6 approach is cleaner and seems to
+ // work with EV5 PAL code, but not the other way around.
+ if (!xc->misspeculating()
+ && !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
// set VA register with faulting address
ipr[AlphaISA::IPR_VA] = vaddr;
@@ -447,9 +478,6 @@ AlphaDTB::fault(Addr vaddr, uint64_t flags, ExecContext *xc) const
// set VA_FORM register with faulting formatted address
ipr[AlphaISA::IPR_VA_FORM] =
ipr[AlphaISA::IPR_MVPTBR] | (VA_VPN(vaddr) << 3);
-
- // lock these registers until the VA register is read
- xc->regs.intrlock = true;
}
}
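
In outline, the reworked fault() pulls the context and faulting address out of the request and skips latching the fault IPRs for speculative, VPTE, and no-fault accesses, replacing the old intrlock-based scheme. A simplified sketch with stand-in types and IPR indices (MemReq, ExecContext, the flag values, and VA_VPN below are illustrative assumptions, not the tree's real definitions):

    #include <cstdint>

    typedef uint64_t Addr;

    // Stand-in request flags and IPR indices (illustrative only).
    const uint32_t VPTE = 0x2, NO_FAULT = 0x4;
    enum { IPR_VA, IPR_MM_STAT, IPR_VA_FORM, IPR_MVPTBR, NUM_IPRS };

    struct ExecContext {
        struct { uint64_t ipr[NUM_IPRS]; } regs;
        bool misspeculating() const { return false; }
    };

    struct MemReq { ExecContext *xc; Addr vaddr; uint32_t flags; };

    // Assumed: 8 KB pages, so the VPN is the part of the address above bit 13.
    static inline Addr VA_VPN(Addr va) { return va >> 13; }

    // Mirrors the reworked AlphaDTB::fault(): the fault registers are only
    // latched when the access is non-speculative and is neither a VPTE load
    // nor a no-fault probe.
    void dtbFault(MemReq &req, uint64_t flags)
    {
        ExecContext *xc = req.xc;
        uint64_t *ipr = xc->regs.ipr;

        if (!xc->misspeculating()
            && !(req.flags & VPTE) && !(req.flags & NO_FAULT)) {
            ipr[IPR_VA] = req.vaddr;                          // faulting address
            ipr[IPR_MM_STAT] = flags;                         // assumed: latched in the elided lines
            ipr[IPR_VA_FORM] =
                ipr[IPR_MVPTBR] | (VA_VPN(req.vaddr) << 3);   // formatted address
        }
    }
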
@@ -474,35 +502,38 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
} else {
// verify that this is a good virtual address
if (!validVirtualAddress(req->vaddr)) {
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
- MM_STAT_ACV_MASK),
- req->xc);
+ fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_BAD_VA_MASK |
+ MM_STAT_ACV_MASK));
if (write) { write_acv++; } else { read_acv++; }
return DTB_Fault_Fault;
}
// Check for "superpage" mapping
+#ifdef ALPHA_TLASER
+ if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
+ VA_SPACE_EV5(req->vaddr) == 2) {
+#else
if (VA_SPACE_EV6(req->vaddr) == 0x7e) {
+#endif
// only valid in kernel mode
if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
AlphaISA::mode_kernel) {
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK),
- req->xc);
+ fault(req, ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK));
if (write) { write_acv++; } else { read_acv++; }
return DTB_Acv_Fault;
}
req->paddr = req->vaddr & PA_IMPL_MASK;
+#ifndef ALPHA_TLASER
// sign extend the physical address properly
if (req->paddr & PA_UNCACHED_BIT_40)
req->paddr |= ULL(0xf0000000000);
else
req->paddr &= ULL(0xffffffffff);
+#endif
} else {
if (write)
@@ -516,9 +547,8 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
if (!pte) {
// page fault
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK),
- req->xc);
+ fault(req,
+ (write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK);
if (write) { write_misses++; } else { read_misses++; }
return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
}
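
The fault type returned on a TLB miss depends on whether the access was itself a virtual-PTE fetch, as the last line of the hunk shows. A tiny sketch of that selection (the Fault enumerators come from the hunk; the VPTE flag value is a stand-in):

    #include <cstdint>

    const uint32_t VPTE = 0x2;  // stand-in for the request's VPTE flag bit

    enum Fault { No_Fault, Ndtb_Miss_Fault, Pdtb_Miss_Fault };

    // A miss while fetching a virtual PTE is reported as Pdtb_Miss_Fault;
    // a miss on an ordinary access as Ndtb_Miss_Fault.
    Fault dtbMissFault(uint32_t reqFlags)
    {
        return (reqFlags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
    }
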
@@ -528,29 +558,25 @@ AlphaDTB::translate(MemReqPtr &req, bool write) const
if (write) {
if (!(pte->xwe & MODE2MASK(mode))) {
// declare the instruction access fault
- fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
- (pte->fonw ? MM_STAT_FONW_MASK : 0),
- req->xc);
+ fault(req, (MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
+ (pte->fonw ? MM_STAT_FONW_MASK : 0)));
write_acv++;
return DTB_Fault_Fault;
}
if (pte->fonw) {
- fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_FONW_MASK,
- req->xc);
+ fault(req, MM_STAT_WR_MASK | MM_STAT_FONW_MASK);
write_acv++;
return DTB_Fault_Fault;
}
} else {
if (!(pte->xre & MODE2MASK(mode))) {
- fault(req->vaddr,
- MM_STAT_ACV_MASK |
- (pte->fonr ? MM_STAT_FONR_MASK : 0),
- req->xc);
+ fault(req, (MM_STAT_ACV_MASK |
+ (pte->fonr ? MM_STAT_FONR_MASK : 0)));
read_acv++;
return DTB_Acv_Fault;
}
if (pte->fonr) {
- fault(req->vaddr, MM_STAT_FONR_MASK, req->xc);
+ fault(req, MM_STAT_FONR_MASK);
read_acv++;
return DTB_Fault_Fault;
}
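
The protection checks in this last hunk reduce to composing MM_STAT flags from the PTE's per-mode access bits and its fault-on-write/read bits. A condensed sketch of that decision, with simplified stand-ins for the PTE layout and the mask values:

    #include <cstdint>

    // Illustrative stand-ins for the MM_STAT bits used in the hunk above.
    const uint64_t MM_STAT_WR_MASK   = 0x1;
    const uint64_t MM_STAT_ACV_MASK  = 0x2;
    const uint64_t MM_STAT_FONW_MASK = 0x4;
    const uint64_t MM_STAT_FONR_MASK = 0x8;

    struct PTE { uint8_t xwe, xre; bool fonw, fonr; };
    static inline uint8_t MODE2MASK(int mode) { return 1 << mode; }

    // Returns the MM_STAT flags for a protection or fault-on violation,
    // or 0 if the access is allowed -- mirroring the checks made in
    // AlphaDTB::translate() after the PTE lookup.
    uint64_t protectionFaultFlags(const PTE &pte, int mode, bool write)
    {
        if (write) {
            if (!(pte.xwe & MODE2MASK(mode)))
                return MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
                       (pte.fonw ? MM_STAT_FONW_MASK : 0);
            if (pte.fonw)
                return MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
        } else {
            if (!(pte.xre & MODE2MASK(mode)))
                return MM_STAT_ACV_MASK |
                       (pte.fonr ? MM_STAT_FONR_MASK : 0);
            if (pte.fonr)
                return MM_STAT_FONR_MASK;
        }
        return 0;  // access permitted
    }
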