summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorAndrew Schultz <alschult@umich.edu>2004-02-19 16:30:06 -0500
committerAndrew Schultz <alschult@umich.edu>2004-02-19 16:30:06 -0500
commite3fb3d1ad01aa08ab6631c3f77e6f105d5a736a6 (patch)
tree67ff2c8fa3eb42626bf3501f36d8003c27468ee8 /arch
parent59a6e9d705dd6d143534a42822634f67046ff16c (diff)
downloadgem5-e3fb3d1ad01aa08ab6631c3f77e6f105d5a736a6.tar.xz
Misspeculation fix and (more importantly) change to allow for the ev6 style
physical addressing. This has the uncacheable bit as bit 40 as opposed to bit 39. Additionally, we now support (at least superficially) a 44-bit physical address. To deal with superpage access in this scheme, any super page access with either bit 39 or 40 set is sign extended. --HG-- extra : convert_revision : 05ddbcb9a6a92481109a63b261743881953620ab
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/alpha_memory.cc255
-rw-r--r--arch/alpha/alpha_memory.hh2
-rw-r--r--arch/alpha/ev5.hh7
3 files changed, 157 insertions, 107 deletions
diff --git a/arch/alpha/alpha_memory.cc b/arch/alpha/alpha_memory.cc
index 7bdbea6c1..cddee3b22 100644
--- a/arch/alpha/alpha_memory.cc
+++ b/arch/alpha/alpha_memory.cc
@@ -44,6 +44,12 @@ using namespace std;
//
// Alpha TLB
//
+
+#ifdef DEBUG
+ bool uncacheBit39 = false;
+ bool uncacheBit40 = false;
+#endif
+
AlphaTlb::AlphaTlb(const string &name, int s)
: SimObject(name), size(s), nlu(0)
{
@@ -87,24 +93,42 @@ AlphaTlb::checkCacheability(MemReqPtr &req)
{
// in Alpha, cacheability is controlled by upper-level bits of the
// physical address
- if (req->paddr & PA_UNCACHED_BIT) {
- if (PA_IPR_SPACE(req->paddr)) {
- // IPR memory space not implemented
- if (!req->xc->misspeculating()) {
- switch (req->paddr) {
- case 0xFFFFF00188:
- req->data = 0;
- break;
-
- default:
- panic("IPR memory space not implemented! PA=%x\n",
- req->paddr);
- }
- }
- } else {
- // mark request as uncacheable
- req->flags |= UNCACHEABLE;
+
+ /*
+ * We support having the uncacheable bit in either bit 39 or bit 40.
+ * The Turbolaser platform (and EV5) support having the bit in 39, but
+ * Tsunami (which Linux assumes uses an EV6) generates accesses with
+ * the bit in 40. So we must check for both, but we have debug flags
+ * to catch a weird case where both are used, which shouldn't happen.
+ */
+
+ if (req->paddr & PA_UNCACHED_BIT_40 ||
+ req->paddr & PA_UNCACHED_BIT_39) {
+
+#ifdef DEBUG
+ if (req->paddr & PA_UNCACHED_BIT_40) {
+ if(uncacheBit39)
+ panic("Bit 40 access follows bit 39 access, PA=%x\n",
+ req->paddr);
+
+ uncacheBit40 = true;
+ } else if (req->paddr & PA_UNCACHED_BIT_39) {
+ if(uncacheBit40)
+                panic("Bit 39 access follows bit 40 access, PA=%x\n",
+ req->paddr);
+
+ uncacheBit39 = true;
}
+#endif
+
+ // IPR memory space not implemented
+ if (PA_IPR_SPACE(req->paddr))
+ if (!req->xc->misspeculating())
+ panic("IPR memory space not implemented! PA=%x\n",
+ req->paddr);
+
+ // mark request as uncacheable
+ req->flags |= UNCACHEABLE;
}
}
@@ -282,19 +306,6 @@ AlphaItb::translate(MemReqPtr &req) const
if (req->flags & PHYSICAL) {
req->paddr = req->vaddr;
- } else if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
- VA_SPACE(req->vaddr) == 2) {
- // Check for "superpage" mapping: when SP<1> is set, and
- // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13>.
-
- // only valid in kernel mode
- if (ICM_CM(ipr[AlphaISA::IPR_ICM]) != AlphaISA::mode_kernel) {
- fault(req->vaddr, req->xc);
- acv++;
- return Itb_Acv_Fault;
- }
-
- req->paddr = req->vaddr & PA_IMPL_MASK;
} else {
// verify that this is a good virtual address
if (!validVirtualAddress(req->vaddr)) {
@@ -303,24 +314,49 @@ AlphaItb::translate(MemReqPtr &req) const
return Itb_Acv_Fault;
}
- // not a physical address: need to look up pte
- AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
- DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
+ // Check for "superpage" mapping: when SP<1> is set, and
+ // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13>.
+ if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
+ VA_SPACE(req->vaddr) == 2) {
- if (!pte) {
- fault(req->vaddr, req->xc);
- misses++;
- return Itb_Fault_Fault;
- }
+ // only valid in kernel mode
+ if (ICM_CM(ipr[AlphaISA::IPR_ICM]) != AlphaISA::mode_kernel) {
+ fault(req->vaddr, req->xc);
+ acv++;
+ return Itb_Acv_Fault;
+ }
- req->paddr = PA_PFN2PA(pte->ppn) + VA_POFS(req->vaddr & ~3);
+ req->paddr = req->vaddr & PA_IMPL_MASK;
- // check permissions for this access
- if (!(pte->xre & (1 << ICM_CM(ipr[AlphaISA::IPR_ICM])))) {
- // instruction access fault
- fault(req->vaddr, req->xc);
- acv++;
- return Itb_Acv_Fault;
+ // sign extend the physical address properly
+ if (req->paddr & PA_UNCACHED_BIT_39 ||
+ req->paddr & PA_UNCACHED_BIT_40)
+ req->paddr |= 0xf0000000000;
+ else
+ req->paddr &= 0xffffffffff;
+
+ } else {
+ // not a physical address: need to look up pte
+ AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
+ DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
+
+ if (!pte) {
+ fault(req->vaddr, req->xc);
+ misses++;
+ return Itb_Fault_Fault;
+ }
+
+ req->paddr = PA_PFN2PA(pte->ppn) + VA_POFS(req->vaddr & ~3);
+
+ // check permissions for this access
+ if (!(pte->xre & (1 << ICM_CM(ipr[AlphaISA::IPR_ICM])))) {
+ // instruction access fault
+ fault(req->vaddr, req->xc);
+ acv++;
+ return Itb_Acv_Fault;
+ }
+
+ hits++;
}
}
@@ -330,7 +366,6 @@ AlphaItb::translate(MemReqPtr &req) const
checkCacheability(req);
- hits++;
return No_Fault;
}
@@ -453,27 +488,7 @@ AlphaDtb::translate(MemReqPtr &req, bool write) const
if (req->flags & PHYSICAL) {
req->paddr = req->vaddr;
- } else if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
- VA_SPACE(req->vaddr) == 2) {
- // Check for "superpage" mapping: when SP<1> is set, and
- // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13>.
-
- // only valid in kernel mode
- if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) != AlphaISA::mode_kernel) {
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK),
- req->xc);
- if (write) { write_acv++; } else { read_acv++; }
- return Dtb_Acv_Fault;
- }
-
- req->paddr = req->vaddr & PA_IMPL_MASK;
} else {
- if (write)
- write_accesses++;
- else
- read_accesses++;
-
// verify that this is a good virtual address
if (!validVirtualAddress(req->vaddr)) {
fault(req->vaddr,
@@ -485,48 +500,80 @@ AlphaDtb::translate(MemReqPtr &req, bool write) const
return Dtb_Fault_Fault;
}
- // not a physical address: need to look up pte
- AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
- DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
-
- if (!pte) {
- // page fault
- fault(req->vaddr,
- ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK),
- req->xc);
- if (write) { write_misses++; } else { read_misses++; }
- return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
- }
-
- req->paddr = PA_PFN2PA(pte->ppn) | VA_POFS(req->vaddr);
+ // Check for "superpage" mapping: when SP<1> is set, and
+ // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13>.
+ if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
+ VA_SPACE(req->vaddr) == 2) {
- if (write) {
- if (!(pte->xwe & MODE2MASK(mode))) {
- // declare the instruction access fault
- fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
- (pte->fonw ? MM_STAT_FONW_MASK : 0),
- req->xc);
- write_acv++;
- return Dtb_Fault_Fault;
- }
- if (pte->fonw) {
- fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_FONW_MASK,
+ // only valid in kernel mode
+ if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
+ AlphaISA::mode_kernel) {
+ fault(req->vaddr,
+ ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_ACV_MASK),
req->xc);
- write_acv++;
- return Dtb_Fault_Fault;
+ if (write) { write_acv++; } else { read_acv++; }
+ return Dtb_Acv_Fault;
}
+
+ req->paddr = req->vaddr & PA_IMPL_MASK;
+
+ // sign extend the physical address properly
+ if (req->paddr & PA_UNCACHED_BIT_39 ||
+ req->paddr & PA_UNCACHED_BIT_40)
+ req->paddr |= 0xf0000000000;
+ else
+ req->paddr &= 0xffffffffff;
+
} else {
- if (!(pte->xre & MODE2MASK(mode))) {
+ if (write)
+ write_accesses++;
+ else
+ read_accesses++;
+
+ // not a physical address: need to look up pte
+ AlphaISA::PTE *pte = lookup(VA_VPN(req->vaddr),
+ DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));
+
+ if (!pte) {
+ // page fault
fault(req->vaddr,
- MM_STAT_ACV_MASK | (pte->fonr ? MM_STAT_FONR_MASK : 0),
+ ((write ? MM_STAT_WR_MASK : 0) | MM_STAT_DTB_MISS_MASK),
req->xc);
- read_acv++;
- return Dtb_Acv_Fault;
+ if (write) { write_misses++; } else { read_misses++; }
+ return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
}
- if (pte->fonr) {
- fault(req->vaddr, MM_STAT_FONR_MASK, req->xc);
- read_acv++;
- return Dtb_Fault_Fault;
+
+ req->paddr = PA_PFN2PA(pte->ppn) | VA_POFS(req->vaddr);
+
+ if (write) {
+ if (!(pte->xwe & MODE2MASK(mode))) {
+ // declare the instruction access fault
+ fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_ACV_MASK |
+ (pte->fonw ? MM_STAT_FONW_MASK : 0),
+ req->xc);
+ write_acv++;
+ return Dtb_Fault_Fault;
+ }
+ if (pte->fonw) {
+ fault(req->vaddr, MM_STAT_WR_MASK | MM_STAT_FONW_MASK,
+ req->xc);
+ write_acv++;
+ return Dtb_Fault_Fault;
+ }
+ } else {
+ if (!(pte->xre & MODE2MASK(mode))) {
+ fault(req->vaddr,
+ MM_STAT_ACV_MASK |
+ (pte->fonr ? MM_STAT_FONR_MASK : 0),
+ req->xc);
+ read_acv++;
+ return Dtb_Acv_Fault;
+ }
+ if (pte->fonr) {
+ fault(req->vaddr, MM_STAT_FONR_MASK, req->xc);
+ read_acv++;
+ return Dtb_Fault_Fault;
+ }
}
}
@@ -546,10 +593,12 @@ AlphaDtb::translate(MemReqPtr &req, bool write) const
}
AlphaISA::PTE &
-AlphaTlb::index()
+AlphaTlb::index(bool advance)
{
AlphaISA::PTE *pte = &table[nlu];
- nextnlu();
+
+ if (advance)
+ nextnlu();
return *pte;
}
diff --git a/arch/alpha/alpha_memory.hh b/arch/alpha/alpha_memory.hh
index 482a13eee..999eec228 100644
--- a/arch/alpha/alpha_memory.hh
+++ b/arch/alpha/alpha_memory.hh
@@ -56,7 +56,7 @@ class AlphaTlb : public SimObject
int getsize() const { return size; }
- AlphaISA::PTE &index();
+ AlphaISA::PTE &index(bool advance = true);
void insert(Addr vaddr, AlphaISA::PTE &pte);
void flushAll();
diff --git a/arch/alpha/ev5.hh b/arch/alpha/ev5.hh
index aa3d7e226..bd4115704 100644
--- a/arch/alpha/ev5.hh
+++ b/arch/alpha/ev5.hh
@@ -57,9 +57,10 @@
#define VA_SPACE(X) (((X) >> 41) & 0x3)
#define VA_POFS(X) ((X) & 0x1fff)
-#define PA_IMPL_MASK ULL(0xffffffffff)
-#define PA_UNCACHED_BIT ULL(0x8000000000)
-#define PA_IPR_SPACE(X) ((X) >= ULL(0xFFFFF00000))
+#define PA_IMPL_MASK ULL(0xfffffffffff) // for Tsunami
+#define PA_UNCACHED_BIT_39 ULL(0x8000000000)
+#define PA_UNCACHED_BIT_40 ULL(0x10000000000)
+#define PA_IPR_SPACE(X) ((X) >= ULL(0xFFFFFF00000))
#define PA_PFN2PA(X) ((X) << 13)