Diffstat (limited to 'src/arch/x86')
-rw-r--r--  src/arch/x86/insts/microldstop.hh     3
-rw-r--r--  src/arch/x86/isa/microops/ldstop.isa  4
-rw-r--r--  src/arch/x86/tlb.cc                  36
3 files changed, 21 insertions, 22 deletions
diff --git a/src/arch/x86/insts/microldstop.hh b/src/arch/x86/insts/microldstop.hh
index f0051e2cf..1774454c3 100644
--- a/src/arch/x86/insts/microldstop.hh
+++ b/src/arch/x86/insts/microldstop.hh
@@ -67,7 +67,8 @@ namespace X86ISA
static const Request::FlagsType SegmentFlagMask = mask(4);
static const int FlagShift = 4;
enum FlagBit {
- CPL0FlagBit = 1
+ CPL0FlagBit = 1,
+ AddrSizeFlagBit = 2
};
/**
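
Note: the new AddrSizeFlagBit sits next to CPL0FlagBit in the micro-op flag bits, which live above the 4-bit segment index (SegmentFlagMask = mask(4), FlagShift = 4). A minimal C++ sketch of that layout, assuming hypothetical packMemFlags()/addrSizeOverridden() helpers that are not part of gem5:

#include <cstdint>

// Mirrors the constants from microldstop.hh.
static const uint64_t SegmentFlagMask = (1ULL << 4) - 1; // low 4 bits: segment register index
static const int FlagShift = 4;
enum FlagBit {
    CPL0FlagBit = 1,
    AddrSizeFlagBit = 2   // new bit: an address-size override applies to this access
};

// Hypothetical helper: pack a segment index and micro-op flag bits into
// the request flags using the same layout the ld/st micro-ops rely on.
static uint64_t packMemFlags(int segment, uint64_t opFlagBits)
{
    // Segment index lives below FlagShift, micro-op flag bits above it.
    return (segment & SegmentFlagMask) | (opFlagBits << FlagShift);
}

// Hypothetical check mirroring what tlb.cc does with the packed flags.
static bool addrSizeOverridden(uint64_t memFlags)
{
    return (memFlags & (AddrSizeFlagBit << FlagShift)) != 0;
}
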
diff --git a/src/arch/x86/isa/microops/ldstop.isa b/src/arch/x86/isa/microops/ldstop.isa
index 3bc238174..834b3947f 100644
--- a/src/arch/x86/isa/microops/ldstop.isa
+++ b/src/arch/x86/isa/microops/ldstop.isa
@@ -375,6 +375,8 @@ let {{
self.memFlags += " | (CPL0FlagBit << FlagShift)"
if prefetch:
self.memFlags += " | Request::PF_EXCLUSIVE"
+ self.memFlags += " | (machInst.legacy.addr ? " + \
+ "(AddrSizeFlagBit << FlagShift) : 0)"
def getAllocator(self, *microFlags):
allocator = '''new %(class_name)s(machInst, macrocodeBlock
@@ -439,7 +441,7 @@ let {{
defineMicroLoadOp('Ldfp', 'FpData.uqw = Mem;')
def defineMicroStoreOp(mnemonic, code, \
- postCode="", completeCode="", mem_flags=0):
+ postCode="", completeCode="", mem_flags="0"):
global header_output
global decoder_output
global exec_output
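
Note: in ldstop.isa, self.memFlags is a string of C++ that the ISA parser splices into the generated micro-op constructors, so the new piece appends a ternary on machInst.legacy.addr (the decoded address-size prefix). Changing the mem_flags default from the integer 0 to the string "0" presumably keeps later string concatenation of " | ..." pieces working. A rough C++ view of what the emitted expression evaluates to at construction time; the stand-in structs below are assumptions, not the real decoder types:

#include <cstdint>

// Stand-ins for the decoded instruction fields referenced by the
// generated expression; the real types live in the gem5 x86 decoder.
struct LegacyPrefixes { unsigned addr : 1; };  // address-size (0x67) prefix seen
struct MachInst { LegacyPrefixes legacy; };

static const int FlagShift = 4;
enum FlagBit { CPL0FlagBit = 1, AddrSizeFlagBit = 2 };

// Roughly the C++ the ISA parser now emits for a load/store micro-op's
// memFlags initializer.
static uint64_t memFlagsFor(const MachInst &machInst, uint64_t baseFlags)
{
    return baseFlags |
        (machInst.legacy.addr ? (AddrSizeFlagBit << FlagShift) : 0);
}
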
diff --git a/src/arch/x86/tlb.cc b/src/arch/x86/tlb.cc
index 603d4e45f..47a2eb37e 100644
--- a/src/arch/x86/tlb.cc
+++ b/src/arch/x86/tlb.cc
@@ -575,38 +575,34 @@ TLB::translate(RequestPtr req, ThreadContext *tc,
if (!tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
return new GeneralProtection(0);
bool expandDown = false;
+ SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
- SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
if (!attr.writable && write)
return new GeneralProtection(0);
if (!attr.readable && !write && !execute)
return new GeneralProtection(0);
expandDown = attr.expandDown;
+
}
Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
+ // This assumes we're not in 64 bit mode. If we were, the default
+ // address size is 64 bits, overridable to 32.
+ int size = 32;
+ bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
+ if (csAttr.defaultSize && sizeOverride ||
+ !csAttr.defaultSize && !sizeOverride)
+ size = 16;
+ Addr offset = bits(vaddr - base, size-1, 0);
+ Addr endOffset = offset + req->getSize() - 1;
if (expandDown) {
DPRINTF(TLB, "Checking an expand down segment.\n");
- // We don't have to worry about the access going around the
- // end of memory because accesses will be broken up into
- // pieces at boundaries aligned on sizes smaller than an
- // entire address space. We do have to worry about the limit
- // being less than the base.
- if (limit < base) {
- if (limit < vaddr + req->getSize() && vaddr < base)
- return new GeneralProtection(0);
- } else {
- if (limit < vaddr + req->getSize())
- return new GeneralProtection(0);
- }
+ warn_once("Expand down segments are untested.\n");
+ if (offset <= limit || endOffset <= limit)
+ return new GeneralProtection(0);
} else {
- if (limit < base) {
- if (vaddr <= limit || vaddr + req->getSize() >= base)
- return new GeneralProtection(0);
- } else {
- if (vaddr <= limit && vaddr + req->getSize() >= base)
- return new GeneralProtection(0);
- }
+ if (offset > limit || endOffset > limit)
+ return new GeneralProtection(0);
}
}
// If paging is enabled, do the translation.
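
Note: the tlb.cc hunk replaces the old vaddr-vs-base/limit comparisons with a check on the access's offset within the segment, truncated to the effective address size (16 or 32 bits, chosen from the code segment's default-size attribute and the new AddrSizeFlagBit). A standalone C++ restatement of that check, with an illustrative function name and signature that are not part of the patch:

#include <cstdint>

// Standalone sketch of the new limit check; assumes protected mode
// (not 64-bit), matching the comment in the patch.
static bool segmentLimitViolated(uint64_t vaddr, unsigned accessSize,
                                 uint64_t base, uint64_t limit,
                                 bool expandDown,
                                 bool csDefaultSize,     // CS attr defaultSize (D bit)
                                 bool addrSizeOverride)  // AddrSizeFlagBit was set
{
    // 32-bit addressing unless the default size and the override combine
    // to give 16 bits -- same condition as the patch, with explicit
    // parentheses.
    int size = 32;
    if ((csDefaultSize && addrSizeOverride) ||
        (!csDefaultSize && !addrSizeOverride))
        size = 16;

    // Offset of the access within the segment, truncated to 'size' bits.
    uint64_t mask = (size == 32) ? 0xffffffffULL : 0xffffULL;
    uint64_t offset = (vaddr - base) & mask;
    uint64_t endOffset = offset + accessSize - 1;

    if (expandDown) {
        // Expand-down segment: legal offsets lie strictly above the limit.
        return offset <= limit || endOffset <= limit;
    }
    // Expand-up segment: legal offsets lie at or below the limit.
    return offset > limit || endOffset > limit;
}
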