author     Nilay Vaish <nilay@cs.wisc.edu>    2013-08-07 14:51:17 -0500
committer  Nilay Vaish <nilay@cs.wisc.edu>    2013-08-07 14:51:17 -0500
commit     e0387415988a11f30b5aac66cd5cc32f7387e08e (patch)
tree       d0b6b1f6dd744c4cc491d4dca38c9063d69895cf /src/arch/x86
parent     b5bb2a25aa702ad3d1a173e9e86d2addc24d9c13 (diff)
download   gem5-e0387415988a11f30b5aac66cd5cc32f7387e08e.tar.xz
x86: add tlb checkpointing
This patch adds checkpointing support to the x86 TLB. It upgrades the cpt_upgrader.py script so that previously created checkpoints can be updated, and it moves the checkpoint version to 6. (The cpt_upgrader.py change lives outside src/arch/x86, so it does not appear in the diffstat below; a hedged sketch of it follows the diffstat.)
Diffstat (limited to 'src/arch/x86')
-rw-r--r--  src/arch/x86/X86TLB.py     |  2
-rw-r--r--  src/arch/x86/pagetable.cc  | 15
-rw-r--r--  src/arch/x86/tlb.cc        | 31
-rw-r--r--  src/arch/x86/tlb.hh        |  3
4 files changed, 35 insertions, 16 deletions
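The cpt_upgrader.py hunk is not shown here because this view is limited to src/arch/x86. What follows is a minimal, hypothetical sketch of what a version-6 upgrade step could look like, assuming the checkpoint is an INI-style file handled with Python's ConfigParser, that the x86 TLB sections end in ".itb"/".dtb", and that pre-v6 checkpoints stored no TLB entries, so seeding the two scalars read by the new TLB::unserialize() (_size and lruSeq) is enough. The function name, section-matching pattern, and file path are illustrative, not taken from the actual script.

# Hypothetical v5 -> v6 upgrade step (illustrative sketch, not the actual
# cpt_upgrader.py code). Assumes INI-style checkpoints read via ConfigParser
# and x86 TLB sections whose names end in ".itb" or ".dtb".
import re
from configparser import ConfigParser

def from_5(cpt):
    for sec in cpt.sections():
        if re.search(r'\.cpu.*\.(itb|dtb)$', sec):
            # Old checkpoints saved no TLB entries, so record zero entries
            # and a starting LRU sequence number for the new unserialize code.
            cpt.set(sec, '_size', '0')
            cpt.set(sec, 'lruSeq', '0')

if __name__ == '__main__':
    cpt = ConfigParser()
    cpt.optionxform = str            # keep scalar names case sensitive (lruSeq)
    cpt.read('cpt.1000000/m5.cpt')   # illustrative checkpoint path
    from_5(cpt)
    with open('cpt.1000000/m5.cpt', 'w') as outfile:
        cpt.write(outfile)

In the real gem5 utility, the upgrade step would presumably be registered and dispatched per checkpoint version by cpt_upgrader.py itself rather than run from a __main__ block as shown here.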
diff --git a/src/arch/x86/X86TLB.py b/src/arch/x86/X86TLB.py
index b652118ce..7f195f233 100644
--- a/src/arch/x86/X86TLB.py
+++ b/src/arch/x86/X86TLB.py
@@ -54,6 +54,6 @@ class X86TLB(BaseTLB):
     type = 'X86TLB'
     cxx_class = 'X86ISA::TLB'
     cxx_header = 'arch/x86/tlb.hh'
-    size = Param.Int(64, "TLB size")
+    size = Param.Unsigned(64, "TLB size")
     walker = Param.X86PagetableWalker(\
             X86PagetableWalker(), "page table walker")
diff --git a/src/arch/x86/pagetable.cc b/src/arch/x86/pagetable.cc
index 40d5e0984..a9ef18129 100644
--- a/src/arch/x86/pagetable.cc
+++ b/src/arch/x86/pagetable.cc
@@ -70,25 +70,14 @@ TlbEntry::unserialize(Checkpoint *cp, const std::string &section)
 {
     UNSERIALIZE_SCALAR(paddr);
     UNSERIALIZE_SCALAR(vaddr);
-    //
-    // The logBytes scalar variable replaced the previous size variable.
-    // The following code maintains backwards compatibility with previous
-    // checkpoints using the old size variable.
-    //
-    if (UNSERIALIZE_OPT_SCALAR(logBytes) == false) {
-        int size;
-        UNSERIALIZE_SCALAR(size);
-        logBytes = log2(size);
-    }
+    UNSERIALIZE_SCALAR(logBytes);
     UNSERIALIZE_SCALAR(writable);
     UNSERIALIZE_SCALAR(user);
     UNSERIALIZE_SCALAR(uncacheable);
     UNSERIALIZE_SCALAR(global);
     UNSERIALIZE_SCALAR(patBit);
     UNSERIALIZE_SCALAR(noExec);
-    if (UNSERIALIZE_OPT_SCALAR(lruSeq) == false) {
-        lruSeq = 0;
-    }
+    UNSERIALIZE_SCALAR(lruSeq);
 }
 
 }
diff --git a/src/arch/x86/tlb.cc b/src/arch/x86/tlb.cc
index 52cc3e0ee..087cfbadf 100644
--- a/src/arch/x86/tlb.cc
+++ b/src/arch/x86/tlb.cc
@@ -439,11 +439,42 @@ TLB::getWalker()
 void
 TLB::serialize(std::ostream &os)
 {
+    // Only store the entries in use.
+    uint32_t _size = size - freeList.size();
+    SERIALIZE_SCALAR(_size);
+    SERIALIZE_SCALAR(lruSeq);
+
+    uint32_t _count = 0;
+
+    for (uint32_t x = 0; x < size; x++) {
+        if (tlb[x].trieHandle != NULL) {
+            os << "\n[" << csprintf("%s.Entry%d", name(), _count) << "]\n";
+            tlb[x].serialize(os);
+            _count++;
+        }
+    }
 }
 
 void
 TLB::unserialize(Checkpoint *cp, const std::string &section)
 {
+    // Do not allow restoring into a smaller TLB than the one checkpointed.
+    uint32_t _size;
+    UNSERIALIZE_SCALAR(_size);
+    if (_size > size) {
+        fatal("TLB size less than the one in checkpoint!");
+    }
+
+    UNSERIALIZE_SCALAR(lruSeq);
+
+    for (uint32_t x = 0; x < _size; x++) {
+        TlbEntry *newEntry = freeList.front();
+        freeList.pop_front();
+
+        newEntry->unserialize(cp, csprintf("%s.Entry%d", name(), x));
+        newEntry->trieHandle = trie.insert(newEntry->vaddr,
+            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
+    }
 }
 
 BaseMasterPort *
diff --git a/src/arch/x86/tlb.hh b/src/arch/x86/tlb.hh
index 4f0d58d5c..ea2d50ec2 100644
--- a/src/arch/x86/tlb.hh
+++ b/src/arch/x86/tlb.hh
@@ -95,12 +95,11 @@ namespace X86ISA
         void demapPage(Addr va, uint64_t asn);
 
       protected:
-        int size;
+        uint32_t size;
 
         TlbEntry * tlb;
 
         EntryList freeList;
-        EntryList entryList;
 
         TlbEntryTrie trie;
         uint64_t lruSeq;