summaryrefslogtreecommitdiff
path: root/src/mem/physical.cc
diff options
context:
space:
mode:
authorAndreas Hansson <andreas.hansson@arm.com>2014-02-18 05:51:01 -0500
committerAndreas Hansson <andreas.hansson@arm.com>2014-02-18 05:51:01 -0500
commit4b81585c495662ca113e436ef7f238fddf7692f4 (patch)
tree3002f04c4d29af0e94d50f7f431182fd01553109 /src/mem/physical.cc
parentf0ea79c41f6c6b00b976b6fe67ac350a1852022a (diff)
downloadgem5-4b81585c495662ca113e436ef7f238fddf7692f4.tar.xz
mem: Fix bug in PhysicalMemory use of mmap and munmap
This patch fixes a bug in how physical memory used to be mapped and unmapped. Previously we unmapped and re-mapped if restoring from a checkpoint. However, we never checked that the new mapping was actually the same, it was just magically working as the OS seems to fairly reliably give us the same chunk back. This patch fixes this issue by relying entirely on the mmap call in the constructor.
Diffstat (limited to 'src/mem/physical.cc')
-rw-r--r--src/mem/physical.cc24
1 file changed, 5 insertions, 19 deletions
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 10176bba0..1b76b52e8 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -95,10 +95,9 @@ PhysicalMemory::PhysicalMemory(const string& _name,
}
}
- // iterate over the increasing addresses and chunks of contigous
- // space to be mapped to backing store, also remember what
- // memories constitute the range so we can go and find out if we
- // have to init their parts to zero
+ // iterate over the increasing addresses and chunks of contiguous
+ // space to be mapped to backing store, create it and inform the
+ // memories
vector<AddrRange> intlv_ranges;
vector<AbstractMemory*> curr_memories;
for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
@@ -162,8 +161,7 @@ PhysicalMemory::createBackingStore(AddrRange range,
// it appropriately
backingStore.push_back(make_pair(range, pmem));
- // point the memories to their backing store, and if requested,
- // initialize the memory range to 0
+ // point the memories to their backing store
for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
m != _memories.end(); ++m) {
DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
@@ -393,14 +391,10 @@ PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
fatal("Insufficient memory to allocate compression state for %s\n",
filename);
+ // we've already got the actual backing store mapped
uint8_t* pmem = backingStore[store_id].second;
AddrRange range = backingStore[store_id].first;
- // unmap file that was mmapped in the constructor, this is
- // done here to make sure that gzip and open don't muck with
- // our nice large space of memory before we reallocate it
- munmap((char*) pmem, range.size());
-
long range_size;
UNSERIALIZE_SCALAR(range_size);
@@ -411,14 +405,6 @@ PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
fatal("Memory range size has changed! Saw %lld, expected %lld\n",
range_size, range.size());
- pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
-
- if (pmem == (void*) MAP_FAILED) {
- perror("mmap");
- fatal("Could not mmap physical memory!\n");
- }
-
uint64_t curr_size = 0;
long* temp_page = new long[chunk_size];
long* pmem_current;