author     David Hashe <david.j.hashe@gmail.com>    2016-08-22 11:41:05 -0400
committer  David Hashe <david.j.hashe@gmail.com>    2016-08-22 11:41:05 -0400
commit     f3ccaab1e982f4482177aefa95575f7d7dae21f7 (patch)
tree       8797cdd232c0ccafa53d0601bc9da14a16f6db00 /src
parent     d80a613990935dd144a5665a875f91725ee81f78 (diff)
cpu, mem, sim: Change how KVM maps memory
Only map memories into the KVM guest address space that are marked as usable by KVM. Create a BackingStoreEntry class containing flags for conf_table_reported, in_addr_map, and kvm_map.
Diffstat (limited to 'src')
-rw-r--r--  src/cpu/kvm/vm.cc          11
-rw-r--r--  src/mem/AbstractMemory.py   6
-rw-r--r--  src/mem/abstract_mem.cc     2
-rw-r--r--  src/mem/abstract_mem.hh    15
-rw-r--r--  src/mem/physical.cc        51
-rw-r--r--  src/mem/physical.hh        54
6 files changed, 120 insertions(+), 19 deletions(-)
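For orientation before the per-file diffs, here is a minimal, self-contained sketch (not part of the patch) of the pattern this change introduces: backing-store entries carry per-region flags, and the KVM startup loop skips any entry whose kvm_map flag is clear. The struct and loop mirror the diff below; AddrRange is reduced to a hypothetical start/size pair for illustration.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Simplified stand-in for gem5's AddrRange (illustrative only).
    struct AddrRange { uint64_t start; uint64_t size; };

    // Mirrors the BackingStoreEntry added in src/mem/physical.hh.
    struct BackingStoreEntry
    {
        AddrRange range;        // guest address range covered
        uint8_t *pmem;          // host backing memory, range.size bytes
        bool confTableReported; // report to the configuration table?
        bool inAddrMap;         // part of the global address map?
        bool kvmMap;            // map into the KVM guest address space?
    };

    // Mirrors the filtering added to KvmVM::delayedStartup() in vm.cc.
    void mapRegionsForKvm(const std::vector<BackingStoreEntry> &memories)
    {
        for (std::size_t slot = 0; slot < memories.size(); ++slot) {
            if (!memories[slot].kvmMap) {
                std::printf("Skipping region not usable by KVM\n");
                continue; // this slot number is skipped, not reused
            }
            if (memories[slot].pmem) {
                // A real implementation would install pmem/range into the
                // KVM memory slot here.
                std::printf("Mapping region: %p -> 0x%llx [size: 0x%llx]\n",
                            static_cast<void *>(memories[slot].pmem),
                            (unsigned long long)memories[slot].range.start,
                            (unsigned long long)memories[slot].range.size);
            }
        }
    }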
diff --git a/src/cpu/kvm/vm.cc b/src/cpu/kvm/vm.cc
index ccf739305..39bce32f4 100644
--- a/src/cpu/kvm/vm.cc
+++ b/src/cpu/kvm/vm.cc
@@ -341,13 +341,18 @@ KvmVM::cpuStartup()
void
KvmVM::delayedStartup()
{
- const std::vector<std::pair<AddrRange, uint8_t*> >&memories(
+ const std::vector<BackingStoreEntry> &memories(
system->getPhysMem().getBackingStore());
DPRINTF(Kvm, "Mapping %i memory region(s)\n", memories.size());
for (int slot(0); slot < memories.size(); ++slot) {
- const AddrRange &range(memories[slot].first);
- void *pmem(memories[slot].second);
+ if (!memories[slot].kvmMap) {
+ DPRINTF(Kvm, "Skipping region marked as not usable by KVM\n");
+ continue;
+ }
+
+ const AddrRange &range(memories[slot].range);
+ void *pmem(memories[slot].pmem);
if (pmem) {
DPRINTF(Kvm, "Mapping region: 0x%p -> 0x%llx [size: 0x%llx]\n",
diff --git a/src/mem/AbstractMemory.py b/src/mem/AbstractMemory.py
index ab1a6028c..d5b34bbd0 100644
--- a/src/mem/AbstractMemory.py
+++ b/src/mem/AbstractMemory.py
@@ -57,6 +57,12 @@ class AbstractMemory(MemObject):
# e.g. by the testers that use shadow memories as a reference
in_addr_map = Param.Bool(True, "Memory part of the global address map")
+ # When KVM acceleration is used, memory is mapped into the guest process
+ # address space and accessed directly. Some memories may need to be
+ # excluded from this mapping if they overlap with other memory ranges or
+ # are not accessible by the CPU.
+ kvm_map = Param.Bool(True, "Should KVM map this memory for the guest")
+
# Should the bootloader include this memory when passing
# configuration information about the physical memory layout to
# the kernel, e.g. using ATAG or ACPI
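In practice this flag is driven from the Python configuration side: a setup that keeps a shadow copy of a memory range would leave kvm_map at its default of True on the primary memory and set it to False on the shadow, so only one of the two overlapping ranges is handed to KVM. (Illustrative usage; no such configuration is part of this patch.)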
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index 04e4b0057..1797deae8 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -57,7 +57,7 @@ using namespace std;
AbstractMemory::AbstractMemory(const Params *p) :
MemObject(p), range(params()->range), pmemAddr(NULL),
confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
- _system(NULL)
+ kvmMap(p->kvm_map), _system(NULL)
{
}
diff --git a/src/mem/abstract_mem.hh b/src/mem/abstract_mem.hh
index 8ab28770d..31d34f051 100644
--- a/src/mem/abstract_mem.hh
+++ b/src/mem/abstract_mem.hh
@@ -111,10 +111,13 @@ class AbstractMemory : public MemObject
uint8_t* pmemAddr;
// Enable specific memories to be reported to the configuration table
- bool confTableReported;
+ const bool confTableReported;
// Should the memory appear in the global address map
- bool inAddrMap;
+ const bool inAddrMap;
+
+ // Should KVM map this memory for the guest
+ const bool kvmMap;
std::list<LockedAddr> lockedAddrList;
@@ -283,6 +286,14 @@ class AbstractMemory : public MemObject
bool isInAddrMap() const { return inAddrMap; }
/**
+ * When shadow memories are in use, KVM may want to map one or the other,
+ * but cannot map both into the guest address space.
+ *
+ * @return whether this memory should be mapped into the KVM guest address space
+ */
+ bool isKvmMap() const { return kvmMap; }
+
+ /**
* Perform an untimed memory access and update all the state
* (e.g. locked addresses) and statistics accordingly. The packet
* is turned into a response if required.
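As a concrete illustration of the shadow-memory case described in the new comment, a hypothetical helper (not in the patch, assuming only the AbstractMemory interface from this header) that picks which of two overlapping memories KVM should map:

    // Hypothetical: given two memories covering the same range, return the
    // one flagged for KVM mapping, or null if neither should be mapped.
    const AbstractMemory *
    selectKvmMapped(const AbstractMemory *primary, const AbstractMemory *shadow)
    {
        if (primary->isKvmMap())
            return primary;
        if (shadow->isKvmMap())
            return shadow;
        return nullptr;
    }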
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 82e331fe1..05881b224 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -111,7 +111,9 @@ PhysicalMemory::PhysicalMemory(const string& _name,
// memories are allowed to overlap in the logical address
// map
vector<AbstractMemory*> unmapped_mems{m};
- createBackingStore(m->getAddrRange(), unmapped_mems);
+ createBackingStore(m->getAddrRange(), unmapped_mems,
+ m->isConfReported(), m->isInAddrMap(),
+ m->isKvmMap());
}
}
@@ -132,7 +134,19 @@ PhysicalMemory::PhysicalMemory(const string& _name,
if (!intlv_ranges.empty() &&
!intlv_ranges.back().mergesWith(r.first)) {
AddrRange merged_range(intlv_ranges);
- createBackingStore(merged_range, curr_memories);
+
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
+
intlv_ranges.clear();
curr_memories.clear();
}
@@ -140,7 +154,10 @@ PhysicalMemory::PhysicalMemory(const string& _name,
curr_memories.push_back(r.second);
} else {
vector<AbstractMemory*> single_memory{r.second};
- createBackingStore(r.first, single_memory);
+ createBackingStore(r.first, single_memory,
+ r.second->isConfReported(),
+ r.second->isInAddrMap(),
+ r.second->isKvmMap());
}
}
}
@@ -149,13 +166,26 @@ PhysicalMemory::PhysicalMemory(const string& _name,
// ahead and do it
if (!intlv_ranges.empty()) {
AddrRange merged_range(intlv_ranges);
- createBackingStore(merged_range, curr_memories);
+
+ AbstractMemory *f = curr_memories.front();
+ for (const auto& c : curr_memories)
+ if (f->isConfReported() != c->isConfReported() ||
+ f->isInAddrMap() != c->isInAddrMap() ||
+ f->isKvmMap() != c->isKvmMap())
+ fatal("Inconsistent flags in an interleaved "
+ "range\n");
+
+ createBackingStore(merged_range, curr_memories,
+ f->isConfReported(), f->isInAddrMap(),
+ f->isKvmMap());
}
}
void
PhysicalMemory::createBackingStore(AddrRange range,
- const vector<AbstractMemory*>& _memories)
+ const vector<AbstractMemory*>& _memories,
+ bool conf_table_reported,
+ bool in_addr_map, bool kvm_map)
{
panic_if(range.interleaved(),
"Cannot create backing store for interleaved range %s\n",
@@ -184,7 +214,8 @@ PhysicalMemory::createBackingStore(AddrRange range,
// remember this backing store so we can checkpoint it and unmap
// it appropriately
- backingStore.push_back(make_pair(range, pmem));
+ backingStore.emplace_back(range, pmem,
+ conf_table_reported, in_addr_map, kvm_map);
// point the memories to their backing store
for (const auto& m : _memories) {
@@ -198,7 +229,7 @@ PhysicalMemory::~PhysicalMemory()
{
// unmap the backing store
for (auto& s : backingStore)
- munmap((char*)s.second, s.first.size());
+ munmap((char*)s.pmem, s.range.size());
}
bool
@@ -314,7 +345,7 @@ PhysicalMemory::serialize(CheckpointOut &cp) const
// store each backing store memory segment in a file
for (auto& s : backingStore) {
ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
- serializeStore(cp, store_id++, s.first, s.second);
+ serializeStore(cp, store_id++, s.range, s.pmem);
}
}
@@ -407,8 +438,8 @@ PhysicalMemory::unserializeStore(CheckpointIn &cp)
fatal("Can't open physical memory checkpoint file '%s'", filename);
// we've already got the actual backing store mapped
- uint8_t* pmem = backingStore[store_id].second;
- AddrRange range = backingStore[store_id].first;
+ uint8_t* pmem = backingStore[store_id].pmem;
+ AddrRange range = backingStore[store_id].range;
long range_size;
UNSERIALIZE_SCALAR(range_size);
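The flag-consistency check above is written out twice, once inside the range loop and once for the trailing merged range. A possible refactor (illustrative, not part of the patch) that factors it into a helper using the accessors from abstract_mem.hh:

    // Hypothetical helper: all memories backing an interleaved range must
    // agree on their reporting/mapping flags, since the merged range gets
    // a single BackingStoreEntry.
    static void
    checkFlagConsistency(const std::vector<AbstractMemory *> &mems)
    {
        const AbstractMemory *f = mems.front();
        for (const auto *c : mems) {
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved range\n");
        }
    }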
diff --git a/src/mem/physical.hh b/src/mem/physical.hh
index 7f4c975f0..cc733b2d6 100644
--- a/src/mem/physical.hh
+++ b/src/mem/physical.hh
@@ -49,6 +49,51 @@
class AbstractMemory;
/**
+ * A single entry for the backing store.
+ */
+class BackingStoreEntry
+{
+ public:
+
+ /**
+ * Create a backing store entry. Don't worry about managing the memory
+ * pointers, because PhysicalMemory is responsible for that.
+ */
+ BackingStoreEntry(AddrRange range, uint8_t* pmem,
+ bool conf_table_reported, bool in_addr_map, bool kvm_map)
+ : range(range), pmem(pmem), confTableReported(conf_table_reported),
+ inAddrMap(in_addr_map), kvmMap(kvm_map)
+ {}
+
+ /**
+ * The address range covered in the guest.
+ */
+ AddrRange range;
+
+ /**
+ * Pointer to the host memory this range maps to. This memory is the same
+ * size as the range field.
+ */
+ uint8_t* pmem;
+
+ /**
+ * Whether this memory should be reported to the configuration table
+ */
+ bool confTableReported;
+
+ /**
+ * Whether this memory should appear in the global address map
+ */
+ bool inAddrMap;
+
+ /**
+ * Whether KVM should map this memory into the guest address space during
+ * acceleration.
+ */
+ bool kvmMap;
+};
+
+/**
* The physical memory encapsulates all memories in the system and
* provides basic functionality for accessing those memories without
* going through the memory system and interconnect.
@@ -90,7 +135,7 @@ class PhysicalMemory : public Serializable
// The physical memory used to provide the memory in the simulated
// system
- std::vector<std::pair<AddrRange, uint8_t*>> backingStore;
+ std::vector<BackingStoreEntry> backingStore;
// Prevent copying
PhysicalMemory(const PhysicalMemory&);
@@ -105,9 +150,14 @@
*
* @param range The address range covered
* @param memories The memories this range maps to
+ * @param conf_table_reported Should the memory be reported in the config table
+ * @param in_addr_map Should the memory appear in the global address map
+ * @param kvm_map Should KVM map this memory for the guest
*/
void createBackingStore(AddrRange range,
- const std::vector<AbstractMemory*>& _memories);
+ const std::vector<AbstractMemory*>& _memories,
+ bool conf_table_reported,
+ bool in_addr_map, bool kvm_map);
public:
@@ -167,7 +215,7 @@ class PhysicalMemory : public Serializable
*
* @return Pointers to the memory backing store
*/
- std::vector<std::pair<AddrRange, uint8_t*>> getBackingStore() const
+ std::vector<BackingStoreEntry> getBackingStore() const
{ return backingStore; }
/**
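A small design note on the accessor above: getBackingStore() returns the vector by value, so every call copies all entries (the pre-change pair-based version behaved the same way). Callers such as KvmVM::delayedStartup() bind the result to a const reference, which extends the temporary's lifetime but does not avoid the copy. A const-reference return (illustrative alternative, not in the patch) would:

    const std::vector<BackingStoreEntry>& getBackingStore() const
    { return backingStore; }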