author     Andreas Sandberg <andreas@sandberg.pp.se>  2014-04-09 16:01:58 +0200
committer  Andreas Sandberg <andreas@sandberg.pp.se>  2014-04-09 16:01:58 +0200
commit     02b51afb7e6b250b4cd964cd447e71bd4f31236d (patch)
tree       a6cfea68dd9dae6ecd21340a18b8cc9434ef6388 /src/cpu/kvm
parent     221f4f232ae79b8123e7ce28d26a873e1ba9f9dc (diff)
kvm, x86: Add initial support for multicore simulation
Simulating an SMP or multicore system requires devices to be shared between multiple KVM vCPUs, which means that locking is required when accessing devices. This changeset adds the necessary locking to allow devices to execute correctly. It is implemented by temporarily migrating the KVM CPU to the VM's (and the devices') event queue when handling MMIO. Similarly, the CPU migrates to the interrupt controller's event queue when delivering an interrupt.

The support for fast-forwarding of multicore simulations added by this changeset assumes that all devices in a system are simulated in the same thread and that each vCPU has its own thread. Special care must be taken to ensure that devices living under the CPU in the object hierarchy (e.g., the interrupt controller) do not inherit the parent CPU's thread and are instead assigned to the device thread. The KvmVM object is assumed to live in the same thread as the other devices in the system.
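[Editorial note: the serialization described above hinges on two small RAII helpers, EventQueue::ScopedMigration and EventQueue::ScopedRelease, which appear throughout the patch below. As a rough mental model only, here is a minimal sketch assuming a mutex-protected queue and a thread-local current-queue pointer; the names and details are illustrative, not gem5's actual implementation in src/sim/eventq.hh.]

    #include <mutex>

    // Hypothetical stand-in for gem5's event queue: one mutex guards
    // the right to service (execute events on) the queue.
    struct EventQueue {
        std::mutex service_mutex;
        void lock()   { service_mutex.lock(); }
        void unlock() { service_mutex.unlock(); }
    };

    // Queue this thread is currently servicing; assumed non-null below.
    thread_local EventQueue *cur_eq = nullptr;

    // RAII migration: release our own queue, acquire the target queue
    // for the duration of the scope, then migrate back. This is how a
    // vCPU thread can safely touch devices owned by another thread.
    class ScopedMigration {
      public:
        explicit ScopedMigration(EventQueue *target)
            : old_eq(cur_eq), new_eq(target)
        {
            old_eq->unlock();  // let other threads take our queue
            new_eq->lock();    // serialize against the target's owner
            cur_eq = new_eq;
        }

        ~ScopedMigration()
        {
            new_eq->unlock();
            old_eq->lock();
            cur_eq = old_eq;
        }

      private:
        EventQueue *old_eq;
        EventQueue *new_eq;
    };

    // RAII release: drop the lock on the current queue, e.g. while the
    // vCPU executes inside KVM, so other threads can grab the queue and
    // kick the vCPU; reacquire it on scope exit.
    class ScopedRelease {
      public:
        ScopedRelease() : eq(cur_eq) { eq->unlock(); }
        ~ScopedRelease() { eq->lock(); }

      private:
        EventQueue *eq;
    };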
Diffstat (limited to 'src/cpu/kvm')
-rw-r--r--  src/cpu/kvm/base.cc     23
-rw-r--r--  src/cpu/kvm/x86_cpu.cc  18
2 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/src/cpu/kvm/base.cc b/src/cpu/kvm/base.cc
index 8ba9f944e..2082670dc 100644
--- a/src/cpu/kvm/base.cc
+++ b/src/cpu/kvm/base.cc
@@ -415,6 +415,13 @@ void
 BaseKvmCPU::wakeup()
 {
     DPRINTF(Kvm, "wakeup()\n");
+    // This method might have been called from another
+    // context. Migrate to this SimObject's event queue when
+    // delivering the wakeup signal.
+    EventQueue::ScopedMigration migrate(eventQueue());
+
+    // Kick the vCPU to get it to come out of KVM.
+    kick();
 
     if (thread->status() != ThreadContext::Suspended)
         return;
@@ -635,6 +642,14 @@ BaseKvmCPU::kvmRun(Tick ticks)
         // twice.
         ticksExecuted = clockPeriod();
     } else {
+        // This method is executed as a result of a tick event. That
+        // means that the event queue will be locked when entering the
+        // method. We temporarily unlock the event queue to allow
+        // other threads to steal control of this thread to inject
+        // interrupts. They will typically lock the queue and then
+        // force an exit from KVM by kicking the vCPU.
+        EventQueue::ScopedRelease release(curEventQueue());
+
         if (ticks < runTimer->resolution()) {
             DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
                     ticks, runTimer->resolution());
@@ -990,11 +1005,19 @@ BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
     pkt.dataStatic(data);
 
     if (mmio_req.isMmappedIpr()) {
+        // We currently assume that there is no need to migrate to a
+        // different event queue when doing IPRs. Currently, IPRs are
+        // only used for m5ops, so it should be a valid assumption.
         const Cycles ipr_delay(write ?
                                TheISA::handleIprWrite(tc, &pkt) :
                                TheISA::handleIprRead(tc, &pkt));
         return clockPeriod() * ipr_delay;
     } else {
+        // Temporarily lock and migrate to the event queue of the
+        // VM. This queue is assumed to "own" all devices we need to
+        // access if running in multi-core mode.
+        EventQueue::ScopedMigration migrate(vm.eventQueue());
+
         return dataPort.sendAtomic(&pkt);
     }
 }
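[Editorial note: the wakeup()/kvmRun() pair above also depends on kick() being able to force the vCPU out of the guest. With KVM this is conventionally done by signalling the vCPU thread so that a blocking ioctl(KVM_RUN) returns early with EINTR. A minimal sketch of that idea follows; the names (setupKickSignal, kickVcpu) and the choice of SIGRTMIN are assumptions for illustration, not gem5's actual kick().]

    #include <signal.h>
    #include <pthread.h>

    // Illustrative kick signal; gem5 reserves its own. In a real setup
    // the signal is kept blocked except while the vCPU thread sits in
    // ioctl(KVM_RUN), so it cannot fire at an unsafe point.
    static int kick_signal;

    void setupKickSignal()
    {
        kick_signal = SIGRTMIN;
        struct sigaction sa = {};
        sa.sa_handler = [](int) { /* no-op: only interrupts the ioctl */ };
        sigaction(kick_signal, &sa, nullptr);
    }

    // Force a vCPU thread out of KVM: the pending signal makes the
    // blocking ioctl(KVM_RUN) return with EINTR, handing control back
    // to the simulation loop so the event queue can be serviced.
    void kickVcpu(pthread_t vcpu_thread)
    {
        pthread_kill(vcpu_thread, kick_signal);
    }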
diff --git a/src/cpu/kvm/x86_cpu.cc b/src/cpu/kvm/x86_cpu.cc
index ea8c47082..3e736a913 100644
--- a/src/cpu/kvm/x86_cpu.cc
+++ b/src/cpu/kvm/x86_cpu.cc
@@ -1134,10 +1134,20 @@ X86KvmCPU::updateThreadContextMSRs()
 void
 X86KvmCPU::deliverInterrupts()
 {
+    Fault fault;
+
     syncThreadContext();
 
-    Fault fault(interrupts->getInterrupt(tc));
-    interrupts->updateIntrInfo(tc);
+    {
+        // Migrate to the interrupt controller's thread to get the
+        // interrupt. Even though the individual methods are safe to
+        // call across threads, we might still lose interrupts unless
+        // getInterrupt() and updateIntrInfo() are called atomically.
+        EventQueue::ScopedMigration migrate(interrupts->eventQueue());
+        fault = interrupts->getInterrupt(tc);
+        interrupts->updateIntrInfo(tc);
+    }
 
     X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
     if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
@@ -1340,6 +1350,10 @@ X86KvmCPU::handleKvmExitIO()
                        dataMasterId());
     const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
 
+    // Temporarily lock and migrate to the event queue of the
+    // VM. This queue is assumed to "own" all devices we need to
+    // access if running in multi-core mode.
+    EventQueue::ScopedMigration migrate(vm.eventQueue());
     for (int i = 0; i < count; ++i) {
         Packet pkt(&io_req, cmd);
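[Editorial note on the atomicity comment in deliverInterrupts() above: holding the interrupt controller's queue across both calls closes a classic lost-update window. A self-contained toy illustration; ToyIntCtrl is hypothetical, not a gem5 class.]

    #include <mutex>

    // Toy interrupt controller showing why the read and the
    // acknowledgement must share one critical section.
    class ToyIntCtrl {
      public:
        void post(int vector)
        {
            std::lock_guard<std::mutex> g(m);
            pending = vector;
        }

        // Read-and-acknowledge as one critical section, mirroring the
        // getInterrupt()/updateIntrInfo() pair above. If the two steps
        // took the lock separately, another thread could post a new
        // vector in between, and the stale acknowledgement below would
        // silently discard it.
        int getAndAck()
        {
            std::lock_guard<std::mutex> g(m);
            int v = pending;  // getInterrupt() analogue
            pending = -1;     // updateIntrInfo() analogue
            return v;
        }

      private:
        std::mutex m;
        int pending = -1;
    };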