Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/kvm/base.cc     | 23
-rw-r--r--  src/cpu/kvm/x86_cpu.cc  | 18
2 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/src/cpu/kvm/base.cc b/src/cpu/kvm/base.cc
index 8ba9f944e..2082670dc 100644
--- a/src/cpu/kvm/base.cc
+++ b/src/cpu/kvm/base.cc
@@ -415,6 +415,13 @@ void
BaseKvmCPU::wakeup()
{
DPRINTF(Kvm, "wakeup()\n");
+ // This method might have been called from another
+ // context. Migrate to this SimObject's event queue when
+ // delivering the wakeup signal.
+ EventQueue::ScopedMigration migrate(eventQueue());
+
+ // Kick the vCPU to get it to come out of KVM.
+ kick();
if (thread->status() != ThreadContext::Suspended)
return;
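
The guard used above is gem5's EventQueue::ScopedMigration. As a rough illustration of the RAII idea behind it, the sketch below releases the queue the calling thread is currently driving and holds the target queue for the lifetime of the object. This is a simplified stand-in under assumed names (SimpleEventQueue, ScopedMigrationSketch), not gem5's implementation.

// Minimal sketch of the scoped-migration idea: stop servicing the queue the
// calling thread currently owns, hold the target queue instead, and restore
// the original situation when the scope ends.  Hypothetical stand-in only.
#include <mutex>

struct SimpleEventQueue {
    std::mutex service_lock;   // serialises event servicing on this queue
};

class ScopedMigrationSketch {
  public:
    // Assumes the calling thread currently holds from->service_lock.
    ScopedMigrationSketch(SimpleEventQueue *from, SimpleEventQueue *to)
        : from_(from), to_(to)
    {
        if (from_ != to_) {
            from_->service_lock.unlock();   // stop driving the old queue
            to_->service_lock.lock();       // take ownership of the target queue
        }
    }

    ~ScopedMigrationSketch()
    {
        if (from_ != to_) {
            to_->service_lock.unlock();     // hand the target queue back
            from_->service_lock.lock();     // resume driving the original queue
        }
    }

  private:
    SimpleEventQueue *from_;
    SimpleEventQueue *to_;
};
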
@@ -635,6 +642,14 @@ BaseKvmCPU::kvmRun(Tick ticks)
// twice.
ticksExecuted = clockPeriod();
} else {
+ // This method is executed as a result of a tick event. That
+ // means that the event queue will be locked when entering the
+ // method. We temporarily unlock the event queue to allow
+ // other threads to steal control of this thread to inject
+ // interrupts. They will typically lock the queue and then
+ // force an exit from KVM by kicking the vCPU.
+ EventQueue::ScopedRelease release(curEventQueue());
+
if (ticks < runTimer->resolution()) {
DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
ticks, runTimer->resolution());
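
EventQueue::ScopedRelease, used in the hunk above, follows the inverse pattern: give up the queue lock for the duration of a scope so another thread can take it (for example, to kick the vCPU), then reacquire it. A minimal sketch with hypothetical names, not gem5's code:

// Minimal sketch of the scoped-release idea: temporarily give up the queue
// lock so other threads can take it, and reacquire it when the scope ends.
#include <mutex>

struct SimpleEventQueue {
    std::mutex service_lock;
};

class ScopedReleaseSketch {
  public:
    // Assumes the calling thread currently holds eq->service_lock.
    explicit ScopedReleaseSketch(SimpleEventQueue *eq)
        : eq_(eq)
    {
        eq_->service_lock.unlock();   // let other threads lock the queue and kick us
    }

    ~ScopedReleaseSketch()
    {
        eq_->service_lock.lock();     // reclaim the queue before servicing events again
    }

  private:
    SimpleEventQueue *eq_;
};
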
@@ -990,11 +1005,19 @@ BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
pkt.dataStatic(data);
if (mmio_req.isMmappedIpr()) {
+ // We currently assume that there is no need to migrate to a
+ // different event queue when doing IPRs. Currently, IPRs are
+ // only used for m5ops, so it should be a valid assumption.
const Cycles ipr_delay(write ?
TheISA::handleIprWrite(tc, &pkt) :
TheISA::handleIprRead(tc, &pkt));
return clockPeriod() * ipr_delay;
} else {
+ // Temporarily lock and migrate to the event queue of the
+ // VM. This queue is assumed to "own" all devices we need to
+ // access if running in multi-core mode.
+ EventQueue::ScopedMigration migrate(vm.eventQueue());
+
return dataPort.sendAtomic(&pkt);
}
}
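
The migration before dataPort.sendAtomic() matters because devices are assumed to schedule follow-up work on the event queue that owns them. The toy example below, with hypothetical names (ToyQueue, ToyDevice), illustrates that ownership rule; it is not gem5 code.

// Toy illustration of queue ownership: a device schedules events on the
// queue that owns it, so it must only be accessed while that queue is held.
#include <functional>
#include <mutex>
#include <queue>

struct ToyQueue {
    std::mutex lock;                           // held by the thread servicing this queue
    std::queue<std::function<void()>> events;  // events owned by this queue

    // Meant to be called with `lock` held.
    void schedule(std::function<void()> ev) { events.push(std::move(ev)); }
};

struct ToyDevice {
    ToyQueue *owner;   // the queue that "owns" this device
    int reg = 0;

    // Schedules follow-up work on the owning queue, so it must only run
    // while the caller holds owner->lock.
    int access(int value)
    {
        reg = value;
        owner->schedule([this] { /* model latency, raise an interrupt, ... */ });
        return reg;
    }
};

// What the MMIO path above does in spirit: take ownership of the device's
// queue for the duration of the access instead of touching the device from
// a foreign thread.
int mmio_from_vcpu_thread(ToyDevice &dev, int value)
{
    std::lock_guard<std::mutex> migrate(dev.owner->lock);
    return dev.access(value);
}
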
diff --git a/src/cpu/kvm/x86_cpu.cc b/src/cpu/kvm/x86_cpu.cc
index ea8c47082..3e736a913 100644
--- a/src/cpu/kvm/x86_cpu.cc
+++ b/src/cpu/kvm/x86_cpu.cc
@@ -1134,10 +1134,20 @@ X86KvmCPU::updateThreadContextMSRs()
void
X86KvmCPU::deliverInterrupts()
{
+ Fault fault;
+
syncThreadContext();
- Fault fault(interrupts->getInterrupt(tc));
- interrupts->updateIntrInfo(tc);
+ {
+ // Migrate to the interrupt controller's thread to get the
+ // interrupt. Even though the individual methods are safe to
+ // call across threads, we might still lose interrupts unless
+ // getInterrupt() and updateIntrInfo() are called atomically.
+ EventQueue::ScopedMigration migrate(interrupts->eventQueue());
+ fault = interrupts->getInterrupt(tc);
+ interrupts->updateIntrInfo(tc);
+ }
X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
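
The race the comment in this hunk describes can be illustrated with a toy controller: if an interrupt is posted between fetching the pending interrupt and acknowledging it, it is acknowledged without ever being delivered. The sketch below uses hypothetical names and a plain std::mutex in place of the interrupt controller's event queue; it is not gem5 code.

// Toy illustration of why the fetch/acknowledge pair must be atomic.  If
// another thread can post an interrupt between get_pending() and
// clear_pending(), that interrupt is acknowledged without ever being
// delivered.  Holding the controller's lock around both calls, as the
// ScopedMigration block above effectively does with the controller's event
// queue, closes that window.
#include <mutex>
#include <optional>

struct ToyInterruptController {
    std::mutex lock;                // stands in for the controller's event queue
    std::optional<int> pending;     // at most one pending interrupt vector

    // post() may be called from any thread.
    void post(int vector)
    {
        std::lock_guard<std::mutex> g(lock);
        pending = vector;
    }

    // The following two are meant to be called with `lock` held.
    std::optional<int> get_pending() const { return pending; }
    void clear_pending() { pending.reset(); }
};

// Deliver one interrupt: the scoped lock makes get + clear atomic with
// respect to concurrent post() calls, so no posting can slip in between.
std::optional<int> deliver(ToyInterruptController &ic)
{
    std::lock_guard<std::mutex> g(ic.lock);
    std::optional<int> vec = ic.get_pending();
    ic.clear_pending();
    return vec;
}
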
@@ -1340,6 +1350,10 @@ X86KvmCPU::handleKvmExitIO()
dataMasterId());
const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
+ // Temporarily lock and migrate to the event queue of the
+ // VM. This queue is assumed to "own" all devices we need to
+ // access if running in multi-core mode.
+ EventQueue::ScopedMigration migrate(vm.eventQueue());
for (int i = 0; i < count; ++i) {
Packet pkt(&io_req, cmd);