author     Brandon Potter <brandon.potter@amd.com>    2015-07-20 09:15:21 -0500
committer  Brandon Potter <brandon.potter@amd.com>    2015-07-20 09:15:21 -0500
commit     a5802c823f4f6ec2bd97c953494551e31faa2cf8 (patch)
tree       b150d317b84b2168c09381d304919e86efcc9ab6 /src/cpu
parent     a7a0fd2c58cbd0e8e7e83ad8e7f7729a32527c02 (diff)
download   gem5-a5802c823f4f6ec2bd97c953494551e31faa2cf8.tar.xz
syscall_emul: [patch 13/22] add system call retry capability
This changeset adds functionality that allows system calls to be retried
without affecting thread context state, such as the program counter or
register values for the associated thread context, when a system call
returns with a retry fault.
This functionality is needed to solve problems with blocking system calls
in multi-process or multi-threaded simulations where information is passed
between processes/threads. Blocking system calls can deadlock the
simulation because the simulator itself is single threaded: only one
thread services the event queue, and if that thread blocks inside a
system call instruction, nothing else can make progress.
To illustrate the problem, consider two processes using the producer/consumer
sharing model. The processes can use file descriptors and the read and write
calls to pass information to one another. If the consumer calls the blocking
read system call before the producer has produced anything, the call will
block the event queue (while executing the system call instruction) and
deadlock the simulation.
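As a concrete, hypothetical illustration (not part of this patch), the
following guest program pair runs fine on real hardware, where the
consumer's read() simply blocks until the producer writes; under
single-threaded syscall emulation, the same blocking read() would stall
the lone event-queue thread before the producer is ever scheduled:

```cpp
// Hypothetical guest code: a producer/consumer pair sharing a pipe.
// The consumer may reach read() before any data exists; a host-blocking
// read() at that point would hang a single-threaded simulator.
#include <unistd.h>
#include <cstdio>

int main()
{
    int fds[2];
    if (pipe(fds) != 0)
        return 1;

    if (fork() == 0) {
        // Consumer: may call read() before the producer has written.
        char buf[16];
        ssize_t n = read(fds[0], buf, sizeof(buf));
        printf("consumer got %zd bytes\n", n);
        return 0;
    }

    // Producer: writes some time later.
    sleep(1);
    write(fds[1], "hello", 5);
    return 0;
}
```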
The solution implemented in this changeset is to recognize that a system
call would block and to generate a special retry fault instead. The fault
is passed back up through the function call chain until it reaches the
CPU model's pipeline, where it becomes visible and triggers the CPU model
to replay the instruction at a future tick, when the call has a chance to
succeed without ever entering a blocking state.
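The shape of the change on the CPU side is roughly the following; this is
a simplified sketch of the TimingSimpleCPU::advanceInst hunk in the diff
below (the names match the patch, the surrounding function is elided):

```cpp
// Sketch: when instruction execution returns a SyscallRetryFault,
// reschedule the fetch event syscallRetryLatency cycles in the future
// instead of on the next clock edge (see the timing.cc hunk below).
if (fault != NoFault) {
    advancePC(fault);

    Tick stall = dynamic_pointer_cast<SyscallRetryFault>(fault) ?
        clockEdge(syscallRetryLatency) : clockEdge();

    reschedule(fetchEvent, stall, true);
    _status = Faulting;
    return;
}
```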
In subsequent patches, we recognize that a syscall would block by calling
a non-blocking poll (from inside the system call implementation) and
checking for events. If events show up during the poll, the call would
not have blocked, and the syscall is allowed to proceed (calling an
underlying host system call if necessary). If the poll returns no events,
we generate the fault and retry the instruction for that thread context
at a distant tick, since retrying every tick would not be efficient.
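To make the readiness check concrete, here is a small, standalone
host-side illustration of the idea (this is not gem5 code; the actual
syscall-emulation changes land in the later patches mentioned above):
polling a descriptor with a zero timeout reveals whether a read() would
block, without ever blocking the caller.

```cpp
// Standalone illustration of a non-blocking readiness check. In the
// emulated syscall, a "false" result would translate into returning the
// retry fault rather than calling the (blocking) host read().
#include <poll.h>
#include <unistd.h>
#include <cstdio>

// Returns true if a read() on 'fd' would not block.
static bool readWouldNotBlock(int fd)
{
    struct pollfd pfd;
    pfd.fd = fd;
    pfd.events = POLLIN;
    // Zero timeout: poll() returns immediately either way.
    return poll(&pfd, 1, 0) > 0;
}

int main()
{
    int fds[2];
    if (pipe(fds) != 0)
        return 1;

    // Nothing written yet: the emulated call would be retried later.
    printf("before write: ready=%d\n", readWouldNotBlock(fds[0]));

    write(fds[1], "x", 1);

    // Data is available: the emulated call can safely proceed.
    printf("after write:  ready=%d\n", readWouldNotBlock(fds[0]));
    return 0;
}
```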
As an aside, the simulator has some multi-threading support for the event
queue, but it is not used by default and needs work. Even if the event
queue were completely multi-threaded, with a host hardware thread
servicing each simulator thread context in a 1:1 mapping, it would still
be possible to run into deadlock because of the event queue barriers at
quantum boundaries. Replaying the instruction at a later tick is the
simplest approach and solves the problem generally.
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/BaseCPU.py                  2
-rw-r--r--  src/cpu/base.cc                     3
-rw-r--r--  src/cpu/base.hh                     2
-rw-r--r--  src/cpu/checker/cpu.hh              2
-rw-r--r--  src/cpu/checker/thread_context.hh   4
-rw-r--r--  src/cpu/exec_context.hh             2
-rw-r--r--  src/cpu/minor/exec_context.hh       4
-rw-r--r--  src/cpu/o3/commit.hh                2
-rw-r--r--  src/cpu/o3/commit_impl.hh          14
-rw-r--r--  src/cpu/o3/cpu.cc                   4
-rw-r--r--  src/cpu/o3/cpu.hh                   2
-rw-r--r--  src/cpu/o3/dyn_inst.hh              2
-rw-r--r--  src/cpu/o3/dyn_inst_impl.hh         4
-rwxr-xr-x  src/cpu/o3/thread_context.hh        4
-rw-r--r--  src/cpu/o3/thread_state.hh          5
-rw-r--r--  src/cpu/simple/atomic.cc            9
-rw-r--r--  src/cpu/simple/exec_context.hh      4
-rw-r--r--  src/cpu/simple/timing.cc           10
-rw-r--r--  src/cpu/simple_thread.hh            4
-rw-r--r--  src/cpu/thread_context.hh           6
20 files changed, 57 insertions, 32 deletions
diff --git a/src/cpu/BaseCPU.py b/src/cpu/BaseCPU.py
index c85e5afda..7b8a615ea 100644
--- a/src/cpu/BaseCPU.py
+++ b/src/cpu/BaseCPU.py
@@ -142,6 +142,8 @@ class BaseCPU(MemObject):
 
     checker = Param.BaseCPU(NULL, "checker CPU")
 
+    syscallRetryLatency = Param.Cycles(10000, "Cycles to wait until retry")
+
     do_checkpoint_insts = Param.Bool(True,
         "enable checkpoint pseudo instructions")
     do_statistics_insts = Param.Bool(True,
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index 10b8ce297..08f95ea49 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -135,7 +135,8 @@ BaseCPU::BaseCPU(Params *p, bool is_checker)
       numThreads(p->numThreads), system(p->system),
       functionTraceStream(nullptr), currentFunctionStart(0),
       currentFunctionEnd(0), functionEntryTick(0),
-      addressMonitor(p->numThreads)
+      addressMonitor(p->numThreads),
+      syscallRetryLatency(p->syscallRetryLatency)
 {
     // if Python did not provide a valid ID, do it here
     if (_cpuId == -1 ) {
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index 6622339e0..14dfc260b 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -588,6 +588,8 @@ class BaseCPU : public MemObject
         assert(tid < numThreads);
         return &addressMonitor[tid];
     }
+
+    Cycles syscallRetryLatency;
 };
 
 #endif // THE_ISA == NULL_ISA
diff --git a/src/cpu/checker/cpu.hh b/src/cpu/checker/cpu.hh
index 21ff9c7f7..e47c88484 100644
--- a/src/cpu/checker/cpu.hh
+++ b/src/cpu/checker/cpu.hh
@@ -393,7 +393,7 @@ class CheckerCPU : public BaseCPU, public ExecContext
     void wakeup(ThreadID tid) override { }
     // Assume that the normal CPU's call to syscall was successful.
     // The checker's state would have already been updated by the syscall.
-    void syscall(int64_t callnum) override { }
+    void syscall(int64_t callnum, Fault *fault) override { }
 
     void handleError()
     {
diff --git a/src/cpu/checker/thread_context.hh b/src/cpu/checker/thread_context.hh
index 5fcb82f6d..0313d079b 100644
--- a/src/cpu/checker/thread_context.hh
+++ b/src/cpu/checker/thread_context.hh
@@ -146,8 +146,8 @@ class CheckerThreadContext : public ThreadContext
     SETranslatingPortProxy &getMemProxy() { return actualTC->getMemProxy(); }
 
     /** Executes a syscall in SE mode. */
-    void syscall(int64_t callnum)
-    { return actualTC->syscall(callnum); }
+    void syscall(int64_t callnum, Fault *fault)
+    { return actualTC->syscall(callnum, fault); }
 
     Status status() const { return actualTC->status(); }
 
diff --git a/src/cpu/exec_context.hh b/src/cpu/exec_context.hh
index dd718b56a..b21f0767a 100644
--- a/src/cpu/exec_context.hh
+++ b/src/cpu/exec_context.hh
@@ -228,7 +228,7 @@ class ExecContext {
     /**
      * Executes a syscall specified by the callnum.
      */
-    virtual void syscall(int64_t callnum) = 0;
+    virtual void syscall(int64_t callnum, Fault *fault) = 0;
 
     /** @} */
 
diff --git a/src/cpu/minor/exec_context.hh b/src/cpu/minor/exec_context.hh
index 6235721cf..6b2eae0f1 100644
--- a/src/cpu/minor/exec_context.hh
+++ b/src/cpu/minor/exec_context.hh
@@ -241,12 +241,12 @@ class ExecContext : public ::ExecContext
     }
 
     void
-    syscall(int64_t callnum) override
+    syscall(int64_t callnum, Fault *fault) override
     {
         if (FullSystem)
             panic("Syscall emulation isn't available in FS mode.\n");
 
-        thread.syscall(callnum);
+        thread.syscall(callnum, fault);
     }
 
     ThreadContext *tcBase() override { return thread.getTC(); }
diff --git a/src/cpu/o3/commit.hh b/src/cpu/o3/commit.hh
index 48c169389..3cce7f69c 100644
--- a/src/cpu/o3/commit.hh
+++ b/src/cpu/o3/commit.hh
@@ -235,7 +235,7 @@ class DefaultCommit
     size_t numROBFreeEntries(ThreadID tid);
 
     /** Generates an event to schedule a squash due to a trap. */
-    void generateTrapEvent(ThreadID tid);
+    void generateTrapEvent(ThreadID tid, Fault inst_fault);
 
     /** Records that commit needs to initiate a squash due to an
      *  external state update through the TC.
diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh
index c6c6ea723..ea77f18fb 100644
--- a/src/cpu/o3/commit_impl.hh
+++ b/src/cpu/o3/commit_impl.hh
@@ -526,13 +526,16 @@ DefaultCommit<Impl>::numROBFreeEntries(ThreadID tid)
 
 template <class Impl>
 void
-DefaultCommit<Impl>::generateTrapEvent(ThreadID tid)
+DefaultCommit<Impl>::generateTrapEvent(ThreadID tid, Fault inst_fault)
 {
     DPRINTF(Commit, "Generating trap event for [tid:%i]\n", tid);
 
     TrapEvent *trap = new TrapEvent(this, tid);
 
-    cpu->schedule(trap, cpu->clockEdge(trapLatency));
+    Cycles latency = dynamic_pointer_cast<SyscallRetryFault>(inst_fault) ?
+        cpu->syscallRetryLatency : trapLatency;
+
+    cpu->schedule(trap, cpu->clockEdge(latency));
     trapInFlight[tid] = true;
     thread[tid]->trapPending = true;
 }
@@ -767,10 +770,11 @@ DefaultCommit<Impl>::handleInterrupt()
 
         commitStatus[0] = TrapPending;
 
+        interrupt = NoFault;
+
         // Generate trap squash event.
-        generateTrapEvent(0);
+        generateTrapEvent(0, interrupt);
 
-        interrupt = NoFault;
         avoidQuiesceLiveLock = false;
     } else {
         DPRINTF(Commit, "Interrupt pending: instruction is %sin "
@@ -1240,7 +1244,7 @@ DefaultCommit<Impl>::commitHead(DynInstPtr &head_inst, unsigned inst_num)
         }
 
         // Generate trap squash event.
-        generateTrapEvent(tid);
+        generateTrapEvent(tid, inst_fault);
 
         return false;
     }
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index d85895030..8d38ed1f2 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -972,7 +972,7 @@ FullO3CPU<Impl>::trap(const Fault &fault, ThreadID tid,
 
 template <class Impl>
 void
-FullO3CPU<Impl>::syscall(int64_t callnum, ThreadID tid)
+FullO3CPU<Impl>::syscall(int64_t callnum, ThreadID tid, Fault *fault)
 {
     DPRINTF(O3CPU, "[tid:%i] Executing syscall().\n\n", tid);
 
@@ -983,7 +983,7 @@ FullO3CPU<Impl>::syscall(int64_t callnum, ThreadID tid)
     ++(this->thread[tid]->funcExeInst);
 
     // Execute the actual syscall.
-    this->thread[tid]->syscall(callnum);
+    this->thread[tid]->syscall(callnum, fault);
 
     // Decrease funcExeInst by one as the normal commit will handle
     // incrementing it.
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index 2065202f7..abe036b09 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -344,7 +344,7 @@ class FullO3CPU : public BaseO3CPU
     /** Executes a syscall.
      * @todo: Determine if this needs to be virtual.
      */
-    void syscall(int64_t callnum, ThreadID tid);
+    void syscall(int64_t callnum, ThreadID tid, Fault *fault);
 
     /** Starts draining the CPU's pipeline of all instructions in
      * order to stop all memory accesses. */
diff --git a/src/cpu/o3/dyn_inst.hh b/src/cpu/o3/dyn_inst.hh
index 6740c601d..8ab9979d2 100644
--- a/src/cpu/o3/dyn_inst.hh
+++ b/src/cpu/o3/dyn_inst.hh
@@ -237,7 +237,7 @@ class BaseO3DynInst : public BaseDynInst<Impl>
     bool simPalCheck(int palFunc);
 
     /** Emulates a syscall. */
-    void syscall(int64_t callnum);
+    void syscall(int64_t callnum, Fault *fault);
 
   public:
 
diff --git a/src/cpu/o3/dyn_inst_impl.hh b/src/cpu/o3/dyn_inst_impl.hh
index 06c0e15f3..00bcb3345 100644
--- a/src/cpu/o3/dyn_inst_impl.hh
+++ b/src/cpu/o3/dyn_inst_impl.hh
@@ -242,7 +242,7 @@ BaseO3DynInst<Impl>::simPalCheck(int palFunc)
 
 template <class Impl>
 void
-BaseO3DynInst<Impl>::syscall(int64_t callnum)
+BaseO3DynInst<Impl>::syscall(int64_t callnum, Fault *fault)
 {
     if (FullSystem)
         panic("Syscall emulation isn't available in FS mode.\n");
@@ -251,7 +251,7 @@ BaseO3DynInst<Impl>::syscall(int64_t callnum)
     // changes, update this instruction's nextPC because the syscall
     // must have changed the nextPC.
     TheISA::PCState curPC = this->cpu->pcState(this->threadNumber);
-    this->cpu->syscall(callnum, this->threadNumber);
+    this->cpu->syscall(callnum, this->threadNumber, fault);
     TheISA::PCState newPC = this->cpu->pcState(this->threadNumber);
     if (!(curPC == newPC)) {
         this->pcState(newPC);
diff --git a/src/cpu/o3/thread_context.hh b/src/cpu/o3/thread_context.hh
index 87b7d9198..0321f57f7 100755
--- a/src/cpu/o3/thread_context.hh
+++ b/src/cpu/o3/thread_context.hh
@@ -258,8 +258,8 @@ class O3ThreadContext : public ThreadContext
     { thread->storeCondFailures = sc_failures; }
 
     /** Executes a syscall in SE mode. */
-    virtual void syscall(int64_t callnum)
-    { return cpu->syscall(callnum, thread->threadId()); }
+    virtual void syscall(int64_t callnum, Fault *fault)
+    { return cpu->syscall(callnum, thread->threadId(), fault); }
 
     /** Reads the funcExeInst counter. */
     virtual Counter readFuncExeInst() { return thread->funcExeInst; }
diff --git a/src/cpu/o3/thread_state.hh b/src/cpu/o3/thread_state.hh
index 7765f86ea..4b4f51e8f 100644
--- a/src/cpu/o3/thread_state.hh
+++ b/src/cpu/o3/thread_state.hh
@@ -140,7 +140,10 @@ struct O3ThreadState : public ThreadState {
     ThreadContext *getTC() { return tc; }
 
     /** Handles the syscall. */
-    void syscall(int64_t callnum) { process->syscall(callnum, tc); }
+    void syscall(int64_t callnum, Fault *fault)
+    {
+        process->syscall(callnum, tc, fault);
+    }
 
     void dumpFuncProfile()
     {
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index c09f0c526..6c31f1ddd 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -628,6 +628,7 @@ AtomicSimpleCPU::tick()
 
         preExecute();
 
+        Tick stall_ticks = 0;
         if (curStaticInst) {
             fault = curStaticInst->execute(&t_info, traceData);
 
@@ -641,6 +642,13 @@ AtomicSimpleCPU::tick()
                 traceData = NULL;
             }
 
+            if (dynamic_pointer_cast<SyscallRetryFault>(fault)) {
+                // Retry execution of system calls after a delay.
+                // Prevents immediate re-execution since conditions which
+                // caused the retry are unlikely to change every tick.
+                stall_ticks += clockEdge(syscallRetryLatency) - curTick();
+            }
+
             postExecute();
         }
 
@@ -649,7 +657,6 @@ AtomicSimpleCPU::tick()
                 curStaticInst->isFirstMicroop()))
             instCnt++;
 
-        Tick stall_ticks = 0;
        if (simulate_inst_stalls && icache_access)
             stall_ticks += icache_latency;
 
diff --git a/src/cpu/simple/exec_context.hh b/src/cpu/simple/exec_context.hh
index 430790c09..bfae118ad 100644
--- a/src/cpu/simple/exec_context.hh
+++ b/src/cpu/simple/exec_context.hh
@@ -323,12 +323,12 @@ class SimpleExecContext : public ExecContext {
     /**
      * Executes a syscall specified by the callnum.
      */
-    void syscall(int64_t callnum) override
+    void syscall(int64_t callnum, Fault *fault) override
     {
         if (FullSystem)
             panic("Syscall emulation isn't available in FS mode.");
 
-        thread->syscall(callnum);
+        thread->syscall(callnum, fault);
     }
 
     /** Returns a pointer to the ThreadContext. */
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 43c50b948..1c468dc99 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -670,9 +670,15 @@ TimingSimpleCPU::advanceInst(const Fault &fault)
         return;
 
     if (fault != NoFault) {
-        advancePC(fault);
         DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
-        reschedule(fetchEvent, clockEdge(), true);
+
+        advancePC(fault);
+
+        Tick stall = dynamic_pointer_cast<SyscallRetryFault>(fault) ?
+            clockEdge(syscallRetryLatency) : clockEdge();
+
+        reschedule(fetchEvent, stall, true);
+
         _status = Faulting;
         return;
     }
diff --git a/src/cpu/simple_thread.hh b/src/cpu/simple_thread.hh
index 631b8ccfc..9ef00ab3f 100644
--- a/src/cpu/simple_thread.hh
+++ b/src/cpu/simple_thread.hh
@@ -424,9 +424,9 @@ class SimpleThread : public ThreadState
     void setStCondFailures(unsigned sc_failures)
     { storeCondFailures = sc_failures; }
 
-    void syscall(int64_t callnum)
+    void syscall(int64_t callnum, Fault *fault)
     {
-        process->syscall(callnum, tc);
+        process->syscall(callnum, tc, fault);
     }
 
     uint64_t readIntRegFlat(int idx) { return intRegs[idx]; }
diff --git a/src/cpu/thread_context.hh b/src/cpu/thread_context.hh
index f966c0aa1..ecbd1a41e 100644
--- a/src/cpu/thread_context.hh
+++ b/src/cpu/thread_context.hh
@@ -264,7 +264,7 @@ class ThreadContext
     // Same with st cond failures.
     virtual Counter readFuncExeInst() = 0;
 
-    virtual void syscall(int64_t callnum) = 0;
+    virtual void syscall(int64_t callnum, Fault *fault) = 0;
 
     // This function exits the thread context in the CPU and returns
     // 1 if the CPU has no more active threads (meaning it's OK to exit);
@@ -471,8 +471,8 @@ class ProxyThreadContext : public ThreadContext
     void setStCondFailures(unsigned sc_failures)
     { actualTC->setStCondFailures(sc_failures); }
 
-    void syscall(int64_t callnum)
-    { actualTC->syscall(callnum); }
+    void syscall(int64_t callnum, Fault *fault)
+    { actualTC->syscall(callnum, fault); }
 
     Counter readFuncExeInst() { return actualTC->readFuncExeInst(); }