summaryrefslogtreecommitdiff
path: root/src/cpu/minor/pipeline.cc
diff options
context:
space:
mode:
authorAndrew Bardsley <Andrew.Bardsley@arm.com>2014-07-23 16:09:04 -0500
committerAndrew Bardsley <Andrew.Bardsley@arm.com>2014-07-23 16:09:04 -0500
commit0e8a90f06bd3db00f700891a33458353478cce76 (patch)
tree50742efcc18254a36e80029b522139e8bd601dc2 /src/cpu/minor/pipeline.cc
parent040fa23d01109c68d194d2517df777844e4e2f13 (diff)
downloadgem5-0e8a90f06bd3db00f700891a33458353478cce76.tar.xz
cpu: `Minor' in-order CPU model
This patch contains a new CPU model named `Minor'. Minor models a four stage in-order execution pipeline (fetch lines, decompose into macroops, decompose macroops into microops, execute). The model was developed to support the ARM ISA but should be fixable to support all the remaining gem5 ISAs. It currently also works for Alpha, and regressions are included for ARM and Alpha (including Linux boot). Documentation for the model can be found in src/doc/inside-minor.doxygen and its internal operations can be visualised using the Minorview tool utils/minorview.py. Minor was designed to be fairly simple and not to engage in a lot of instruction annotation. As such, it currently has very few gathered stats and may lack other gem5 features. Minor is faster than the o3 model. Sample results: Benchmark | Stat host_seconds (s) ---------------+--------v--------v-------- (on ARM, opt) | simple | o3 | minor | timing | timing | timing ---------------+--------+--------+-------- 10.linux-boot | 169 | 1883 | 1075 10.mcf | 117 | 967 | 491 20.parser | 668 | 6315 | 3146 30.eon | 542 | 3413 | 2414 40.perlbmk | 2339 | 20905 | 11532 50.vortex | 122 | 1094 | 588 60.bzip2 | 2045 | 18061 | 9662 70.twolf | 207 | 2736 | 1036
Diffstat (limited to 'src/cpu/minor/pipeline.cc')
-rw-r--r--src/cpu/minor/pipeline.cc250
1 files changed, 250 insertions, 0 deletions
diff --git a/src/cpu/minor/pipeline.cc b/src/cpu/minor/pipeline.cc
new file mode 100644
index 000000000..9d802234b
--- /dev/null
+++ b/src/cpu/minor/pipeline.cc
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2013-2014 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andrew Bardsley
+ */
+
+#include <algorithm>
+
+#include "cpu/minor/decode.hh"
+#include "cpu/minor/execute.hh"
+#include "cpu/minor/fetch1.hh"
+#include "cpu/minor/fetch2.hh"
+#include "cpu/minor/pipeline.hh"
+#include "debug/Drain.hh"
+#include "debug/MinorCPU.hh"
+#include "debug/MinorTrace.hh"
+#include "debug/Quiesce.hh"
+
+namespace Minor
+{
+
+Pipeline::Pipeline(MinorCPU &cpu_, MinorCPUParams &params) :
+ Ticked(cpu_, &(cpu_.BaseCPU::numCycles)),
+ cpu(cpu_),
+ allow_idling(params.enableIdling),
+ f1ToF2(cpu.name() + ".f1ToF2", "lines",
+ params.fetch1ToFetch2ForwardDelay),
+ f2ToF1(cpu.name() + ".f2ToF1", "prediction",
+ params.fetch1ToFetch2BackwardDelay, true),
+ f2ToD(cpu.name() + ".f2ToD", "insts",
+ params.fetch2ToDecodeForwardDelay),
+ dToE(cpu.name() + ".dToE", "insts",
+ params.decodeToExecuteForwardDelay),
+ eToF1(cpu.name() + ".eToF1", "branch",
+ params.executeBranchDelay),
+ execute(cpu.name() + ".execute", cpu, params,
+ dToE.output(), eToF1.input()),
+ decode(cpu.name() + ".decode", cpu, params,
+ f2ToD.output(), dToE.input(), execute.inputBuffer),
+ fetch2(cpu.name() + ".fetch2", cpu, params,
+ f1ToF2.output(), eToF1.output(), f2ToF1.input(), f2ToD.input(),
+ decode.inputBuffer),
+ fetch1(cpu.name() + ".fetch1", cpu, params,
+ eToF1.output(), f1ToF2.input(), f2ToF1.output(), fetch2.inputBuffer),
+ activityRecorder(cpu.name() + ".activity", Num_StageId,
+ /* The max depth of inter-stage FIFOs */
+ std::max(params.fetch1ToFetch2ForwardDelay,
+ std::max(params.fetch2ToDecodeForwardDelay,
+ std::max(params.decodeToExecuteForwardDelay,
+ params.executeBranchDelay)))),
+ needToSignalDrained(false)
+{
+ if (params.fetch1ToFetch2ForwardDelay < 1) {
+ fatal("%s: fetch1ToFetch2ForwardDelay must be >= 1 (%d)\n",
+ cpu.name(), params.fetch1ToFetch2ForwardDelay);
+ }
+
+ if (params.fetch2ToDecodeForwardDelay < 1) {
+ fatal("%s: fetch2ToDecodeForwardDelay must be >= 1 (%d)\n",
+ cpu.name(), params.fetch2ToDecodeForwardDelay);
+ }
+
+ if (params.decodeToExecuteForwardDelay < 1) {
+ fatal("%s: decodeToExecuteForwardDelay must be >= 1 (%d)\n",
+ cpu.name(), params.decodeToExecuteForwardDelay);
+ }
+
+ if (params.executeBranchDelay < 1) {
+ fatal("%s: executeBranchDelay must be >= 1\n",
+ cpu.name(), params.executeBranchDelay);
+ }
+}
+
/** Dump MinorTrace state for the whole pipeline: each stage and each
 *  inter-stage buffer, emitted in pipeline order.  Called from
 *  evaluate() when the MinorTrace debug flag is set. */
void
Pipeline::minorTrace() const
{
    fetch1.minorTrace();
    f1ToF2.minorTrace();
    f2ToF1.minorTrace();
    fetch2.minorTrace();
    f2ToD.minorTrace();
    decode.minorTrace();
    dToE.minorTrace();
    execute.minorTrace();
    eToF1.minorTrace();
    activityRecorder.minorTrace();
}
+
/** Advance the pipeline by one cycle: step every stage (back to front),
 *  advance the inter-stage time buffers, record activity, then handle
 *  idling and drain completion. */
void
Pipeline::evaluate()
{
    /* Note that it's important to evaluate the stages in order to allow
     * 'immediate', 0-time-offset TimeBuffer activity to be visible from
     * later stages to earlier ones in the same cycle */
    execute.evaluate();
    decode.evaluate();
    fetch2.evaluate();
    fetch1.evaluate();

    /* Dump this cycle's state before the buffers advance */
    if (DTRACE(MinorTrace))
        minorTrace();

    /* Update the time buffers after the stages */
    f1ToF2.evaluate();
    f2ToF1.evaluate();
    f2ToD.evaluate();
    dToE.evaluate();
    eToF1.evaluate();

    /* The activity recorder must be called after all the stages and
     * before the idler (which acts on the advice of the activity
     * recorder) */
    activityRecorder.evaluate();

    if (allow_idling) {
        /* Become idle if we can but are not draining */
        if (!activityRecorder.active() && !needToSignalDrained) {
            DPRINTF(Quiesce, "Suspending as the processor is idle\n");
            stop();
        }

        /* Deactivate all stages.  Note that the stages *could*
         * activate and deactivate themselves but that's fraught
         * with additional difficulty.  As organised here, every stage
         * is deactivated each evaluated cycle; presumably stages that
         * do work re-activate themselves through the activity recorder
         * — confirm in the stage implementations */
        activityRecorder.deactivateStage(Pipeline::CPUStageId);
        activityRecorder.deactivateStage(Pipeline::Fetch1StageId);
        activityRecorder.deactivateStage(Pipeline::Fetch2StageId);
        activityRecorder.deactivateStage(Pipeline::DecodeStageId);
        activityRecorder.deactivateStage(Pipeline::ExecuteStageId);
    }

    if (needToSignalDrained) /* Must be draining */
    {
        DPRINTF(Drain, "Still draining\n");
        if (isDrained()) {
            DPRINTF(Drain, "Signalling end of draining\n");
            cpu.signalDrainDone();
            needToSignalDrained = false;
            stop();
        }
    }
}
+
/** The instruction-side memory port is owned by Fetch1, which issues
 *  the cache line fetches */
MinorCPU::MinorCPUPort &
Pipeline::getInstPort()
{
    return fetch1.getIcachePort();
}
+
/** The data-side memory port is owned by Execute, which issues the
 *  load/store accesses */
MinorCPU::MinorCPUPort &
Pipeline::getDataPort()
{
    return execute.getDcachePort();
}
+
/** Wake instruction fetch back up.  NOTE(review): this is routed
 *  through Execute rather than calling Fetch1 directly — presumably so
 *  Execute can take its own state into account before restarting
 *  fetch; confirm against Execute::wakeupFetch() */
void
Pipeline::wakeupFetch()
{
    execute.wakeupFetch();
}
+
+unsigned int
+Pipeline::drain(DrainManager *manager)
+{
+ DPRINTF(MinorCPU, "Draining pipeline by halting inst fetches. "
+ " Execution should drain naturally\n");
+
+ execute.drain();
+
+ /* Make sure that needToSignalDrained isn't accidentally set if we
+ * are 'pre-drained' */
+ bool drained = isDrained();
+ needToSignalDrained = !drained;
+
+ return (drained ? 0 : 1);
+}
+
/** Resume after draining.  Only Execute holds drain state in this
 *  file's drain path (see drain()), so only it is resumed */
void
Pipeline::drainResume()
{
    DPRINTF(Drain, "Drain resume\n");
    execute.drainResume();
}
+
+bool
+Pipeline::isDrained()
+{
+ bool fetch1_drained = fetch1.isDrained();
+ bool fetch2_drained = fetch2.isDrained();
+ bool decode_drained = decode.isDrained();
+ bool execute_drained = execute.isDrained();
+
+ bool f1_to_f2_drained = f1ToF2.empty();
+ bool f2_to_f1_drained = f2ToF1.empty();
+ bool f2_to_d_drained = f2ToD.empty();
+ bool d_to_e_drained = dToE.empty();
+
+ bool ret = fetch1_drained && fetch2_drained &&
+ decode_drained && execute_drained &&
+ f1_to_f2_drained && f2_to_f1_drained &&
+ f2_to_d_drained && d_to_e_drained;
+
+ DPRINTF(MinorCPU, "Pipeline undrained stages state:%s%s%s%s%s%s%s%s\n",
+ (fetch1_drained ? "" : " Fetch1"),
+ (fetch2_drained ? "" : " Fetch2"),
+ (decode_drained ? "" : " Decode"),
+ (execute_drained ? "" : " Execute"),
+ (f1_to_f2_drained ? "" : " F1->F2"),
+ (f2_to_f1_drained ? "" : " F2->F1"),
+ (f2_to_d_drained ? "" : " F2->D"),
+ (d_to_e_drained ? "" : " D->E")
+ );
+
+ return ret;
+}
+
+}