Diffstat (limited to 'src')
-rw-r--r--  src/cpu/cpu_models.py |    3
-rw-r--r--  src/cpu/inorder/InOrderCPU.py |   80
-rw-r--r--  src/cpu/inorder/InOrderTrace.py |   35
-rw-r--r--  src/cpu/inorder/SConscript |   82
-rw-r--r--  src/cpu/inorder/SConsopts |   33
-rw-r--r--  src/cpu/inorder/comm.hh |  104
-rw-r--r--  src/cpu/inorder/cpu.cc | 1322
-rw-r--r--  src/cpu/inorder/cpu.hh |  730
-rw-r--r--  src/cpu/inorder/first_stage.cc |  251
-rw-r--r--  src/cpu/inorder/first_stage.hh |   97
-rw-r--r--  src/cpu/inorder/inorder_cpu_builder.cc |   61
-rw-r--r--  src/cpu/inorder/inorder_dyn_inst.cc |  780
-rw-r--r--  src/cpu/inorder/inorder_dyn_inst.hh |  975
-rw-r--r--  src/cpu/inorder/inorder_trace.cc |   94
-rw-r--r--  src/cpu/inorder/inorder_trace.hh |   98
-rw-r--r--  src/cpu/inorder/params.hh |  124
-rw-r--r--  src/cpu/inorder/pipeline_stage.cc | 1033
-rw-r--r--  src/cpu/inorder/pipeline_stage.hh |  358
-rw-r--r--  src/cpu/inorder/pipeline_traits.5stage.cc |  166
-rw-r--r--  src/cpu/inorder/pipeline_traits.5stage.hh |  147
-rw-r--r--  src/cpu/inorder/pipeline_traits.9stage.cc |  242
-rw-r--r--  src/cpu/inorder/pipeline_traits.9stage.hh |  155
-rw-r--r--  src/cpu/inorder/pipeline_traits.9stage.smt2.cc |  240
-rw-r--r--  src/cpu/inorder/pipeline_traits.9stage.smt2.hh |  155
-rw-r--r--  src/cpu/inorder/pipeline_traits.cc |  153
-rw-r--r--  src/cpu/inorder/pipeline_traits.hh |  177
-rw-r--r--  src/cpu/inorder/reg_dep_map.cc |  236
-rw-r--r--  src/cpu/inorder/reg_dep_map.hh |  105
-rw-r--r--  src/cpu/inorder/resource.cc |  434
-rw-r--r--  src/cpu/inorder/resource.hh |  404
-rw-r--r--  src/cpu/inorder/resource_pool.9stage.cc |  357
-rw-r--r--  src/cpu/inorder/resource_pool.cc |  364
-rw-r--r--  src/cpu/inorder/resource_pool.hh |  189
-rw-r--r--  src/cpu/inorder/resources/agen_unit.cc |   98
-rw-r--r--  src/cpu/inorder/resources/agen_unit.hh |   64
-rw-r--r--  src/cpu/inorder/resources/bpred_unit.cc |  426
-rw-r--r--  src/cpu/inorder/resources/bpred_unit.hh |  258
-rw-r--r--  src/cpu/inorder/resources/branch_predictor.cc |  149
-rw-r--r--  src/cpu/inorder/resources/branch_predictor.hh |   87
-rw-r--r--  src/cpu/inorder/resources/cache_unit.cc |  609
-rw-r--r--  src/cpu/inorder/resources/cache_unit.hh |  305
-rw-r--r--  src/cpu/inorder/resources/decode_unit.cc |   92
-rw-r--r--  src/cpu/inorder/resources/decode_unit.hh |   68
-rw-r--r--  src/cpu/inorder/resources/execution_unit.cc |  182
-rw-r--r--  src/cpu/inorder/resources/execution_unit.hh |   77
-rw-r--r--  src/cpu/inorder/resources/fetch_seq_unit.cc |  324
-rw-r--r--  src/cpu/inorder/resources/fetch_seq_unit.hh |  119
-rw-r--r--  src/cpu/inorder/resources/graduation_unit.cc |  113
-rw-r--r--  src/cpu/inorder/resources/graduation_unit.hh |   70
-rw-r--r--  src/cpu/inorder/resources/inst_buffer.cc |  222
-rw-r--r--  src/cpu/inorder/resources/inst_buffer.hh |   93
-rw-r--r--  src/cpu/inorder/resources/inst_buffer_new.cc |  156
-rw-r--r--  src/cpu/inorder/resources/inst_buffer_new.hh |  109
-rw-r--r--  src/cpu/inorder/resources/mem_dep_unit.hh |   66
-rw-r--r--  src/cpu/inorder/resources/mult_div_unit.cc |  291
-rw-r--r--  src/cpu/inorder/resources/mult_div_unit.hh |  138
-rw-r--r--  src/cpu/inorder/resources/resource_list.hh |   47
-rw-r--r--  src/cpu/inorder/resources/tlb_unit.cc |  186
-rw-r--r--  src/cpu/inorder/resources/tlb_unit.hh |  124
-rw-r--r--  src/cpu/inorder/resources/use_def.cc |  326
-rw-r--r--  src/cpu/inorder/resources/use_def.hh |  102
-rw-r--r--  src/cpu/inorder/thread_context.cc |  371
-rw-r--r--  src/cpu/inorder/thread_context.hh |  286
-rw-r--r--  src/cpu/inorder/thread_state.hh |   90
64 files changed, 15432 insertions, 0 deletions
diff --git a/src/cpu/cpu_models.py b/src/cpu/cpu_models.py
index 5b0c6c4da..793f8c646 100644
--- a/src/cpu/cpu_models.py
+++ b/src/cpu/cpu_models.py
@@ -82,3 +82,6 @@ CpuModel('CheckerCPU', 'checker_cpu_exec.cc',
CpuModel('O3CPU', 'o3_cpu_exec.cc',
'#include "cpu/o3/isa_specific.hh"',
{ 'CPU_exec_context': 'O3DynInst' })
+CpuModel('InOrderCPU', 'inorder_cpu_exec.cc',
+ '#include "cpu/inorder/inorder_dyn_inst.hh"',
+ { 'CPU_exec_context': 'InOrderDynInst' })
diff --git a/src/cpu/inorder/InOrderCPU.py b/src/cpu/inorder/InOrderCPU.py
new file mode 100644
index 000000000..a5e81a090
--- /dev/null
+++ b/src/cpu/inorder/InOrderCPU.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2007 MIPS Technologies, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Korey Sewell
+
+from m5.params import *
+from m5.proxy import *
+from m5 import build_env
+from BaseCPU import BaseCPU
+
+class InOrderCPU(BaseCPU):
+ type = 'InOrderCPU'
+ activity = Param.Unsigned(0, "Initial count")
+ numThreads = Param.Unsigned(1, "number of HW thread contexts")
+
+ cachePorts = Param.Unsigned("Cache Ports")
+ stageWidth = Param.Unsigned(1, "Stage width")
+
+ fetchMemPort = Param.String("icache_port" , "Name of Memory Port to get instructions from")
+ dataMemPort = Param.String("dcache_port" , "Name of Memory Port to get data from")
+ icache_port = Port("Instruction Port")
+ dcache_port = Port("Data Port")
+ _mem_ports = ['icache_port', 'dcache_port']
+
+ predType = Param.String("tournament", "Branch predictor type ('local', 'tournament')")
+ localPredictorSize = Param.Unsigned(2048, "Size of local predictor")
+ localCtrBits = Param.Unsigned(2, "Bits per counter")
+ localHistoryTableSize = Param.Unsigned(2048, "Size of local history table")
+ localHistoryBits = Param.Unsigned(11, "Bits for the local history")
+ globalPredictorSize = Param.Unsigned(8192, "Size of global predictor")
+ globalCtrBits = Param.Unsigned(2, "Bits per counter")
+ globalHistoryBits = Param.Unsigned(13, "Bits of history")
+ choicePredictorSize = Param.Unsigned(8192, "Size of choice predictor")
+ choiceCtrBits = Param.Unsigned(2, "Bits of choice counters")
+
+ BTBEntries = Param.Unsigned(4096, "Number of BTB entries")
+ BTBTagSize = Param.Unsigned(16, "Size of the BTB tags, in bits")
+
+ RASSize = Param.Unsigned(16, "RAS size")
+
+ instShiftAmt = Param.Unsigned(2, "Number of bits to shift instructions by")
+ functionTrace = Param.Bool(False, "Enable function trace")
+ functionTraceStart = Param.Tick(0, "Cycle to start function trace")
+ stageTracing = Param.Bool(False, "Enable tracing of each stage in CPU")
+
+ memBlockSize = Param.Unsigned("Memory Block Size")
+
+ multLatency = Param.Unsigned(1, "Latency for Multiply Operations")
+ multRepeatRate = Param.Unsigned(1, "Repeat Rate for Multiply Operations")
+ div8Latency = Param.Unsigned(1, "Latency for 8-bit Divide Operations")
+ div8RepeatRate = Param.Unsigned(1, "Repeat Rate for 8-bit Divide Operations")
+ div16Latency = Param.Unsigned(1, "Latency for 16-bit Divide Operations")
+ div16RepeatRate = Param.Unsigned(1, "Repeat Rate for 16-bit Divide Operations")
+ div24Latency = Param.Unsigned(1, "Latency for 24-bit Divide Operations")
+ div24RepeatRate = Param.Unsigned(1, "Repeat Rate for 24-bit Divide Operations")
+ div32Latency = Param.Unsigned(1, "Latency for 32-bit Divide Operations")
+ div32RepeatRate = Param.Unsigned(1, "Repeat Rate for 32-bit Divide Operations")
diff --git a/src/cpu/inorder/InOrderTrace.py b/src/cpu/inorder/InOrderTrace.py
new file mode 100644
index 000000000..3453fa675
--- /dev/null
+++ b/src/cpu/inorder/InOrderTrace.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2007 MIPS Technologies, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Korey Sewell
+
+from m5.SimObject import SimObject
+from m5.params import *
+from InstTracer import InstTracer
+
+class InOrderTrace(InstTracer):
+ type = 'InOrderTrace'
+ cxx_class = 'Trace::InOrderTrace'
diff --git a/src/cpu/inorder/SConscript b/src/cpu/inorder/SConscript
new file mode 100644
index 000000000..c7edd1d76
--- /dev/null
+++ b/src/cpu/inorder/SConscript
@@ -0,0 +1,82 @@
+# -*- mode:python -*-
+
+# Copyright (c) 2007 MIPS Technologies, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Korey Sewell
+
+Import('*')
+
+if 'InOrderCPU' in env['CPU_MODELS']:
+ SimObject('InOrderCPU.py')
+ SimObject('InOrderTrace.py')
+
+ TraceFlag('ResReqCount')
+ TraceFlag('FreeList')
+ TraceFlag('InOrderCachePort')
+ TraceFlag('InOrderStage')
+ TraceFlag('InOrderStall')
+ TraceFlag('InOrderCPU')
+ TraceFlag('InOrderMDU')
+ TraceFlag('RegDepMap')
+ TraceFlag('Rename')
+ TraceFlag('InOrderDynInst')
+ TraceFlag('Resource')
+ TraceFlag('RefCount')
+
+ CompoundFlag('InOrderCPUAll', [ 'InOrderStage', 'InOrderStall', 'InOrderCPU',
+ 'InOrderMDU', 'RegDepMap', 'Resource', 'Rename'])
+
+ Source('pipeline_traits.cc')
+ Source('inorder_dyn_inst.cc')
+ Source('inorder_cpu_builder.cc')
+ Source('inorder_trace.cc')
+ Source('pipeline_stage.cc')
+ Source('first_stage.cc')
+ Source('resource.cc')
+ Source('resources/agen_unit.cc')
+ Source('resources/execution_unit.cc')
+ Source('resources/bpred_unit.cc')
+ Source('resources/branch_predictor.cc')
+ Source('resources/cache_unit.cc')
+ Source('resources/use_def.cc')
+ Source('resources/decode_unit.cc')
+ Source('resources/inst_buffer.cc')
+ Source('resources/graduation_unit.cc')
+ Source('resources/tlb_unit.cc')
+ Source('resources/fetch_seq_unit.cc')
+ Source('resources/mult_div_unit.cc')
+ Source('resource_pool.cc')
+ Source('reg_dep_map.cc')
+ Source('../o3/btb.cc')
+ Source('../o3/tournament_pred.cc')
+ Source('../o3/2bit_local_pred.cc')
+ Source('../o3/free_list.cc')
+ Source('../o3/rename_map.cc')
+ Source('../o3/ras.cc')
+ Source('thread_context.cc')
+ Source('cpu.cc')
+
diff --git a/src/cpu/inorder/SConsopts b/src/cpu/inorder/SConsopts
new file mode 100644
index 000000000..82ebd18ea
--- /dev/null
+++ b/src/cpu/inorder/SConsopts
@@ -0,0 +1,33 @@
+# -*- mode:python -*-
+
+# Copyright (c) 2007 MIPS Technologies, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Korey Sewell
+
+Import('*')
+
+all_cpu_list.append('InOrderCPU')
diff --git a/src/cpu/inorder/comm.hh b/src/cpu/inorder/comm.hh
new file mode 100644
index 000000000..c687a9ab4
--- /dev/null
+++ b/src/cpu/inorder/comm.hh
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_COMM_HH__
+#define __CPU_INORDER_COMM_HH__
+
+#include <vector>
+
+#include "arch/faults.hh"
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inst_seq.hh"
+#include "sim/host.hh"
+
+/** Struct that defines the information passed from in between stages */
+/** This information mainly goes forward through the pipeline. */
+struct InterStageStruct {
+ int size;
+ ThePipeline::DynInstPtr insts[ThePipeline::StageWidth];
+ bool squash;
+ bool branchMispredict;
+ bool branchTaken;
+ uint64_t mispredPC;
+ uint64_t nextPC;
+ InstSeqNum squashedSeqNum;
+ bool includeSquashInst;
+};
+
+/** Turn This into a Class */
+/** Struct that defines all backwards communication. */
+struct TimeStruct {
+ struct stageComm {
+ bool squash;
+ bool predIncorrect;
+ uint64_t branchAddr;
+
+ // @todo: Might want to package this kind of branch stuff into a single
+ // struct as it is used pretty frequently.
+ bool branchMispredict;
+ bool branchTaken;
+ uint64_t mispredPC;
+ uint64_t nextPC;
+
+ unsigned branchCount;
+
+ // Represents the instruction that has either been retired or
+ // squashed. Similar to having a single bus that broadcasts the
+ // retired or squashed sequence number.
+ InstSeqNum doneSeqNum;
+ InstSeqNum bdelayDoneSeqNum;
+ bool squashDelaySlot;
+
+ //Just in case we want to do a commit/squash on a cycle
+ //(necessary for multiple ROBs?)
+ bool commitInsts;
+ InstSeqNum squashSeqNum;
+
+ // Communication specifically to the IQ to tell the IQ that it can
+ // schedule a non-speculative instruction.
+ InstSeqNum nonSpecSeqNum;
+
+ bool uncached;
+ ThePipeline::DynInstPtr uncachedLoad;
+
+ bool interruptPending;
+ bool clearInterrupt;
+ };
+
+ stageComm stageInfo[ThePipeline::NumStages][ThePipeline::MaxThreads];
+
+ bool stageBlock[ThePipeline::NumStages][ThePipeline::MaxThreads];
+ bool stageUnblock[ThePipeline::NumStages][ThePipeline::MaxThreads];
+};
+
+#endif //__CPU_INORDER_COMM_HH__
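
The two structs above are plain data carriers; the pipeline moves them through the TimeBuffer instances declared in cpu.hh (timeBuffer carries TimeStruct backwards, stageQueue[] carries InterStageStruct forwards between adjacent stages). The following is a minimal sketch of that pattern, assuming the getWire()/advance() interface of base/timebuf.hh used elsewhere in this changeset; the function exampleStageComm and its inst argument are illustrative only, not part of the commit.

#include "base/timebuf.hh"

#include "cpu/inorder/comm.hh"

void
exampleStageComm(ThePipeline::DynInstPtr inst)
{
    // Forward queue sitting between two adjacent stages (2 past / 2 future
    // slots, mirroring timeBuffer(2, 2) in the InOrderCPU constructor).
    TimeBuffer<InterStageStruct> stage_queue(2, 2);

    // The producing stage writes into the "current" slot.
    TimeBuffer<InterStageStruct>::wire to_next = stage_queue.getWire(0);
    to_next->size = 0;
    to_next->squash = false;
    to_next->insts[to_next->size++] = inst;

    // The consuming stage reads the slot that was written one cycle earlier.
    TimeBuffer<InterStageStruct>::wire from_prev = stage_queue.getWire(-1);
    if (!from_prev->squash && from_prev->size > 0) {
        // ... process from_prev->insts[0 .. size-1]
    }

    // InOrderCPU::tick() advances every queue once per cycle, which is what
    // carries the written entries toward the reader.
    stage_queue.advance();
}
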
diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc
new file mode 100644
index 000000000..adbf645f4
--- /dev/null
+++ b/src/cpu/inorder/cpu.cc
@@ -0,0 +1,1322 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "arch/utility.hh"
+#include "cpu/exetrace.hh"
+#include "cpu/activity.hh"
+#include "cpu/simple_thread.hh"
+#include "cpu/thread_context.hh"
+#include "cpu/base.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/thread_context.hh"
+#include "cpu/inorder/thread_state.hh"
+#include "cpu/inorder/cpu.hh"
+#include "params/InOrderCPU.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/first_stage.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+#include "cpu/inorder/resource_pool.hh"
+#include "mem/translating_port.hh"
+#include "sim/process.hh"
+//#include "sim/root.hh"
+#include "sim/stat_control.hh"
+#include <algorithm>
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+InOrderCPU::TickEvent::TickEvent(InOrderCPU *c)
+ : Event(CPU_Tick_Pri), cpu(c)
+{ }
+
+
+void
+InOrderCPU::TickEvent::process()
+{
+ cpu->tick();
+}
+
+
+const char *
+InOrderCPU::TickEvent::description()
+{
+ return "InOrderCPU tick event";
+}
+
+InOrderCPU::CPUEvent::CPUEvent(InOrderCPU *_cpu, CPUEventType e_type,
+ Fault fault, unsigned _tid, unsigned _vpe)
+ : Event(CPU_Tick_Pri), cpu(_cpu)
+{
+ setEvent(e_type, fault, _tid, _vpe);
+}
+
+void
+InOrderCPU::CPUEvent::process()
+{
+ switch (cpuEventType)
+ {
+ case ActivateThread:
+ cpu->activateThread(tid);
+ break;
+
+ //@TODO: Consider Implementing "Suspend Thread" as Separate from Deallocate
+ case SuspendThread: // Suspend & Deallocate are same for now.
+ //cpu->suspendThread(tid);
+ //break;
+ case DeallocateThread:
+ cpu->deallocateThread(tid);
+ break;
+
+ case EnableVPEs:
+ cpu->enableVPEs(vpe);
+ break;
+
+ case DisableVPEs:
+ cpu->disableVPEs(tid, vpe);
+ break;
+
+ case EnableThreads:
+ cpu->enableThreads(vpe);
+ break;
+
+ case DisableThreads:
+ cpu->disableThreads(tid, vpe);
+ break;
+
+ case Trap:
+ cpu->trapCPU(fault, tid);
+ break;
+
+ default:
+ fatal("Unrecognized Event Type %d", cpuEventType);
+ }
+
+ cpu->cpuEventRemoveList.push(this);
+}
+
+const char *
+InOrderCPU::CPUEvent::description()
+{
+ return "InOrderCPU event";
+}
+
+void
+InOrderCPU::CPUEvent::scheduleEvent(int delay)
+{
+ if (squashed())
+ mainEventQueue.reschedule(this,curTick + cpu->ticks(delay));
+ else if (!scheduled())
+ mainEventQueue.schedule(this,curTick + cpu->ticks(delay));
+}
+
+void
+InOrderCPU::CPUEvent::unscheduleEvent()
+{
+ if (scheduled())
+ squash();
+}
+
+InOrderCPU::InOrderCPU(Params *params)
+ : BaseCPU(params),
+ cpu_id(params->cpu_id),
+ tickEvent(this),
+ miscRegFile(this),
+ timeBuffer(2 , 2),
+ removeInstsThisCycle(false),
+ activityRec(params->name, NumStages, 10, params->activity),
+ switchCount(0),
+ deferRegistration(false/*params->deferRegistration*/),
+ stageTracing(params->stageTracing),
+ numThreads(params->numThreads),
+ numVirtProcs(1)
+{
+ cpu_params = params;
+
+ resPool = new ResourcePool(this, params);
+// resPool->init();
+
+ coreType = "default"; // eventually get this from params
+
+ _status = Idle;
+
+ // Resize for Multithreading CPUs
+ thread.resize(numThreads);
+
+ int active_threads = params->workload.size();
+
+ if (active_threads > MaxThreads) {
+        panic("Workload Size too large. Increase the 'MaxThreads' "
+ "in your InOrder implementation or "
+ "edit your workload size.");
+ }
+
+ // Bind the fetch & data ports from the resource pool.
+ fetchPortIdx = resPool->getPortIdx(params->fetchMemPort);
+ if (fetchPortIdx == 0) {
+ warn("Unable to find port to fetch instructions from.\n");
+ }
+
+ dataPortIdx = resPool->getPortIdx(params->dataMemPort);
+ if (dataPortIdx == 0) {
+ warn("Unable to find port for data.\n");
+ }
+
+
+    /* Use this port for syscall emulation writes to memory. */
+ //Port *mem_port = NULL;
+ //TranslatingPort *trans_port = NULL;
+
+ for (int i = 0; i < numThreads; ++i) {
+ if (i < params->workload.size()) {
+            this->thread[i] = new Thread(this, i, params->workload[i],
+                                         i);
+
+            DPRINTF(InOrderCPU, "Workload[%i] process is %#x.\n",
+                    i, this->thread[i]);
+
+            // Start threads off in "Suspended" status
+ this->thread[i]->setStatus(ThreadContext::Suspended);
+
+ } else {
+ //Allocate Empty thread so M5 can use later
+ //when scheduling threads to CPU
+ Process* dummy_proc = params->workload[0]; //LiveProcess::createDummy();
+ this->thread[i] = new Thread(this, i, dummy_proc, i);
+
+ // Set Up Syscall Emulation Port
+ //this->thread[i]->setMemPort(trans_port);
+ }
+
+ // Setup the TC that will serve as the interface to the threads/CPU.
+ InOrderThreadContext *tc = new InOrderThreadContext;
+ tc->cpu = this;
+ tc->thread = this->thread[i];
+
+ // Give the thread the TC.
+ thread[i]->tc = tc;
+ thread[i]->setFuncExeInst(0);
+ globalSeqNum[i] = 1;
+
+ // Add the TC to the CPU's list of TC's.
+ this->threadContexts.push_back(tc);
+ }
+
+ // Initialize TimeBuffer Stage Queues
+ // For now just have these time buffers be pretty big.
+ // @note: This could be statically allocated but changes
+ // would have to be made to the standard time buffer class.
+ for (int stNum=0; stNum < NumStages - 1; stNum++) {
+ stageQueue[stNum] = new StageQueue(NumStages, NumStages);
+ }
+
+
+ // Set Up Pipeline Stages
+ for (int stNum=0; stNum < NumStages; stNum++) {
+ if (stNum == 0)
+ pipelineStage[stNum] = new FirstStage(params, stNum);
+ else
+ pipelineStage[stNum] = new PipelineStage(params, stNum);
+
+ pipelineStage[stNum]->setCPU(this);
+ pipelineStage[stNum]->setActiveThreads(&activeThreads);
+ pipelineStage[stNum]->setTimeBuffer(&timeBuffer);
+
+ // Take Care of 1st/Nth stages
+ if (stNum > 0)
+ pipelineStage[stNum]->setPrevStageQueue(stageQueue[stNum - 1]);
+        if (stNum < NumStages - 1)
+            pipelineStage[stNum]->setNextStageQueue(stageQueue[stNum]);
+ }
+
+ // Initialize thread specific variables
+ for (int tid=0; tid < numThreads; tid++) {
+ archRegDepMap[tid].setCPU(this);
+
+ nonSpecInstActive[tid] = false;
+ nonSpecSeqNum[tid] = 0;
+
+ squashSeqNum[tid] = MaxAddr;
+ lastSquashCycle[tid] = 0;
+
+ intRegFile[tid].clear();
+ floatRegFile[tid].clear();
+ }
+
+ // Update miscRegFile if necessary
+ if (numThreads > 1) {
+ miscRegFile.expandForMultithreading(numThreads, numVirtProcs);
+ }
+
+ miscRegFile.clear();
+
+ lastRunningCycle = curTick;
+ contextSwitch = false;
+
+ // Define dummy instructions and resource requests to be used.
+    dummyBufferInst = new InOrderDynInst(this, NULL, 0, 0);
+ dummyReq = new ResourceRequest(NULL, NULL, 0, 0, 0, 0);
+
+ // Reset CPU to reset state.
+#if FULL_SYSTEM
+ Fault resetFault = new ResetFault();
+ resetFault->invoke(tcBase());
+#else
+ reset();
+#endif
+
+ // Schedule First Tick Event, CPU will reschedule itself from here on out.
+ scheduleTickEvent(0);
+}
+
+
+void
+InOrderCPU::regStats()
+{
+ /* Register the Resource Pool's stats here.*/
+ resPool->regStats();
+
+ /* Register any of the InOrderCPU's stats here.*/
+ timesIdled
+ .name(name() + ".timesIdled")
+ .desc("Number of times that the entire CPU went into an idle state and"
+ " unscheduled itself")
+ .prereq(timesIdled);
+
+ idleCycles
+ .name(name() + ".idleCycles")
+ .desc("Total number of cycles that the CPU has spent unscheduled due "
+ "to idling")
+ .prereq(idleCycles);
+
+ threadCycles
+ .init(numThreads)
+ .name(name() + ".threadCycles")
+ .desc("Total Number of Cycles A Thread Was Active in CPU (Per-Thread)");
+
+ smtCycles
+ .name(name() + ".smtCycles")
+        .desc("Total number of cycles that the CPU was simultaneously "
+              "multithreading (SMT).");
+
+ committedInsts
+ .init(numThreads)
+ .name(name() + ".committedInsts")
+ .desc("Number of Instructions Simulated (Per-Thread)");
+
+ smtCommittedInsts
+ .init(numThreads)
+ .name(name() + ".smtCommittedInsts")
+ .desc("Number of SMT Instructions Simulated (Per-Thread)");
+
+ totalCommittedInsts
+ .name(name() + ".committedInsts_total")
+ .desc("Number of Instructions Simulated (Total)");
+
+ cpi
+ .name(name() + ".cpi")
+ .desc("CPI: Cycles Per Instruction (Per-Thread)")
+ .precision(6);
+ cpi = threadCycles / committedInsts;
+
+ smtCpi
+ .name(name() + ".smt_cpi")
+ .desc("CPI: Total SMT-CPI")
+ .precision(6);
+ smtCpi = smtCycles / smtCommittedInsts;
+
+ totalCpi
+ .name(name() + ".cpi_total")
+ .desc("CPI: Total CPI of All Threads")
+ .precision(6);
+ totalCpi = simTicks / totalCommittedInsts;
+
+ ipc
+ .name(name() + ".ipc")
+ .desc("IPC: Instructions Per Cycle (Per-Thread)")
+ .precision(6);
+ ipc = committedInsts / threadCycles;
+
+ smtIpc
+ .name(name() + ".smt_ipc")
+ .desc("IPC: Total SMT-IPC")
+ .precision(6);
+ smtIpc = smtCommittedInsts / smtCycles;
+
+ totalIpc
+ .name(name() + ".ipc_total")
+ .desc("IPC: Total IPC of All Threads")
+ .precision(6);
+ totalIpc = totalCommittedInsts / simTicks;
+
+ BaseCPU::regStats();
+}
+
+
+void
+InOrderCPU::tick()
+{
+ DPRINTF(InOrderCPU, "\n\nInOrderCPU: Ticking main, InOrderCPU.\n");
+
+ ++numCycles;
+
+ //Tick each of the stages
+ for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
+ pipelineStage[stNum]->tick();
+ }
+
+ // Now advance the time buffers one tick
+ timeBuffer.advance();
+ for (int sqNum=0; sqNum < NumStages - 1; sqNum++) {
+ stageQueue[sqNum]->advance();
+ }
+ activityRec.advance();
+
+ // Any squashed requests, events, or insts then remove them now
+ cleanUpRemovedReqs();
+ cleanUpRemovedEvents();
+ cleanUpRemovedInsts();
+
+ // Re-schedule CPU for this cycle
+ if (!tickEvent.scheduled()) {
+ if (_status == SwitchedOut) {
+ // increment stat
+ lastRunningCycle = curTick;
+ } else if (!activityRec.active()) {
+ DPRINTF(InOrderCPU, "sleeping CPU.\n");
+ lastRunningCycle = curTick;
+ timesIdled++;
+ } else {
+ //Tick next_tick = curTick + cycles(1);
+ //tickEvent.schedule(next_tick);
+ mainEventQueue.schedule(&tickEvent, nextCycle(curTick + 1));
+            DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
+                    nextCycle(curTick + 1));
+ }
+ }
+
+ tickThreadStats();
+ updateThreadPriority();
+}
+
+
+void
+InOrderCPU::init()
+{
+ if (!deferRegistration) {
+ registerThreadContexts();
+ }
+
+ // Set inSyscall so that the CPU doesn't squash when initially
+ // setting up registers.
+ for (int i = 0; i < number_of_threads; ++i)
+ thread[i]->inSyscall = true;
+
+ for (int tid=0; tid < number_of_threads; tid++) {
+
+ ThreadContext *src_tc = thread[tid]->getTC();
+
+ // Threads start in the Suspended State
+ if (src_tc->status() != ThreadContext::Suspended) {
+ continue;
+ }
+
+ }
+
+ // Clear inSyscall.
+ for (int i = 0; i < number_of_threads; ++i)
+ thread[i]->inSyscall = false;
+
+    // Call Initialization Routine for Resource Pool
+ resPool->init();
+}
+
+void
+InOrderCPU::readFunctional(Addr addr, uint32_t &buffer)
+{
+ tcBase()->getMemPort()->readBlob(addr, (uint8_t*)&buffer, sizeof(uint32_t));
+ buffer = gtoh(buffer);
+}
+
+void
+InOrderCPU::reset()
+{
+ miscRegFile.reset(coreType, numThreads, numVirtProcs, dynamic_cast<BaseCPU*>(this));
+}
+
+Port*
+InOrderCPU::getPort(const std::string &if_name, int idx)
+{
+ return resPool->getPort(if_name, idx);
+}
+
+void
+InOrderCPU::trap(Fault fault, unsigned tid, int delay)
+{
+ scheduleCpuEvent(Trap, fault, tid, 0/*vpe*/, delay);
+}
+
+void
+InOrderCPU::trapCPU(Fault fault, unsigned tid)
+{
+ fault->invoke(tcBase(tid));
+}
+
+void
+InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
+ unsigned tid, unsigned vpe, unsigned delay)
+{
+ CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, vpe);
+
+ if (delay >= 0) {
+ DPRINTF(InOrderCPU, "Scheduling CPU Event Type #%i for cycle %i.\n",
+ c_event, curTick + delay);
+ mainEventQueue.schedule(cpu_event,curTick + delay);
+ } else {
+ cpu_event->process();
+ cpuEventRemoveList.push(cpu_event);
+ }
+
+ // Broadcast event to the Resource Pool
+ DynInstPtr dummy_inst = new InOrderDynInst(this, NULL, getNextEventNum(), tid);
+ resPool->scheduleEvent(c_event, dummy_inst, 0, 0, tid);
+}
+
+inline bool
+InOrderCPU::isThreadActive(unsigned tid)
+{
+ list<unsigned>::iterator isActive = std::find(
+ activeThreads.begin(), activeThreads.end(), tid);
+
+ return (isActive != activeThreads.end());
+}
+
+
+void
+InOrderCPU::activateThread(unsigned tid)
+{
+ if (!isThreadActive(tid)) {
+ DPRINTF(InOrderCPU, "Adding Thread %i to active threads list in CPU.\n",
+ tid);
+ activeThreads.push_back(tid);
+
+ wakeCPU();
+ }
+}
+
+void
+InOrderCPU::deactivateThread(unsigned tid)
+{
+ DPRINTF(InOrderCPU, "[tid:%i]: Calling deactivate thread.\n", tid);
+
+ if (isThreadActive(tid)) {
+ DPRINTF(InOrderCPU,"[tid:%i]: Removing from active threads list\n",
+ tid);
+ list<unsigned>::iterator thread_it = std::find(activeThreads.begin(),
+ activeThreads.end(), tid);
+
+ removePipelineStalls(*thread_it);
+
+ //@TODO: change stage status' to Idle?
+
+ activeThreads.erase(thread_it);
+ }
+}
+
+void
+InOrderCPU::removePipelineStalls(unsigned tid)
+{
+ DPRINTF(InOrderCPU,"[tid:%i]: Removing all pipeline stalls\n",
+ tid);
+
+ for (int stNum = 0; stNum < NumStages ; stNum++) {
+ pipelineStage[stNum]->removeStalls(tid);
+ }
+}
+
+bool
+InOrderCPU::isThreadInCPU(unsigned tid)
+{
+ list<unsigned>::iterator isCurrent = std::find(
+ currentThreads.begin(), currentThreads.end(), tid);
+
+ return (isCurrent != currentThreads.end());
+}
+
+void
+InOrderCPU::addToCurrentThreads(unsigned tid)
+{
+ if (!isThreadInCPU(tid)) {
+ DPRINTF(InOrderCPU, "Adding Thread %i to current threads list in CPU.\n",
+ tid);
+ currentThreads.push_back(tid);
+ }
+}
+
+void
+InOrderCPU::removeFromCurrentThreads(unsigned tid)
+{
+ if (isThreadInCPU(tid)) {
+        DPRINTF(InOrderCPU, "Removing Thread %i from current threads "
+                "list in CPU.\n",
+ tid);
+ list<unsigned>::iterator isCurrent = std::find(
+ currentThreads.begin(), currentThreads.end(), tid);
+ currentThreads.erase(isCurrent);
+ }
+}
+
+bool
+InOrderCPU::isThreadSuspended(unsigned tid)
+{
+ list<unsigned>::iterator isSuspended = std::find(
+ suspendedThreads.begin(), suspendedThreads.end(), tid);
+
+ return (isSuspended!= suspendedThreads.end());
+}
+
+void
+InOrderCPU::enableVirtProcElement(unsigned vpe)
+{
+ DPRINTF(InOrderCPU, "[vpe:%i]: Scheduling "
+ "Enabling of concurrent virtual processor execution",
+ vpe);
+
+ scheduleCpuEvent(EnableVPEs, NoFault, 0/*tid*/, vpe);
+}
+
+void
+InOrderCPU::enableVPEs(unsigned vpe)
+{
+    DPRINTF(InOrderCPU, "[vpe:%i]: Enabling concurrent execution of "
+            "virtual processors.\n", vpe);
+
+ list<unsigned>::iterator thread_it = currentThreads.begin();
+
+ while (thread_it != currentThreads.end()) {
+ if (!isThreadSuspended(*thread_it)) {
+ activateThread(*thread_it);
+ }
+ thread_it++;
+ }
+}
+
+void
+InOrderCPU::disableVirtProcElement(unsigned tid, unsigned vpe)
+{
+ DPRINTF(InOrderCPU, "[vpe:%i]: Scheduling "
+ "Disabling of concurrent virtual processor execution",
+ vpe);
+
+ scheduleCpuEvent(DisableVPEs, NoFault, 0/*tid*/, vpe);
+}
+
+void
+InOrderCPU::disableVPEs(unsigned tid, unsigned vpe)
+{
+    DPRINTF(InOrderCPU, "[vpe:%i]: Disabling concurrent execution of "
+            "virtual processors.\n", vpe);
+
+ unsigned base_vpe = TheISA::getVirtProcNum(tcBase(tid));
+
+ list<unsigned>::iterator thread_it = activeThreads.begin();
+
+ std::vector<list<unsigned>::iterator> removeList;
+
+ while (thread_it != activeThreads.end()) {
+ if (base_vpe != vpe) {
+ removeList.push_back(thread_it);
+ }
+ thread_it++;
+ }
+
+ for (int i = 0; i < removeList.size(); i++) {
+ activeThreads.erase(removeList[i]);
+ }
+}
+
+void
+InOrderCPU::enableMultiThreading(unsigned vpe)
+{
+ // Schedule event to take place at end of cycle
+    DPRINTF(InOrderCPU, "[vpe:%i]: Scheduling enabling of multithreading "
+            "on this virtual processor.\n", vpe);
+
+ scheduleCpuEvent(EnableThreads, NoFault, 0/*tid*/, vpe);
+}
+
+void
+InOrderCPU::enableThreads(unsigned vpe)
+{
+    DPRINTF(InOrderCPU, "[vpe:%i]: Enabling multithreading on this "
+            "virtual processor.\n", vpe);
+
+ list<unsigned>::iterator thread_it = currentThreads.begin();
+
+ while (thread_it != currentThreads.end()) {
+ if (TheISA::getVirtProcNum(tcBase(*thread_it)) == vpe) {
+ if (!isThreadSuspended(*thread_it)) {
+ activateThread(*thread_it);
+ }
+ }
+ thread_it++;
+ }
+}
+
+void
+InOrderCPU::disableMultiThreading(unsigned tid, unsigned vpe)
+{
+ // Schedule event to take place at end of cycle
+ DPRINTF(InOrderCPU, "[tid:%i]: Scheduling Disable Multithreading on "
+ "virtual processor %i", tid, vpe);
+
+ scheduleCpuEvent(DisableThreads, NoFault, tid, vpe);
+}
+
+void
+InOrderCPU::disableThreads(unsigned tid, unsigned vpe)
+{
+ DPRINTF(InOrderCPU, "[tid:%i]: Disabling Multithreading on "
+ "virtual processor %i", tid, vpe);
+
+ list<unsigned>::iterator thread_it = activeThreads.begin();
+
+ std::vector<list<unsigned>::iterator> removeList;
+
+ while (thread_it != activeThreads.end()) {
+ if (TheISA::getVirtProcNum(tcBase(*thread_it)) == vpe) {
+ removeList.push_back(thread_it);
+ }
+ thread_it++;
+ }
+
+ for (int i = 0; i < removeList.size(); i++) {
+ activeThreads.erase(removeList[i]);
+ }
+}
+
+void
+InOrderCPU::updateThreadPriority()
+{
+ if (activeThreads.size() > 1)
+ {
+ //DEFAULT TO ROUND ROBIN SCHEME
+ //e.g. Move highest priority to end of thread list
+ list<unsigned>::iterator list_begin = activeThreads.begin();
+ list<unsigned>::iterator list_end = activeThreads.end();
+
+ unsigned high_thread = *list_begin;
+
+ activeThreads.erase(list_begin);
+
+ activeThreads.push_back(high_thread);
+ }
+}
+
+inline void
+InOrderCPU::tickThreadStats()
+{
+ /** Keep track of cycles that each thread is active */
+ list<unsigned>::iterator thread_it = activeThreads.begin();
+ while (thread_it != activeThreads.end()) {
+ threadCycles[*thread_it]++;
+ thread_it++;
+ }
+
+ // Keep track of cycles where SMT is active
+ if (activeThreads.size() > 1) {
+ smtCycles++;
+ }
+}
+
+void
+InOrderCPU::activateContext(unsigned tid, int delay)
+{
+ DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
+
+ scheduleCpuEvent(ActivateThread, NoFault, tid, 0/*vpe*/, delay);
+
+ // Be sure to signal that there's some activity so the CPU doesn't
+ // deschedule itself.
+ activityRec.activity();
+
+ _status = Running;
+}
+
+
+void
+InOrderCPU::suspendContext(unsigned tid, int delay)
+{
+ scheduleCpuEvent(SuspendThread, NoFault, tid, 0/*vpe*/, delay);
+ //_status = Idle;
+}
+
+void
+InOrderCPU::suspendThread(unsigned tid)
+{
+ DPRINTF(InOrderCPU,"[tid: %i]: Suspended ...\n", tid);
+ deactivateThread(tid);
+}
+
+void
+InOrderCPU::deallocateContext(unsigned tid, int delay)
+{
+ scheduleCpuEvent(DeallocateThread, NoFault, tid, 0/*vpe*/, delay);
+}
+
+void
+InOrderCPU::deallocateThread(unsigned tid)
+{
+ DPRINTF(InOrderCPU,"[tid:%i]: Deallocating ...", tid);
+
+ //removeThread(tid);
+
+ removeFromCurrentThreads(tid);
+
+ deactivateThread(tid);
+
+ squashThreadInPipeline(tid);
+}
+
+void
+InOrderCPU::squashThreadInPipeline(unsigned tid)
+{
+ //Squash all instructions in each stage
+ for (int stNum=NumStages - 1; stNum >= 0 ; stNum--) {
+ pipelineStage[stNum]->squash(0 /*seq_num*/, tid);
+ }
+}
+
+void
+InOrderCPU::haltContext(unsigned tid, int delay)
+{
+ DPRINTF(InOrderCPU, "[tid:%i]: Halt context called.\n", tid);
+
+ // Halt is same thing as deallocate for now
+ // @TODO: Differentiate between halt & deallocate in the CPU
+ // model
+ deallocateContext(tid, delay);
+}
+
+void
+InOrderCPU::insertThread(unsigned tid)
+{
+    panic("Unimplemented Function.\n");
+}
+
+void
+InOrderCPU::removeThread(unsigned tid)
+{
+ DPRINTF(InOrderCPU, "Removing Thread %i from CPU.\n", tid);
+
+ /** Broadcast to CPU resources*/
+}
+
+void
+InOrderCPU::activateWhenReady(int tid)
+{
+    panic("Unimplemented Function.\n");
+}
+
+
+void
+InOrderCPU::signalSwitched()
+{
+    panic("Unimplemented Function.\n");
+}
+
+
+void
+InOrderCPU::takeOverFrom(BaseCPU *oldCPU)
+{
+    panic("Take over from another CPU not implemented.\n");
+}
+
+uint64_t
+InOrderCPU::readPC(unsigned tid)
+{
+ return PC[tid];
+}
+
+
+void
+InOrderCPU::setPC(Addr new_PC, unsigned tid)
+{
+ PC[tid] = new_PC;
+}
+
+
+uint64_t
+InOrderCPU::readNextPC(unsigned tid)
+{
+ return nextPC[tid];
+}
+
+
+void
+InOrderCPU::setNextPC(uint64_t new_NPC, unsigned tid)
+{
+ nextPC[tid] = new_NPC;
+}
+
+
+uint64_t
+InOrderCPU::readNextNPC(unsigned tid)
+{
+ return nextNPC[tid];
+}
+
+
+void
+InOrderCPU::setNextNPC(uint64_t new_NNPC, unsigned tid)
+{
+ nextNPC[tid] = new_NNPC;
+}
+
+uint64_t
+InOrderCPU::readIntReg(int reg_idx, unsigned tid)
+{
+ return intRegFile[tid].readReg(reg_idx);
+}
+
+FloatReg
+InOrderCPU::readFloatReg(int reg_idx, unsigned tid, int width)
+{
+
+ return floatRegFile[tid].readReg(reg_idx, width);
+}
+
+FloatRegBits
+InOrderCPU::readFloatRegBits(int reg_idx, unsigned tid, int width)
+{
+ return floatRegFile[tid].readRegBits(reg_idx, width);
+}
+
+void
+InOrderCPU::setIntReg(int reg_idx, uint64_t val, unsigned tid)
+{
+ intRegFile[tid].setReg(reg_idx, val);
+}
+
+
+void
+InOrderCPU::setFloatReg(int reg_idx, FloatReg val, unsigned tid, int width)
+{
+ floatRegFile[tid].setReg(reg_idx, val, width);
+}
+
+
+void
+InOrderCPU::setFloatRegBits(int reg_idx, FloatRegBits val, unsigned tid, int width)
+{
+ floatRegFile[tid].setRegBits(reg_idx, val, width);
+}
+
+uint64_t
+InOrderCPU::readRegOtherThread(unsigned reg_idx, unsigned tid)
+{
+ // If Default value is set, then retrieve target thread
+ if (tid == -1) {
+ tid = TheISA::getTargetThread(tcBase(tid));
+ }
+
+ if (reg_idx < FP_Base_DepTag) { // Integer Register File
+ return readIntReg(reg_idx, tid);
+ } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
+ reg_idx -= FP_Base_DepTag;
+ return readFloatRegBits(reg_idx, tid);
+ } else {
+ reg_idx -= Ctrl_Base_DepTag;
+ return readMiscReg(reg_idx, tid); // Misc. Register File
+ }
+}
+
+void
+InOrderCPU::setRegOtherThread(unsigned reg_idx, const MiscReg &val, unsigned tid)
+{
+ // If Default value is set, then retrieve target thread
+ if (tid == -1) {
+ tid = TheISA::getTargetThread(tcBase(tid));
+ }
+
+ if (reg_idx < FP_Base_DepTag) { // Integer Register File
+ setIntReg(reg_idx, val, tid);
+ } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
+ reg_idx -= FP_Base_DepTag;
+ setFloatRegBits(reg_idx, val, tid);
+ } else {
+ reg_idx -= Ctrl_Base_DepTag;
+ setMiscReg(reg_idx, val, tid); // Misc. Register File
+ }
+}
+
+MiscReg
+InOrderCPU::readMiscRegNoEffect(int misc_reg, unsigned tid)
+{
+ return miscRegFile.readRegNoEffect(misc_reg, tid);
+}
+
+MiscReg
+InOrderCPU::readMiscReg(int misc_reg, unsigned tid)
+{
+ return miscRegFile.readReg(misc_reg, tcBase(tid), tid);
+}
+
+void
+InOrderCPU::setMiscRegNoEffect(int misc_reg, const MiscReg &val, unsigned tid)
+{
+ miscRegFile.setRegNoEffect(misc_reg, val, tid);
+}
+
+void
+InOrderCPU::setMiscReg(int misc_reg, const MiscReg &val, unsigned tid)
+{
+ miscRegFile.setReg(misc_reg, val, tcBase(tid), tid);
+}
+
+
+InOrderCPU::ListIt
+InOrderCPU::addInst(DynInstPtr &inst)
+{
+ int tid = inst->readTid();
+
+ instList[tid].push_back(inst);
+
+ return --(instList[tid].end());
+}
+
+void
+InOrderCPU::instDone(DynInstPtr inst, unsigned tid)
+{
+ // Set the CPU's PCs - This contributes to the precise state of the CPU which can be used
+ // when restoring a thread to the CPU after a fork or after an exception
+ // @TODO: Set-Up Grad-Info/Committed-Info to let ThreadState know if it's a branch or not
+ setPC(inst->readPC(), tid);
+ setNextPC(inst->readNextPC(), tid);
+ setNextNPC(inst->readNextNPC(), tid);
+
+ // Finalize Trace Data For Instruction
+ if (inst->traceData) {
+ //inst->traceData->setCycle(curTick);
+ inst->traceData->setFetchSeq(inst->seqNum);
+ //inst->traceData->setCPSeq(cpu->tcBase(tid)->numInst);
+ inst->traceData->dump();
+ delete inst->traceData;
+ inst->traceData = NULL;
+ }
+
+ // Set Last Graduated Instruction In Thread State
+ //thread[tid]->lastGradInst = inst;
+
+ // Increment thread-state's instruction count
+ thread[tid]->numInst++;
+
+ // Increment thread-state's instruction stats
+ thread[tid]->numInsts++;
+
+ // Count committed insts per thread stats
+ committedInsts[tid]++;
+
+ // Count total insts committed stat
+ totalCommittedInsts++;
+
+ // Count SMT-committed insts per thread stat
+ if (numActiveThreads() > 1) {
+ smtCommittedInsts[tid]++;
+ }
+
+ // Check for instruction-count-based events.
+ comInstEventQueue[tid]->serviceEvents(thread[tid]->numInst);
+
+ // Broadcast to other resources an instruction
+ // has been completed
+ resPool->scheduleEvent((CPUEventType)ResourcePool::InstGraduated, inst, tid);
+
+ // Finally, remove instruction from CPU
+ removeInst(inst);
+}
+
+void
+InOrderCPU::addToRemoveList(DynInstPtr &inst)
+{
+ removeInstsThisCycle = true;
+
+ removeList.push(inst->getInstListIt());
+}
+
+void
+InOrderCPU::removeInst(DynInstPtr &inst)
+{
+ DPRINTF(InOrderCPU, "Removing graduated instruction [tid:%i] PC %#x "
+ "[sn:%lli]\n",
+ inst->threadNumber, inst->readPC(), inst->seqNum);
+
+ removeInstsThisCycle = true;
+
+ // Remove the instruction.
+ removeList.push(inst->getInstListIt());
+}
+
+void
+InOrderCPU::removeInstsUntil(const InstSeqNum &seq_num,
+ unsigned tid)
+{
+ //assert(!instList[tid].empty());
+
+ removeInstsThisCycle = true;
+
+ ListIt inst_iter = instList[tid].end();
+
+ inst_iter--;
+
+ DPRINTF(InOrderCPU, "Deleting instructions from CPU instruction "
+ "list that are from [tid:%i] and above [sn:%lli] (end=%lli).\n",
+ tid, seq_num, (*inst_iter)->seqNum);
+
+ while ((*inst_iter)->seqNum > seq_num) {
+
+ bool break_loop = (inst_iter == instList[tid].begin());
+
+ squashInstIt(inst_iter, tid);
+
+ inst_iter--;
+
+ if (break_loop)
+ break;
+ }
+}
+
+
+inline void
+InOrderCPU::squashInstIt(const ListIt &instIt, const unsigned &tid)
+{
+ if ((*instIt)->threadNumber == tid) {
+ DPRINTF(InOrderCPU, "Squashing instruction, "
+ "[tid:%i] [sn:%lli] PC %#x\n",
+ (*instIt)->threadNumber,
+ (*instIt)->seqNum,
+ (*instIt)->readPC());
+
+ (*instIt)->setSquashed();
+
+ removeList.push(instIt);
+ }
+}
+
+
+void
+InOrderCPU::cleanUpRemovedInsts()
+{
+ while (!removeList.empty()) {
+ DPRINTF(InOrderCPU, "Removing instruction, "
+ "[tid:%i] [sn:%lli] PC %#x\n",
+ (*removeList.front())->threadNumber,
+ (*removeList.front())->seqNum,
+ (*removeList.front())->readPC());
+
+ DynInstPtr inst = *removeList.front();
+ int tid = inst->threadNumber;
+
+ // Make Sure Resource Schedule Is Emptied Out
+ ThePipeline::ResSchedule *inst_sched = &inst->resSched;
+ while (!inst_sched->empty()) {
+ ThePipeline::ScheduleEntry* sch_entry = inst_sched->top();
+ inst_sched->pop();
+ delete sch_entry;
+ }
+
+        // Remove From Register Dependency Map, If Necessary
+        archRegDepMap[tid].remove(inst);
+
+ // Clear if Non-Speculative
+ if (inst->staticInst &&
+ inst->seqNum == nonSpecSeqNum[tid] &&
+ nonSpecInstActive[tid] == true) {
+ nonSpecInstActive[tid] = false;
+ }
+
+ instList[tid].erase(removeList.front());
+
+ removeList.pop();
+
+ DPRINTF(RefCount, "pop from remove list: [sn:%i]: Refcount = %i.\n",
+ inst->seqNum,
+ 0/*inst->curCount()*/);
+
+ }
+
+ removeInstsThisCycle = false;
+}
+
+void
+InOrderCPU::cleanUpRemovedReqs()
+{
+ while (!reqRemoveList.empty()) {
+ ResourceRequest *res_req = reqRemoveList.front();
+
+ DPRINTF(RefCount, "[tid:%i]: Removing Request, "
+ "[sn:%lli] [slot:%i] [stage_num:%i] [res:%s] [refcount:%i].\n",
+ res_req->inst->threadNumber,
+ res_req->inst->seqNum,
+ res_req->getSlot(),
+ res_req->getStageNum(),
+ res_req->res->name(),
+ 0/*res_req->inst->curCount()*/);
+
+        reqRemoveList.pop();
+
+        DPRINTF(RefCount, "after remove request: [sn:%i]: Refcount = %i.\n",
+                res_req->inst->seqNum,
+                0/*res_req->inst->curCount()*/);
+
+        // Delete the request only after the trace output above so that
+        // freed memory is never dereferenced.
+        delete res_req;
+ }
+}
+
+void
+InOrderCPU::cleanUpRemovedEvents()
+{
+ while (!cpuEventRemoveList.empty()) {
+ Event *cpu_event = cpuEventRemoveList.front();
+ cpuEventRemoveList.pop();
+ delete cpu_event;
+ }
+}
+
+/*
+
+void
+InOrderCPU::removeAllInsts()
+{
+ instList.clear();
+}
+*/
+
+void
+InOrderCPU::dumpInsts()
+{
+ int num = 0;
+
+ ListIt inst_list_it = instList[0].begin();
+
+ cprintf("Dumping Instruction List\n");
+
+ while (inst_list_it != instList[0].end()) {
+ cprintf("Instruction:%i\nPC:%#x\n[tid:%i]\n[sn:%lli]\nIssued:%i\n"
+ "Squashed:%i\n\n",
+ num, (*inst_list_it)->readPC(), (*inst_list_it)->threadNumber,
+ (*inst_list_it)->seqNum, (*inst_list_it)->isIssued(),
+ (*inst_list_it)->isSquashed());
+ inst_list_it++;
+ ++num;
+ }
+}
+/*
+
+void
+InOrderCPU::wakeDependents(DynInstPtr &inst)
+{
+ iew.wakeDependents(inst);
+}
+*/
+
+void
+InOrderCPU::wakeCPU()
+{
+ if (/*activityRec.active() || */tickEvent.scheduled()) {
+ DPRINTF(Activity, "CPU already running.\n");
+ return;
+ }
+
+ DPRINTF(Activity, "Waking up CPU\n");
+
+ //idleCycles += (curTick - 1) - lastRunningCycle;
+
+ mainEventQueue.schedule(&tickEvent, curTick);
+}
+
+void
+InOrderCPU::syscall(int64_t callnum, int tid)
+{
+ DPRINTF(InOrderCPU, "[tid:%i] Executing syscall().\n\n", tid);
+
+ DPRINTF(Activity,"Activity: syscall() called.\n");
+
+ // Temporarily increase this by one to account for the syscall
+ // instruction.
+ ++(this->thread[tid]->funcExeInst);
+
+ // Execute the actual syscall.
+ this->thread[tid]->syscall(callnum);
+
+ // Decrease funcExeInst by one as the normal commit will handle
+ // incrementing it.
+ --(this->thread[tid]->funcExeInst);
+
+ // Clear Non-Speculative Block Variable
+ nonSpecInstActive[tid] = false;
+}
+
+IntReg
+InOrderCPU::getSyscallArg(int idx, int tid)
+{
+ return readIntReg(ArgumentReg0 + idx, tid);
+}
+
+void
+InOrderCPU::setSyscallArg(int idx, IntReg val, int tid)
+{
+ setIntReg(ArgumentReg0 + idx, val, tid);
+}
+
+void
+InOrderCPU::setSyscallReturn(SyscallReturn return_value, int tid)
+{
+ if (return_value.successful()) {
+ // no error
+ setIntReg(SyscallSuccessReg, 0, tid);
+ setIntReg(ReturnValueReg, return_value.value(), tid);
+ } else {
+ // got an error, return details
+ setIntReg(SyscallSuccessReg, (IntReg) -1, tid);
+ setIntReg(ReturnValueReg, -return_value.value(), tid);
+ }
+}
+
+Fault
+InOrderCPU::read(DynInstPtr inst)
+{
+ Resource *mem_res = resPool->getResource(dataPortIdx);
+ return mem_res->doDataAccess(inst);
+}
+
+Fault
+InOrderCPU::write(DynInstPtr inst)
+{
+ Resource *mem_res = resPool->getResource(dataPortIdx);
+ return mem_res->doDataAccess(inst);
+}
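
All of the context-management entry points in cpu.cc (activateContext, suspendContext, deallocateContext, haltContext, trap) funnel through scheduleCpuEvent(), so callers observe thread-state changes as scheduled CPU events rather than immediate updates. Below is a hedged sketch of the expected call pattern; wakeAndRetireThread is an illustrative helper, not part of the commit.

#include "cpu/inorder/cpu.hh"

void
wakeAndRetireThread(InOrderCPU *cpu, unsigned tid)
{
    // Queues an ActivateThread CPUEvent; when it fires, activateThread()
    // puts tid on activeThreads and wakes the CPU if it had gone idle.
    cpu->activateContext(tid);

    // ... the thread runs; tearing it down goes through the same event
    // machinery (DeallocateThread), here with a one-cycle delay.
    cpu->deallocateContext(tid, 1);
}
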
diff --git a/src/cpu/inorder/cpu.hh b/src/cpu/inorder/cpu.hh
new file mode 100644
index 000000000..cd1eb6f92
--- /dev/null
+++ b/src/cpu/inorder/cpu.hh
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_CPU_HH__
+#define __CPU_INORDER_CPU_HH__
+
+#include <iostream>
+#include <list>
+#include <queue>
+#include <set>
+#include <vector>
+
+#include "arch/isa_traits.hh"
+#include "base/statistics.hh"
+#include "base/timebuf.hh"
+#include "config/full_system.hh"
+#include "cpu/activity.hh"
+#include "cpu/base.hh"
+#include "cpu/simple_thread.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/pipeline_stage.hh"
+#include "cpu/inorder/thread_state.hh"
+#include "cpu/inorder/reg_dep_map.hh"
+#include "cpu/o3/dep_graph.hh"
+#include "cpu/o3/rename_map.hh"
+#include "mem/packet.hh"
+#include "mem/port.hh"
+#include "mem/request.hh"
+#include "sim/eventq.hh"
+#include "sim/process.hh"
+
+class ThreadContext;
+class MemInterface;
+class MemObject;
+class Process;
+class ResourcePool;
+
+class InOrderCPU : public BaseCPU
+{
+
+ protected:
+ typedef ThePipeline::Params Params;
+ typedef InOrderThreadState Thread;
+
+ //ISA TypeDefs
+ typedef TheISA::IntReg IntReg;
+ typedef TheISA::FloatReg FloatReg;
+ typedef TheISA::FloatRegBits FloatRegBits;
+ typedef TheISA::MiscReg MiscReg;
+ typedef TheISA::RegFile RegFile;
+ typedef SimpleRenameMap RenameMap;
+
+ //DynInstPtr TypeDefs
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+ typedef std::list<DynInstPtr>::iterator ListIt;
+
+ //TimeBuffer TypeDefs
+ typedef TimeBuffer<InterStageStruct> StageQueue;
+
+ friend class Resource;
+
+ public:
+ /** Constructs a CPU with the given parameters. */
+ InOrderCPU(Params *params);
+
+ /** CPU ID */
+ int cpu_id;
+
+ /** Type of core that this is */
+ std::string coreType;
+
+ int readCpuId() { return cpu_id; }
+
+ void setCpuId(int val) { cpu_id = val; }
+
+ Params *cpu_params;
+
+ TheISA::ITB * itb;
+ TheISA::DTB * dtb;
+
+ public:
+ enum Status {
+ Running,
+ Idle,
+ Halted,
+ Blocked,
+ SwitchedOut
+ };
+
+ /** Overall CPU status. */
+ Status _status;
+
+ private:
+ /** Define TickEvent for the CPU */
+ class TickEvent : public Event
+ {
+ private:
+ /** Pointer to the CPU. */
+ InOrderCPU *cpu;
+
+ public:
+ /** Constructs a tick event. */
+ TickEvent(InOrderCPU *c);
+
+ /** Processes a tick event, calling tick() on the CPU. */
+ void process();
+
+ /** Returns the description of the tick event. */
+ const char *description();
+ };
+
+ /** The tick event used for scheduling CPU ticks. */
+ TickEvent tickEvent;
+
+ /** Schedule tick event, regardless of its current state. */
+ void scheduleTickEvent(int delay)
+ {
+ if (tickEvent.squashed())
+ mainEventQueue.reschedule(&tickEvent, nextCycle(curTick + ticks(delay)));
+ else if (!tickEvent.scheduled())
+ mainEventQueue.schedule(&tickEvent, nextCycle(curTick + ticks(delay)));
+ }
+
+ /** Unschedule tick event, regardless of its current state. */
+ void unscheduleTickEvent()
+ {
+ if (tickEvent.scheduled())
+ tickEvent.squash();
+ }
+
+ public:
+ // List of Events That can be scheduled from
+ // within the CPU.
+ // NOTE(1): The Resource Pool also uses this event list
+    // to schedule events broadcast to all resource interfaces
+ // NOTE(2): CPU Events usually need to schedule a corresponding resource
+ // pool event.
+ enum CPUEventType {
+ ActivateThread,
+ DeallocateThread,
+ SuspendThread,
+ DisableThreads,
+ EnableThreads,
+ DisableVPEs,
+ EnableVPEs,
+ Trap,
+ InstGraduated,
+ SquashAll,
+ UpdatePCs,
+ NumCPUEvents
+ };
+
+ /** Define CPU Event */
+ class CPUEvent : public Event
+ {
+ protected:
+ InOrderCPU *cpu;
+
+ public:
+ CPUEventType cpuEventType;
+ unsigned tid;
+ unsigned vpe;
+ Fault fault;
+
+ public:
+ /** Constructs a CPU event. */
+ CPUEvent(InOrderCPU *_cpu, CPUEventType e_type, Fault fault,
+ unsigned _tid, unsigned _vpe);
+
+ /** Set Type of Event To Be Scheduled */
+ void setEvent(CPUEventType e_type, Fault _fault, unsigned _tid, unsigned _vpe)
+ {
+ fault = _fault;
+ cpuEventType = e_type;
+ tid = _tid;
+ vpe = _vpe;
+ }
+
+ /** Processes a resource event. */
+ virtual void process();
+
+ /** Returns the description of the resource event. */
+ const char *description();
+
+ /** Schedule Event */
+ void scheduleEvent(int delay);
+
+ /** Unschedule This Event */
+ void unscheduleEvent();
+ };
+
+ /** Schedule a CPU Event */
+ void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, unsigned tid,
+ unsigned vpe, unsigned delay = 0);
+
+ public:
+ /** Interface between the CPU and CPU resources. */
+ ResourcePool *resPool;
+
+ /** Instruction used to signify that there is no *real* instruction in buffer slot */
+ DynInstPtr dummyBufferInst;
+
+ /** Used by resources to signify a denied access to a resource. */
+ ResourceRequest *dummyReq;
+
+    /** Resource id identifying the fetch access unit. */
+ unsigned fetchPortIdx;
+
+    /** Resource id identifying the data access unit. */
+ unsigned dataPortIdx;
+
+ /** The Pipeline Stages for the CPU */
+ PipelineStage *pipelineStage[ThePipeline::NumStages];
+
+ TheISA::IntReg PC[ThePipeline::MaxThreads];
+ TheISA::IntReg nextPC[ThePipeline::MaxThreads];
+ TheISA::IntReg nextNPC[ThePipeline::MaxThreads];
+
+ /** The Register File for the CPU */
+    /** @TODO: This regFile won't be a sufficient solution for out-of-order; add
+     * register files as a resource in order to handle this problem
+     */
+    TheISA::IntRegFile intRegFile[ThePipeline::MaxThreads];
+    TheISA::FloatRegFile floatRegFile[ThePipeline::MaxThreads];
+ TheISA::MiscRegFile miscRegFile;
+
+ /** Dependency Tracker for Integer & Floating Point Regs */
+ RegDepMap archRegDepMap[ThePipeline::MaxThreads];
+
+ /** Global communication structure */
+ TimeBuffer<TimeStruct> timeBuffer;
+
+ /** Communication structure that sits in between pipeline stages */
+ StageQueue *stageQueue[ThePipeline::NumStages-1];
+
+ public:
+
+ /** Registers statistics. */
+ void regStats();
+
+ /** Ticks CPU, calling tick() on each stage, and checking the overall
+ * activity to see if the CPU should deschedule itself.
+ */
+ void tick();
+
+ /** Initialize the CPU */
+ void init();
+
+ /** Reset State in the CPU */
+ void reset();
+
+ /** Get a Memory Port */
+ Port* getPort(const std::string &if_name, int idx = 0);
+
+    /** trap() - sets up a trap event to handle the given fault.
+     *  trapCPU() - traps to handle the given fault
+     */
+ void trap(Fault fault, unsigned tid, int delay = 0);
+ void trapCPU(Fault fault, unsigned tid);
+
+ /** Setup CPU to insert a thread's context */
+ void insertThread(unsigned tid);
+
+ /** Remove all of a thread's context from CPU */
+ void removeThread(unsigned tid);
+
+ /** Add Thread to Active Threads List. */
+ void activateContext(unsigned tid, int delay = 0);
+ void activateThread(unsigned tid);
+
+ /** Remove Thread from Active Threads List */
+ void suspendContext(unsigned tid, int delay = 0);
+ void suspendThread(unsigned tid);
+
+ /** Remove Thread from Active Threads List &&
+ * Remove Thread Context from CPU.
+ */
+ void deallocateContext(unsigned tid, int delay = 0);
+ void deallocateThread(unsigned tid);
+ void deactivateThread(unsigned tid);
+
+ /** Remove Thread from Active Threads List &&
+ * Remove Thread Context from CPU.
+ */
+ void haltContext(unsigned tid, int delay = 0);
+
+ void removePipelineStalls(unsigned tid);
+
+ void squashThreadInPipeline(unsigned tid);
+
+ /// Notify the CPU to enable a virtual processor element.
+ virtual void enableVirtProcElement(unsigned vpe);
+ void enableVPEs(unsigned vpe);
+
+ /// Notify the CPU to disable a virtual processor element.
+ virtual void disableVirtProcElement(unsigned tid, unsigned vpe);
+ void disableVPEs(unsigned tid, unsigned vpe);
+
+ /// Notify the CPU that multithreading is enabled.
+ virtual void enableMultiThreading(unsigned vpe);
+ void enableThreads(unsigned vpe);
+
+ /// Notify the CPU that multithreading is disabled.
+ virtual void disableMultiThreading(unsigned tid, unsigned vpe);
+ void disableThreads(unsigned tid, unsigned vpe);
+
+ // Sets a thread-rescheduling condition.
+ void setThreadRescheduleCondition(uint32_t tid)
+ {
+ //@TODO: IMPLEMENT ME
+ }
+
+ /** Activate a Thread When CPU Resources are Available. */
+ void activateWhenReady(int tid);
+
+ /** Add or Remove a Thread Context in the CPU. */
+ void doContextSwitch();
+
+ /** Update The Order In Which We Process Threads. */
+ void updateThreadPriority();
+
+ /** Switches a Pipeline Stage to Active. (Unused currently) */
+ void switchToActive(int stage_idx)
+ { /*pipelineStage[stage_idx]->switchToActive();*/ }
+
+ /** Switches out this CPU. (Unused currently) */
+ //void switchOut(Sampler *sampler);
+
+ /** Signals to this CPU that a stage has completed switching out. (Unused currently)*/
+ void signalSwitched();
+
+ /** Takes over from another CPU. (Unused currently)*/
+ void takeOverFrom(BaseCPU *oldCPU);
+
+ /** Get the current instruction sequence number, and increment it. */
+ InstSeqNum getAndIncrementInstSeq(unsigned tid)
+ { return globalSeqNum[tid]++; }
+
+    /** Get the current instruction sequence number without incrementing it. */
+ InstSeqNum nextInstSeqNum(unsigned tid)
+ { return globalSeqNum[tid]; }
+
+ /** Increment Instruction Sequence Number */
+ void incrInstSeqNum(unsigned tid)
+ { globalSeqNum[tid]++; }
+
+ /** Set Instruction Sequence Number */
+ void setInstSeqNum(unsigned tid, InstSeqNum seq_num)
+ {
+ globalSeqNum[tid] = seq_num;
+ }
+
+ InstSeqNum getNextEventNum()
+ {
+ return cpuEventNum++;
+ }
+
+ /** Get instruction asid. */
+ int getInstAsid(unsigned tid)
+ { return thread[tid]->getInstAsid(); }
+
+ /** Get data asid. */
+ int getDataAsid(unsigned tid)
+ { return thread[tid]->getDataAsid(); }
+
+ /** Register file accessors */
+ uint64_t readIntReg(int reg_idx, unsigned tid);
+
+ FloatReg readFloatReg(int reg_idx, unsigned tid,
+ int width = TheISA::SingleWidth);
+
+ FloatRegBits readFloatRegBits(int reg_idx, unsigned tid,
+ int width = TheISA::SingleWidth);
+
+ void setIntReg(int reg_idx, uint64_t val, unsigned tid);
+
+ void setFloatReg(int reg_idx, FloatReg val, unsigned tid,
+ int width = TheISA::SingleWidth);
+
+ void setFloatRegBits(int reg_idx, FloatRegBits val, unsigned tid,
+ int width = TheISA::SingleWidth);
+
+ /** Reads a miscellaneous register. */
+ MiscReg readMiscRegNoEffect(int misc_reg, unsigned tid);
+
+ /** Reads a misc. register, including any side effects the read
+ * might have as defined by the architecture.
+ */
+ MiscReg readMiscReg(int misc_reg, unsigned tid);
+
+ /** Sets a miscellaneous register. */
+ void setMiscRegNoEffect(int misc_reg, const MiscReg &val, unsigned tid);
+
+ /** Sets a misc. register, including any side effects the write
+ * might have as defined by the architecture.
+ */
+ void setMiscReg(int misc_reg, const MiscReg &val, unsigned tid);
+
+    /** Reads an int/fp/misc reg. from another thread depending on the ISA-defined
+ * target thread
+ */
+ uint64_t readRegOtherThread(unsigned misc_reg, unsigned tid = -1);
+
+    /** Sets an int/fp/misc reg. from another thread depending on an ISA-defined
+ * target thread
+ */
+ void setRegOtherThread(unsigned misc_reg, const MiscReg &val, unsigned tid);
+
+ /** Reads the commit PC of a specific thread. */
+ uint64_t readPC(unsigned tid);
+
+ /** Sets the commit PC of a specific thread. */
+ void setPC(Addr new_PC, unsigned tid);
+
+ /** Reads the next PC of a specific thread. */
+ uint64_t readNextPC(unsigned tid);
+
+ /** Sets the next PC of a specific thread. */
+ void setNextPC(uint64_t val, unsigned tid);
+
+ /** Reads the next NPC of a specific thread. */
+ uint64_t readNextNPC(unsigned tid);
+
+ /** Sets the next NPC of a specific thread. */
+ void setNextNPC(uint64_t val, unsigned tid);
+
+ /** Add Destination Register To Dependency Maps */
+ //void addToRegDepMap(DynInstPtr &inst);
+
+ /** Function to add instruction onto the head of the list of the
+ * instructions. Used when new instructions are fetched.
+ */
+ ListIt addInst(DynInstPtr &inst);
+
+ /** Function to tell the CPU that an instruction has completed. */
+ void instDone(DynInstPtr inst, unsigned tid);
+
+ /** Add Instructions to the CPU Remove List*/
+ void addToRemoveList(DynInstPtr &inst);
+
+ /** Remove an instruction from CPU */
+ void removeInst(DynInstPtr &inst);
+
+ /** Remove all instructions younger than the given sequence number. */
+ void removeInstsUntil(const InstSeqNum &seq_num,unsigned tid);
+
+ /** Removes the instruction pointed to by the iterator. */
+ inline void squashInstIt(const ListIt &instIt, const unsigned &tid);
+
+ /** Cleans up all instructions on the instruction remove list. */
+ void cleanUpRemovedInsts();
+
+ /** Cleans up all instructions on the request remove list. */
+ void cleanUpRemovedReqs();
+
+ /** Cleans up all instructions on the CPU event remove list. */
+ void cleanUpRemovedEvents();
+
+ /** Debug function to print all instructions on the list. */
+ void dumpInsts();
+
+    /** Translates an instruction request in syscall emulation mode. */
+ Fault translateInstReq(RequestPtr &req, Thread *thread)
+ {
+ return thread->getProcessPtr()->pTable->translate(req);
+ }
+
+ /** Translates data read request in syscall emulation mode. */
+ Fault translateDataReadReq(RequestPtr &req, Thread *thread)
+ {
+ return thread->getProcessPtr()->pTable->translate(req);
+ }
+
+ /** Translates data write request in syscall emulation mode. */
+ Fault translateDataWriteReq(RequestPtr &req, Thread *thread)
+ {
+ return thread->getProcessPtr()->pTable->translate(req);
+ }
+
+ /** Forwards an instruction read to the appropriate data
+ * resource (indexes into Resource Pool thru "dataPortIdx")
+ */
+ Fault read(DynInstPtr inst);
+
+    /** Forwards an instruction write to the appropriate data
+     *  resource (indexes into Resource Pool thru "dataPortIdx")
+     */
+ Fault write(DynInstPtr inst);
+
+ /** Executes a syscall.*/
+ void syscall(int64_t callnum, int tid);
+
+ /** Gets a syscall argument. */
+ IntReg getSyscallArg(int i, int tid);
+
+ /** Used to shift args for indirect syscall. */
+ void setSyscallArg(int i, IntReg val, int tid);
+
+ /** Sets the return value of a syscall. */
+ void setSyscallReturn(SyscallReturn return_value, int tid);
+
+ public:
+ /** Per-Thread List of all the instructions in flight. */
+ std::list<DynInstPtr> instList[ThePipeline::MaxThreads];
+
+ /** List of all the instructions that will be removed at the end of this
+ * cycle.
+ */
+ std::queue<ListIt> removeList;
+
+ /** List of all the resource requests that will be removed at the end of this
+ * cycle.
+ */
+ std::queue<ResourceRequest*> reqRemoveList;
+
+ /** List of all the cpu event requests that will be removed at the end of
+ * the current cycle.
+ */
+ std::queue<Event*> cpuEventRemoveList;
+
+#ifdef DEBUG
+ /** Debug structure to keep track of the sequence numbers still in
+ * flight.
+ */
+ std::set<InstSeqNum> snList;
+#endif
+
+ /** Records if instructions need to be removed this cycle due to
+ * being retired or squashed.
+ */
+ bool removeInstsThisCycle;
+
+    /** True if a non-speculative instruction is active in the pipeline. Lets
+     *  any execution unit know not to execute while that instruction is active.
+     */
+ bool nonSpecInstActive[ThePipeline::MaxThreads];
+
+ /** Instruction Seq. Num of current non-speculative instruction. */
+ InstSeqNum nonSpecSeqNum[ThePipeline::MaxThreads];
+
+ /** Instruction Seq. Num of last instruction squashed in pipeline */
+ InstSeqNum squashSeqNum[ThePipeline::MaxThreads];
+
+    /** Last cycle that the CPU squashed instructions, per thread. */
+ Tick lastSquashCycle[ThePipeline::MaxThreads];
+
+ std::list<unsigned> fetchPriorityList;
+
+    /** Rename Map for architectural-to-physical register mappings.
+     * In an in-order processor, the mapping is fixed
+     * (e.g. Thread 0: 0-31, Thread 1: 32-63, etc.).
+     * In an out-of-order processor, this map would track register renaming
+     * so that the architectural (program-order) register state is preserved.
+     */
+ RenameMap renameMap[ThePipeline::MaxThreads];
+
+ protected:
+ /** Active Threads List */
+ std::list<unsigned> activeThreads;
+
+ /** Current Threads List */
+ std::list<unsigned> currentThreads;
+
+ /** Suspended Threads List */
+ std::list<unsigned> suspendedThreads;
+
+ /** Thread Status Functions (Unused Currently) */
+ bool isThreadInCPU(unsigned tid);
+ bool isThreadActive(unsigned tid);
+ bool isThreadSuspended(unsigned tid);
+ void addToCurrentThreads(unsigned tid);
+ void removeFromCurrentThreads(unsigned tid);
+
+ private:
+ /** The activity recorder; used to tell if the CPU has any
+ * activity remaining or if it can go to idle and deschedule
+ * itself.
+ */
+ ActivityRecorder activityRec;
+
+ public:
+ void readFunctional(Addr addr, uint32_t &buffer);
+
+ /** Number of Active Threads in the CPU */
+ int numActiveThreads() { return activeThreads.size(); }
+
+ /** Records that there was time buffer activity this cycle. */
+ void activityThisCycle() { activityRec.activity(); }
+
+ /** Changes a stage's status to active within the activity recorder. */
+ void activateStage(const int idx)
+ { activityRec.activateStage(idx); }
+
+ /** Changes a stage's status to inactive within the activity recorder. */
+ void deactivateStage(const int idx)
+ { activityRec.deactivateStage(idx); }
+
+ /** Wakes the CPU, rescheduling the CPU if it's not already active. */
+ void wakeCPU();
+
+ /** Gets a free thread id. Use if thread ids change across system. */
+ int getFreeTid();
+
+ // LL/SC debug functionality
+ unsigned stCondFails;
+ unsigned readStCondFailures() { return stCondFails; }
+ unsigned setStCondFailures(unsigned st_fails) { return stCondFails = st_fails; }
+
+ public:
+ /** Returns a pointer to a thread context. */
+ ThreadContext *tcBase(unsigned tid = 0)
+ {
+ return thread[tid]->getTC();
+ }
+
+ /** The global sequence number counter. */
+ InstSeqNum globalSeqNum[ThePipeline::MaxThreads];
+
+ /** The global event number counter. */
+ InstSeqNum cpuEventNum;
+
+ /** Counter of how many stages have completed switching out. */
+ int switchCount;
+
+ /** Pointers to all of the threads in the CPU. */
+ std::vector<Thread *> thread;
+
+ /** Pointer to the icache interface. */
+ MemInterface *icacheInterface;
+ /** Pointer to the dcache interface. */
+ MemInterface *dcacheInterface;
+
+ /** Whether or not the CPU should defer its registration. */
+ bool deferRegistration;
+
+ /** Per-Stage Instruction Tracing */
+ bool stageTracing;
+
+ /** Is there a context switch pending? */
+ bool contextSwitch;
+
+ /** Threads Scheduled to Enter CPU */
+ std::list<int> cpuWaitList;
+
+ /** The cycle that the CPU was last running, used for statistics. */
+ Tick lastRunningCycle;
+
+ /** Number of Threads the CPU can process */
+ unsigned numThreads;
+
+ /** Number of Virtual Processors the CPU can process */
+ unsigned numVirtProcs;
+
+    /** Update per-thread statistics each tick. */
+ inline void tickThreadStats();
+
+ /** Per-Thread Tick */
+ Stats::Vector<> threadCycles;
+
+ /** Tick for SMT */
+ Stats::Scalar<> smtCycles;
+
+ /** Stat for total number of times the CPU is descheduled. */
+ Stats::Scalar<> timesIdled;
+
+ /** Stat for total number of cycles the CPU spends descheduled. */
+ Stats::Scalar<> idleCycles;
+
+ /** Stat for the number of committed instructions per thread. */
+ Stats::Vector<> committedInsts;
+
+    /** Stat for the number of SMT committed instructions per thread. */
+ Stats::Vector<> smtCommittedInsts;
+
+ /** Stat for the total number of committed instructions. */
+ Stats::Scalar<> totalCommittedInsts;
+
+ /** Stat for the CPI per thread. */
+ Stats::Formula cpi;
+
+ /** Stat for the SMT-CPI per thread. */
+ Stats::Formula smtCpi;
+
+ /** Stat for the total CPI. */
+ Stats::Formula totalCpi;
+
+ /** Stat for the IPC per thread. */
+ Stats::Formula ipc;
+
+    /** Stat for the SMT-IPC per thread. */
+ Stats::Formula smtIpc;
+
+ /** Stat for the total IPC. */
+ Stats::Formula totalIpc;
+};
+
+#endif // __CPU_O3_CPU_HH__
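
A side note on the scheduleTickEvent()/unscheduleTickEvent() pair declared in the header above: the CPU never dequeues a pending tick, it only squashes it and later revives it if the event is still sitting in the queue. Below is a minimal standalone sketch of that squash-or-reschedule pattern; ToyEvent and the function names are simplified stand-ins, not the m5 event-queue API.

#include <cstdint>
#include <iostream>

// Simplified stand-in for an event on the main event queue; only the two
// flags the scheduling logic cares about are modeled here.
struct ToyEvent {
    bool scheduled = false;
    bool squashed  = false;   // still queued, but marked as dead
    uint64_t when  = 0;
};

// Mirrors scheduleTickEvent(): revive a squashed event, or schedule a fresh
// one; a live, already-scheduled event is left untouched.
void scheduleTick(ToyEvent &ev, uint64_t curTick, uint64_t delay)
{
    if (ev.squashed) {
        ev.squashed = false;
        ev.when = curTick + delay;     // reschedule at the new time
    } else if (!ev.scheduled) {
        ev.scheduled = true;
        ev.when = curTick + delay;
    }
}

// Mirrors unscheduleTickEvent(): lazy removal by squashing, not dequeueing.
void unscheduleTick(ToyEvent &ev)
{
    if (ev.scheduled)
        ev.squashed = true;
}

int main()
{
    ToyEvent tick;
    scheduleTick(tick, 100, 1);   // CPU activated: tick at 101
    unscheduleTick(tick);         // CPU idles: squash the pending tick
    scheduleTick(tick, 200, 1);   // CPU woken again: revive at 201
    std::cout << "tick fires at " << tick.when << "\n";
    return 0;
}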
diff --git a/src/cpu/inorder/first_stage.cc b/src/cpu/inorder/first_stage.cc
new file mode 100644
index 000000000..ce30f8466
--- /dev/null
+++ b/src/cpu/inorder/first_stage.cc
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "base/str.hh"
+#include "cpu/inorder/first_stage.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+#include "cpu/inorder/resource_pool.hh"
+#include "cpu/inorder/cpu.hh"
+#include "params/InOrderTrace.hh"
+
+using namespace std;
+using namespace ThePipeline;
+
+FirstStage::FirstStage(Params *params, unsigned stage_num)
+ : PipelineStage(params, stage_num)
+{
+ for(int tid=0; tid < this->numThreads; tid++) {
+ stageStatus[tid] = Running;
+ }
+
+ numFetchingThreads = 1;
+
+ fetchPolicy = RoundRobin;
+}
+
+void
+FirstStage::setCPU(InOrderCPU *cpu_ptr)
+{
+ cpu = cpu_ptr;
+
+ fetchPriorityList = &cpu->fetchPriorityList;
+
+ DPRINTF(InOrderStage, "Set CPU pointer.\n");
+}
+
+
+void
+FirstStage::squash(InstSeqNum squash_seq_num, unsigned tid)
+{
+ // Set status to squashing.
+ //stageStatus[tid] = Squashing;
+
+ // Clear the instruction list and skid buffer in case they have any
+ // insts in them.
+ DPRINTF(InOrderStage, "Removing instructions from stage instruction list.\n");
+ while (!insts[tid].empty()) {
+ if (insts[tid].front()->seqNum <= squash_seq_num) {
+ DPRINTF(InOrderStage,"[tid:%i]: Cannot remove [sn:%i] because it's <= "
+ "squashing seqNum %i.\n",
+ tid,
+ insts[tid].front()->seqNum,
+ squash_seq_num);
+
+ DPRINTF(InOrderStage, "[tid:%i]: Cannot remove incoming "
+ "instructions before delay slot [sn:%i]. %i insts"
+ "left.\n", tid, squash_seq_num,
+ insts[tid].size());
+ break;
+ }
+ DPRINTF(InOrderStage, "[tid:%i]: Removing instruction, [sn:%i] PC %08p.\n",
+ tid, insts[tid].front()->seqNum, insts[tid].front()->PC);
+ insts[tid].pop();
+ }
+
+ // Now that squash has propagated to the first stage,
+ // Alert CPU to remove instructions from the CPU instruction list.
+ // @todo: Move this to the CPU object.
+ cpu->removeInstsUntil(squash_seq_num, tid);
+}
+
+void
+FirstStage::processStage(bool &status_change)
+{
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ //Check stall and squash signals.
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+ status_change = checkSignalsAndUpdate(tid) || status_change;
+ }
+
+ for (int threadFetched = 0; threadFetched < numFetchingThreads;
+ threadFetched++) {
+ int tid = getFetchingThread(fetchPolicy);
+
+ if (tid >= 0) {
+ DPRINTF(InOrderStage, "Processing [tid:%i]\n",tid);
+ processThread(status_change, tid);
+ } else {
+ DPRINTF(InOrderStage, "No more threads to fetch from.\n");
+ }
+ }
+}
+
+//@TODO: Note in documentation that when you change a pipeline stage,
+//make sure you change the first stage too.
+void
+FirstStage::processInsts(unsigned tid)
+{
+ bool all_reqs_completed = true;
+
+ for (int insts_fetched = 0; insts_fetched < stageWidth && canSendInstToNextStage(); insts_fetched++) {
+ DynInstPtr inst;
+ bool new_inst = false;
+
+ if (!insts[tid].empty()) {
+ inst = insts[tid].front();
+ } else {
+ // Get new instruction.
+ new_inst = true;
+
+ inst = new InOrderDynInst(cpu,
+ cpu->thread[tid],
+ cpu->nextInstSeqNum(tid),
+ tid);
+
+#if TRACING_ON
+ inst->traceData =
+ tracer->getInstRecord(ThePipeline::NumStages,
+ cpu->stageTracing,
+ cpu->thread[tid]->getTC());
+
+#endif // TRACING_ON
+
+ DPRINTF(RefCount, "creation: [tid:%i]: [sn:%i]: Refcount = %i.\n",
+ inst->readTid(),
+ inst->seqNum,
+ 0/*inst->curCount()*/);
+
+ // Add instruction to the CPU's list of instructions.
+ inst->setInstListIt(cpu->addInst(inst));
+
+ DPRINTF(RefCount, "after add to CPU List: [tid:%i]: [sn:%i]: Refcount = %i.\n",
+ inst->readTid(),
+ inst->seqNum,
+ 0/*inst->curCount()*/);
+
+ // Create Front-End Resource Schedule For Instruction
+ ThePipeline::createFrontEndSchedule(inst);
+ }
+
+        // Don't let the instruction pass to the next stage if it hasn't
+        // completed all of its requests for this stage.
+ all_reqs_completed = processInstSchedule(inst);
+
+ if (!all_reqs_completed) {
+ if (new_inst) {
+ DPRINTF(InOrderStage, "[tid:%u]: [sn:%u] Did not finish all "
+ "requests for this stage. Keep in stage inst. "
+ "list.\n", tid, inst->seqNum);
+ insts[tid].push(inst);
+ }
+ break;
+ } else if (!insts[tid].empty()){
+ insts[tid].pop();
+ }
+
+ sendInstToNextStage(inst);
+ //++stageProcessedInsts;
+ }
+
+ // Record that stage has written to the time buffer for activity
+ // tracking.
+ if (toNextStageIndex) {
+ wroteToTimeBuffer = true;
+ }
+}
+
+int
+FirstStage::getFetchingThread(FetchPriority &fetch_priority)
+{
+ if (numThreads > 1) {
+ switch (fetch_priority) {
+
+ case SingleThread:
+ return 0;
+
+ case RoundRobin:
+ return roundRobin();
+
+ default:
+ return -1;
+ }
+ } else {
+ int tid = *((*activeThreads).begin());
+
+ if (stageStatus[tid] == Running ||
+ stageStatus[tid] == Idle) {
+ return tid;
+ } else {
+ return -1;
+ }
+ }
+
+}
+
+int
+FirstStage::roundRobin()
+{
+ list<unsigned>::iterator pri_iter = (*fetchPriorityList).begin();
+ list<unsigned>::iterator end = (*fetchPriorityList).end();
+
+ int high_pri;
+
+ while (pri_iter != end) {
+ high_pri = *pri_iter;
+
+ assert(high_pri <= numThreads);
+
+ if (stageStatus[high_pri] == Running ||
+ stageStatus[high_pri] == Idle) {
+
+ (*fetchPriorityList).erase(pri_iter);
+ (*fetchPriorityList).push_back(high_pri);
+
+ return high_pri;
+ }
+
+ pri_iter++;
+ }
+
+ return -1;
+}
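
FirstStage::roundRobin() above rotates the fetch priority list on every successful pick: the chosen thread id is erased from its current position and pushed to the back, so it yields to the other ready threads on the next cycle. A compilable sketch of that rotate-on-pick selection follows; the isReady() predicate is an illustrative stand-in for the stageStatus Running/Idle check.

#include <iostream>
#include <list>

// Rotate-on-pick round robin: scan the priority list front to back, and when
// a ready thread is found, move it to the back and return it.
int pickRoundRobin(std::list<unsigned> &priorityList, bool (*isReady)(unsigned))
{
    for (auto it = priorityList.begin(); it != priorityList.end(); ++it) {
        unsigned tid = *it;
        if (isReady(tid)) {
            priorityList.erase(it);        // demote the winner...
            priorityList.push_back(tid);   // ...to the tail of the list
            return static_cast<int>(tid);
        }
    }
    return -1;  // no thread is fetchable this cycle
}

int main()
{
    std::list<unsigned> prio = {0, 1, 2};
    bool (*allReady)(unsigned) = [](unsigned) { return true; };
    std::cout << pickRoundRobin(prio, allReady) << "\n";  // 0
    std::cout << pickRoundRobin(prio, allReady) << "\n";  // 1
    std::cout << pickRoundRobin(prio, allReady) << "\n";  // 2 (0 is now last)
    return 0;
}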
diff --git a/src/cpu/inorder/first_stage.hh b/src/cpu/inorder/first_stage.hh
new file mode 100644
index 000000000..55914c85c
--- /dev/null
+++ b/src/cpu/inorder/first_stage.hh
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_FIRST_STAGE_HH__
+#define __CPU_INORDER_FIRST_STAGE_HH__
+
+#include <queue>
+#include <vector>
+
+#include "base/statistics.hh"
+#include "base/timebuf.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/comm.hh"
+#include "cpu/inorder/params.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/pipeline_stage.hh"
+
+class InOrderCPU;
+
+class FirstStage : public PipelineStage {
+ public:
+ FirstStage(ThePipeline::Params *params, unsigned stage_num);
+
+ /** Set Pointer to CPU */
+ void setCPU(InOrderCPU *cpu_ptr);
+
+ /** Evaluate Stage Info. & Execute Stage */
+ void processStage(bool &status_change);
+
+ /** Process All Instructions Available */
+ void processInsts(unsigned tid);
+
+ /** Squash Instructions Above a Seq. Num */
+ void squash(InstSeqNum squash_seq_num, unsigned tid);
+
+    /** There are no insts coming from previous stages, so there is
+     *  no need to sort insts here.
+     */
+    void sortInsts() {}
+
+    /** There are no skidBuffers for the first stage, so just use
+     *  an empty function.
+     */
+ void skidInsert(unsigned tid) { }
+
+ /** The number of fetching threads in the CPU */
+ int numFetchingThreads;
+
+ //@TODO: Add fetch priority information to a resource class...
+ /** Fetching Policy, Add new policies here.*/
+ enum FetchPriority {
+ SingleThread,
+ RoundRobin
+ };
+
+ /** Fetch policy. */
+ FetchPriority fetchPolicy;
+
+ /** List that has the threads organized by priority. */
+ std::list<unsigned> *fetchPriorityList;
+
+ /** Return the next fetching thread */
+ int getFetchingThread(FetchPriority &fetch_priority);
+
+    /** Return the next thread given the round-robin policy for thread fetching */
+ int roundRobin();
+};
+
+#endif // __CPU_INORDER_FIRST_STAGE_HH__
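
The FetchPriority enum above is explicitly left open ("Add new policies here"). A hypothetical sketch of how a new policy could plug into getFetchingThread() is shown below; the IQCount enumerator and the helper stubs are illustrative only and are not part of this commit.

#include <iostream>

enum FetchPriority {
    SingleThread,
    RoundRobin,
    IQCount        // hypothetical: favor the thread with the fewest insts in flight
};

int roundRobinPick() { return 0; }   // stub standing in for FirstStage::roundRobin()
int iqCountPick()    { return 1; }   // stub for the hypothetical new policy

int getFetchingThread(FetchPriority policy)
{
    switch (policy) {
      case SingleThread: return 0;
      case RoundRobin:   return roundRobinPick();
      case IQCount:      return iqCountPick();
      default:           return -1;   // unknown policy: nothing to fetch
    }
}

int main()
{
    std::cout << getFetchingThread(IQCount) << "\n";  // 1
    return 0;
}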
diff --git a/src/cpu/inorder/inorder_cpu_builder.cc b/src/cpu/inorder/inorder_cpu_builder.cc
new file mode 100644
index 000000000..b1b4bea80
--- /dev/null
+++ b/src/cpu/inorder/inorder_cpu_builder.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <string>
+
+#include "cpu/base.hh"
+#include "cpu/inst_seq.hh"
+#include "cpu/static_inst.hh"
+#include "cpu/inorder/cpu.hh"
+//#include "cpu/inorder/params.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "params/InOrderCPU.hh"
+
+InOrderCPU *
+InOrderCPUParams::create()
+{
+ int actual_num_threads =
+ (numThreads >= workload.size()) ? numThreads : workload.size();
+
+ if (workload.size() == 0) {
+ fatal("Must specify at least one workload!");
+ }
+
+ numThreads = actual_num_threads;
+
+ instShiftAmt = 2;
+
+ return new InOrderCPU(this);
+}
+
+
+
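
InOrderCPUParams::create() above sizes the CPU to cover every workload: the final thread count is the larger of the configured numThreads and workload.size(), and an empty workload list is a fatal error. A small standalone sketch of that resolution logic, with plain standard-library types standing in for the generated params class:

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

// Require at least one workload, and build with enough threads to cover them all.
unsigned resolveNumThreads(unsigned numThreads,
                           const std::vector<std::string> &workload)
{
    if (workload.empty()) {
        std::fprintf(stderr, "Must specify at least one workload!\n");
        std::exit(1);
    }
    // Never shrink the configured thread count; grow it to cover every workload.
    return std::max<unsigned>(numThreads, workload.size());
}

int main()
{
    std::vector<std::string> workload = {"a.out", "b.out", "c.out"};
    std::printf("threads = %u\n", resolveNumThreads(2, workload));  // threads = 3
    return 0;
}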
diff --git a/src/cpu/inorder/inorder_dyn_inst.cc b/src/cpu/inorder/inorder_dyn_inst.cc
new file mode 100644
index 000000000..89362c656
--- /dev/null
+++ b/src/cpu/inorder/inorder_dyn_inst.cc
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <iostream>
+#include <set>
+#include <string>
+#include <sstream>
+
+#include "base/cprintf.hh"
+#include "base/trace.hh"
+
+#include "arch/faults.hh"
+#include "cpu/exetrace.hh"
+#include "mem/request.hh"
+
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+InOrderDynInst::InOrderDynInst(TheISA::ExtMachInst machInst, Addr inst_PC,
+ Addr pred_PC, InstSeqNum seq_num,
+ InOrderCPU *cpu)
+ : staticInst(machInst, inst_PC), traceData(NULL), cpu(cpu)
+{
+ seqNum = seq_num;
+
+ PC = inst_PC;
+ nextPC = PC + sizeof(MachInst);
+ nextNPC = nextPC + sizeof(MachInst);
+ predPC = pred_PC;
+
+ initVars();
+}
+
+InOrderDynInst::InOrderDynInst(InOrderCPU *cpu,
+ InOrderThreadState *state,
+ InstSeqNum seq_num,
+ unsigned tid)
+ : traceData(NULL), cpu(cpu)
+{
+ seqNum = seq_num;
+ thread = state;
+ threadNumber = tid;
+ initVars();
+}
+
+InOrderDynInst::InOrderDynInst(StaticInstPtr &_staticInst)
+ : staticInst(_staticInst), traceData(NULL)
+{
+ seqNum = 0;
+ initVars();
+}
+
+InOrderDynInst::InOrderDynInst()
+  : traceData(NULL), cpu(NULL)
+{ initVars(); }
+
+int InOrderDynInst::instcount = 0;
+
+
+void
+InOrderDynInst::setMachInst(ExtMachInst machInst)
+{
+ staticInst = StaticInst::decode(machInst, PC);
+
+ for (int i = 0; i < this->staticInst->numDestRegs(); i++) {
+ _destRegIdx[i] = this->staticInst->destRegIdx(i);
+ }
+
+ for (int i = 0; i < this->staticInst->numSrcRegs(); i++) {
+ _srcRegIdx[i] = this->staticInst->srcRegIdx(i);
+ this->_readySrcRegIdx[i] = 0;
+ }
+}
+
+void
+InOrderDynInst::initVars()
+{
+ req = NULL;
+ effAddr = 0;
+ physEffAddr = 0;
+
+ readyRegs = 0;
+
+ nextStage = 0;
+ nextInstStageNum = 0;
+
+ for(int i = 0; i < MaxInstDestRegs; i++)
+ instResult[i].val.integer = 0;
+
+ status.reset();
+
+ memAddrReady = false;
+ eaCalcDone = false;
+ memOpDone = false;
+
+ predictTaken = false;
+ procDelaySlotOnMispred = false;
+
+ lqIdx = -1;
+ sqIdx = -1;
+
+ // Also make this a parameter, or perhaps get it from xc or cpu.
+ asid = 0;
+
+ virtProcNumber = 0;
+
+ // Initialize the fault to be NoFault.
+ fault = NoFault;
+
+ // Make sure to have the renamed register entries set to the same
+ // as the normal register entries. It will allow the IQ to work
+ // without any modifications.
+ if (this->staticInst) {
+ for (int i = 0; i < this->staticInst->numDestRegs(); i++) {
+ _destRegIdx[i] = this->staticInst->destRegIdx(i);
+ }
+
+ for (int i = 0; i < this->staticInst->numSrcRegs(); i++) {
+ _srcRegIdx[i] = this->staticInst->srcRegIdx(i);
+ this->_readySrcRegIdx[i] = 0;
+ }
+ }
+
+ // Update Instruction Count for this instruction
+ ++instcount;
+ if (instcount > 500) {
+ fatal("Number of Active Instructions in CPU is too high. "
+ "(Not Dereferencing Ptrs. Correctly?)\n");
+ }
+
+ DPRINTF(InOrderDynInst, "DynInst: [tid:%i] [sn:%lli] Instruction created. (active insts: %i)\n",
+ threadNumber, seqNum, instcount);
+
+#ifdef DEBUG
+ cpu->snList.insert(seqNum);
+#endif
+}
+
+
+InOrderDynInst::~InOrderDynInst()
+{
+ if (req) {
+ delete req;
+ }
+
+ if (traceData) {
+ delete traceData;
+ }
+
+ fault = NoFault;
+
+ --instcount;
+
+ deleteStages();
+
+ DPRINTF(InOrderDynInst, "DynInst: [tid:%i] [sn:%lli] Instruction destroyed. (active insts: %i)\n",
+ threadNumber, seqNum, instcount);
+#ifdef DEBUG
+ cpu->snList.erase(seqNum);
+#endif
+}
+
+void
+InOrderDynInst::setStaticInst(StaticInstPtr &static_inst)
+{
+ this->staticInst = static_inst;
+
+ // Make sure to have the renamed register entries set to the same
+ // as the normal register entries. It will allow the IQ to work
+ // without any modifications.
+ if (this->staticInst) {
+ for (int i = 0; i < this->staticInst->numDestRegs(); i++) {
+ _destRegIdx[i] = this->staticInst->destRegIdx(i);
+ }
+
+ for (int i = 0; i < this->staticInst->numSrcRegs(); i++) {
+ _srcRegIdx[i] = this->staticInst->srcRegIdx(i);
+ this->_readySrcRegIdx[i] = 0;
+ }
+ }
+}
+
+Fault
+InOrderDynInst::execute()
+{
+ // @todo: Pretty convoluted way to avoid squashing from happening
+ // when using the TC during an instruction's execution
+ // (specifically for instructions that have side-effects that use
+ // the TC). Fix this.
+ bool in_syscall = this->thread->inSyscall;
+ this->thread->inSyscall = true;
+
+ this->fault = this->staticInst->execute(this, this->traceData);
+
+ this->thread->inSyscall = in_syscall;
+
+ return this->fault;
+}
+
+Fault
+InOrderDynInst::initiateAcc()
+{
+ // @todo: Pretty convoluted way to avoid squashing from happening
+ // when using the TC during an instruction's execution
+ // (specifically for instructions that have side-effects that use
+ // the TC). Fix this.
+ bool in_syscall = this->thread->inSyscall;
+ this->thread->inSyscall = true;
+
+ this->fault = this->staticInst->initiateAcc(this, this->traceData);
+
+ this->thread->inSyscall = in_syscall;
+
+ return this->fault;
+}
+
+
+Fault
+InOrderDynInst::completeAcc(Packet *pkt)
+{
+ this->fault = this->staticInst->completeAcc(pkt, this, this->traceData);
+
+ return this->fault;
+}
+
+InstStage *InOrderDynInst::addStage()
+{
+ this->currentInstStage = new InstStage(this, nextInstStageNum++);
+ instStageList.push_back( this->currentInstStage );
+ return this->currentInstStage;
+}
+
+InstStage *InOrderDynInst::addStage(int stage_num)
+{
+ nextInstStageNum = stage_num;
+ return InOrderDynInst::addStage();
+}
+
+void InOrderDynInst::deleteStages() {
+ std::list<InstStage*>::iterator list_it = instStageList.begin();
+ std::list<InstStage*>::iterator list_end = instStageList.end();
+
+ while(list_it != list_end) {
+ delete *list_it;
+ list_it++;
+ }
+}
+
+Fault
+InOrderDynInst::calcEA()
+{
+ return staticInst->eaCompInst()->execute(this, this->traceData);
+}
+
+Fault
+InOrderDynInst::memAccess()
+{
+ //return staticInst->memAccInst()->execute(this, this->traceData);
+ return initiateAcc( );
+}
+
+void
+InOrderDynInst::syscall(int64_t callnum)
+{
+ cpu->syscall(callnum, this->threadNumber);
+}
+
+void
+InOrderDynInst::prefetch(Addr addr, unsigned flags)
+{
+ // This is the "functional" implementation of prefetch. Not much
+ // happens here since prefetches don't affect the architectural
+ // state.
+/*
+ // Generate a MemReq so we can translate the effective address.
+ MemReqPtr req = new MemReq(addr, thread->getXCProxy(), 1, flags);
+ req->asid = asid;
+
+ // Prefetches never cause faults.
+ fault = NoFault;
+
+ // note this is a local, not InOrderDynInst::fault
+ Fault trans_fault = cpu->translateDataReadReq(req);
+
+ if (trans_fault == NoFault && !(req->flags & UNCACHEABLE)) {
+ // It's a valid address to cacheable space. Record key MemReq
+ // parameters so we can generate another one just like it for
+ // the timing access without calling translate() again (which
+ // might mess up the TLB).
+ effAddr = req->vaddr;
+ physEffAddr = req->paddr;
+ memReqFlags = req->flags;
+ } else {
+ // Bogus address (invalid or uncacheable space). Mark it by
+ // setting the eff_addr to InvalidAddr.
+ effAddr = physEffAddr = MemReq::inval_addr;
+ }
+
+ if (traceData) {
+ traceData->setAddr(addr);
+ }
+*/
+}
+
+void
+InOrderDynInst::writeHint(Addr addr, int size, unsigned flags)
+{
+ // Not currently supported.
+}
+
+/**
+ * @todo Need to find a way to get the cache block size here.
+ */
+Fault
+InOrderDynInst::copySrcTranslate(Addr src)
+{
+ // Not currently supported.
+ return NoFault;
+}
+
+/**
+ * @todo Need to find a way to get the cache block size here.
+ */
+Fault
+InOrderDynInst::copy(Addr dest)
+{
+ // Not currently supported.
+ return NoFault;
+}
+
+void
+InOrderDynInst::releaseReq(ResourceRequest* req)
+{
+ std::list<ResourceRequest*>::iterator list_it = reqList.begin();
+ std::list<ResourceRequest*>::iterator list_end = reqList.end();
+
+ while(list_it != list_end) {
+ if((*list_it)->getResIdx() == req->getResIdx() &&
+ (*list_it)->getSlot() == req->getSlot()) {
+ DPRINTF(InOrderDynInst, "[tid:%u]: [sn:%i] Done with request to %s.\n",
+ threadNumber, seqNum, req->res->name());
+ reqList.erase(list_it);
+ return;
+ }
+ list_it++;
+ }
+
+    panic("Releasing Res. Request That Isn't There!\n");
+}
+
+/** Records an integer source register being set to a value. */
+void
+InOrderDynInst::setIntSrc(int idx, uint64_t val)
+{
+ DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Source Value %i being set to %#x.\n",
+ threadNumber, seqNum, idx, val);
+ instSrc[idx].integer = val;
+}
+
+/** Records an fp register being set to a value. */
+void
+InOrderDynInst::setFloatSrc(int idx, FloatReg val, int width)
+{
+ if (width == 32)
+ instSrc[idx].fp = val;
+ else if (width == 64)
+ instSrc[idx].dbl = val;
+ else
+ panic("Unsupported width!");
+}
+
+/** Records an fp register being set to an integer value. */
+void
+InOrderDynInst::setFloatRegBitsSrc(int idx, uint64_t val)
+{
+ instSrc[idx].integer = val;
+}
+
+/** Reads an integer register. */
+IntReg
+InOrderDynInst::readIntRegOperand(const StaticInst *si, int idx, unsigned tid)
+{
+ DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Source Value %i read as %#x.\n",
+ threadNumber, seqNum, idx, instSrc[idx].integer);
+ return instSrc[idx].integer;
+}
+
+/** Reads a FP register. */
+FloatReg
+InOrderDynInst::readFloatRegOperand(const StaticInst *si, int idx, int width)
+{
+ return instSrc[idx].fp;
+}
+
+
+/** Reads an FP register as an integer. */
+FloatRegBits
+InOrderDynInst::readFloatRegOperandBits(const StaticInst *si, int idx, int width)
+{
+ return instSrc[idx].integer;
+}
+
+/** Reads a miscellaneous register. */
+MiscReg
+InOrderDynInst::readMiscReg(int misc_reg)
+{
+ return this->cpu->readMiscReg(misc_reg, threadNumber);
+}
+
+/** Reads a misc. register, including any side-effects the read
+ * might have as defined by the architecture.
+ */
+MiscReg
+InOrderDynInst::readMiscRegNoEffect(int misc_reg)
+{
+ return this->cpu->readMiscRegNoEffect(misc_reg, threadNumber);
+}
+
+/** Reads a miscellaneous register. */
+MiscReg
+InOrderDynInst::readMiscRegOperandNoEffect(const StaticInst *si, int idx)
+{
+ return this->cpu->readMiscRegNoEffect(
+ si->srcRegIdx(idx) - TheISA::Ctrl_Base_DepTag,
+ this->threadNumber);
+}
+
+/** Reads a misc. register, including any side-effects the read
+ * might have as defined by the architecture.
+ */
+MiscReg
+InOrderDynInst::readMiscRegOperand(const StaticInst *si, int idx)
+{
+ return this->cpu->readMiscReg(
+ si->srcRegIdx(idx) - TheISA::Ctrl_Base_DepTag,
+ this->threadNumber);
+}
+
+/** Sets a misc. register. */
+void
+InOrderDynInst::setMiscRegOperandNoEffect(const StaticInst * si, int idx, const MiscReg &val)
+{
+ instResult[si->destRegIdx(idx)].val.integer = val;
+ instResult[si->destRegIdx(idx)].tick = curTick;
+
+ this->cpu->setMiscRegNoEffect(
+ si->destRegIdx(idx) - TheISA::Ctrl_Base_DepTag,
+ val, this->threadNumber);
+}
+
+/** Sets a misc. register, including any side-effects the write
+ * might have as defined by the architecture.
+ */
+void
+InOrderDynInst::setMiscRegOperand(const StaticInst *si, int idx,
+ const MiscReg &val)
+{
+ instResult[si->destRegIdx(idx)].val.integer = val;
+ instResult[si->destRegIdx(idx)].tick = curTick;
+
+ this->cpu->setMiscReg(
+ si->destRegIdx(idx) - TheISA::Ctrl_Base_DepTag,
+ val, this->threadNumber);
+}
+
+MiscReg
+InOrderDynInst::readRegOtherThread(unsigned reg_idx, int tid)
+{
+ if (tid == -1) {
+ tid = TheISA::getTargetThread(this->cpu->tcBase(threadNumber));
+ }
+
+ if (reg_idx < FP_Base_DepTag) { // Integer Register File
+ return this->cpu->readIntReg(reg_idx, tid);
+ } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
+ reg_idx -= FP_Base_DepTag;
+ return this->cpu->readFloatRegBits(reg_idx, tid);
+ } else {
+ reg_idx -= Ctrl_Base_DepTag;
+ return this->cpu->readMiscReg(reg_idx, tid); // Misc. Register File
+ }
+}
+
+/** Sets an integer register. */
+void
+InOrderDynInst::setIntRegOperand(const StaticInst *si, int idx, IntReg val)
+{
+ instResult[idx].val.integer = val;
+ instResult[idx].tick = curTick;
+}
+
+/** Sets a FP register. */
+void
+InOrderDynInst::setFloatRegOperand(const StaticInst *si, int idx, FloatReg val, int width)
+{
+ if (width == 32)
+ instResult[idx].val.fp = val;
+ else if (width == 64)
+ instResult[idx].val.dbl = val;
+ else
+ panic("Unsupported Floating Point Width!");
+
+ instResult[idx].tick = curTick;
+}
+
+/** Sets an FP register as an integer. */
+void
+InOrderDynInst::setFloatRegOperandBits(const StaticInst *si, int idx,
+ FloatRegBits val, int width)
+{
+ instResult[idx].val.integer = val;
+ instResult[idx].tick = curTick;
+}
+
+/** Sets a misc. register. */
+/* Alter this when wanting to *speculate* on Miscellaneous registers */
+void
+InOrderDynInst::setMiscRegNoEffect(int misc_reg, const MiscReg &val)
+{
+ this->cpu->setMiscRegNoEffect(misc_reg, val, threadNumber);
+}
+
+/** Sets a misc. register, including any side-effects the write
+ * might have as defined by the architecture.
+ */
+/* Alter this if/when wanting to *speculate* on Miscellaneous registers */
+void
+InOrderDynInst::setMiscReg(int misc_reg, const MiscReg &val)
+{
+ this->cpu->setMiscReg(misc_reg, val, threadNumber);
+}
+
+void
+InOrderDynInst::setRegOtherThread(unsigned reg_idx, const MiscReg &val, int tid)
+{
+ if (tid == -1) {
+ tid = TheISA::getTargetThread(this->cpu->tcBase(threadNumber));
+ }
+
+ if (reg_idx < FP_Base_DepTag) { // Integer Register File
+ this->cpu->setIntReg(reg_idx, val, tid);
+ } else if (reg_idx < Ctrl_Base_DepTag) { // Float Register File
+ reg_idx -= FP_Base_DepTag;
+ this->cpu->setFloatRegBits(reg_idx, val, tid);
+ } else {
+ reg_idx -= Ctrl_Base_DepTag;
+ this->cpu->setMiscReg(reg_idx, val, tid); // Misc. Register File
+ }
+}
+
+void
+InOrderDynInst::deallocateContext(int thread_num)
+{
+ this->cpu->deallocateContext(thread_num);
+}
+
+void
+InOrderDynInst::enableVirtProcElement(unsigned vpe)
+{
+ this->cpu->enableVirtProcElement(vpe);
+}
+
+void
+InOrderDynInst::disableVirtProcElement(unsigned vpe)
+{
+ this->cpu->disableVirtProcElement(threadNumber, vpe);
+}
+
+void
+InOrderDynInst::enableMultiThreading(unsigned vpe)
+{
+ this->cpu->enableMultiThreading(vpe);
+}
+
+void
+InOrderDynInst::disableMultiThreading(unsigned vpe)
+{
+ this->cpu->disableMultiThreading(threadNumber, vpe);
+}
+
+void
+InOrderDynInst::setThreadRescheduleCondition(uint32_t cond)
+{
+ this->cpu->setThreadRescheduleCondition(cond);
+}
+
+template<class T>
+inline Fault
+InOrderDynInst::read(Addr addr, T &data, unsigned flags)
+{
+ return cpu->read(this);
+}
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+template
+Fault
+InOrderDynInst::read(Addr addr, uint64_t &data, unsigned flags);
+
+template
+Fault
+InOrderDynInst::read(Addr addr, uint32_t &data, unsigned flags);
+
+template
+Fault
+InOrderDynInst::read(Addr addr, uint16_t &data, unsigned flags);
+
+template
+Fault
+InOrderDynInst::read(Addr addr, uint8_t &data, unsigned flags);
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
+
+template<>
+Fault
+InOrderDynInst::read(Addr addr, double &data, unsigned flags)
+{
+ return read(addr, *(uint64_t*)&data, flags);
+}
+
+template<>
+Fault
+InOrderDynInst::read(Addr addr, float &data, unsigned flags)
+{
+ return read(addr, *(uint32_t*)&data, flags);
+}
+
+template<>
+Fault
+InOrderDynInst::read(Addr addr, int32_t &data, unsigned flags)
+{
+ return read(addr, (uint32_t&)data, flags);
+}
+
+template<class T>
+inline Fault
+InOrderDynInst::write(T data, Addr addr, unsigned flags, uint64_t *res)
+{
+ //memcpy(memData, gtoh(data), sizeof(T));
+ storeData = data;
+
+    DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] Setting store data to %#x.\n",
+            threadNumber, seqNum, storeData);
+ return cpu->write(this);
+}
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+template
+Fault
+InOrderDynInst::write(uint64_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+InOrderDynInst::write(uint32_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+InOrderDynInst::write(uint16_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+template
+Fault
+InOrderDynInst::write(uint8_t data, Addr addr,
+ unsigned flags, uint64_t *res);
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
+
+template<>
+Fault
+InOrderDynInst::write(double data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write(*(uint64_t*)&data, addr, flags, res);
+}
+
+template<>
+Fault
+InOrderDynInst::write(float data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write(*(uint32_t*)&data, addr, flags, res);
+}
+
+
+template<>
+Fault
+InOrderDynInst::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
+{
+ return write((uint32_t)data, addr, flags, res);
+}
+
+
+void
+InOrderDynInst::dump()
+{
+ cprintf("T%d : %#08d `", threadNumber, PC);
+ cout << staticInst->disassemble(PC);
+ cprintf("'\n");
+}
+
+void
+InOrderDynInst::dump(std::string &outstring)
+{
+ std::ostringstream s;
+ s << "T" << threadNumber << " : 0x" << PC << " "
+ << staticInst->disassemble(PC);
+
+ outstring = s.str();
+}
+
+
+#define NOHASH
+#ifndef NOHASH
+
+#include "base/hashmap.hh"
+
+unsigned int MyHashFunc(const InOrderDynInst *addr)
+{
+ unsigned a = (unsigned)addr;
+ unsigned hash = (((a >> 14) ^ ((a >> 2) & 0xffff))) & 0x7FFFFFFF;
+
+ return hash;
+}
+
+typedef m5::hash_map<const InOrderDynInst *, const InOrderDynInst *, MyHashFunc>
+my_hash_t;
+
+my_hash_t thishash;
+#endif
+
+#ifdef DEBUG
+
+void
+InOrderDynInst::dumpSNList()
+{
+ std::set<InstSeqNum>::iterator sn_it = cpu->snList.begin();
+
+ int count = 0;
+ while (sn_it != cpu->snList.end()) {
+ cprintf("%i: [sn:%lli] not destroyed\n", count, (*sn_it));
+ count++;
+ sn_it++;
+ }
+}
+#endif
+
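
The double and float specializations of read()/write() above forward to the integer overloads by reinterpreting the value's raw bits through a pointer cast. The standalone sketch below shows the same bit-level forwarding written with memcpy (the aliasing-safe equivalent of the cast); doubleToBits/bitsToDouble are illustrative helpers, not part of the CPU's interface.

#include <cstdint>
#include <cstring>
#include <iostream>

uint64_t doubleToBits(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));   // copy the raw IEEE-754 encoding
    return bits;
}

double bitsToDouble(uint64_t bits)
{
    double d;
    std::memcpy(&d, &bits, sizeof(d));
    return d;
}

int main()
{
    double val = 3.141592653589793;
    uint64_t raw = doubleToBits(val);        // what write(double, ...) hands to the integer overload
    std::cout << std::hex << raw << "\n";
    std::cout << bitsToDouble(raw) << "\n";  // round-trips to the original value
    return 0;
}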
diff --git a/src/cpu/inorder/inorder_dyn_inst.hh b/src/cpu/inorder/inorder_dyn_inst.hh
new file mode 100644
index 000000000..9f52f954f
--- /dev/null
+++ b/src/cpu/inorder/inorder_dyn_inst.hh
@@ -0,0 +1,975 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Kevin Lim
+ * Korey Sewell
+ */
+
+#ifndef __CPU_INORDER_DYN_INST_HH__
+#define __CPU_INORDER_DYN_INST_HH__
+
+#include <bitset>
+#include <list>
+#include <string>
+
+#include "arch/faults.hh"
+#include "base/fast_alloc.hh"
+#include "base/trace.hh"
+#include "cpu/inorder/inorder_trace.hh"
+#include "config/full_system.hh"
+#include "cpu/thread_context.hh"
+#include "cpu/exetrace.hh"
+#include "cpu/inst_seq.hh"
+#include "cpu/op_class.hh"
+#include "cpu/static_inst.hh"
+#include "cpu/inorder/thread_state.hh"
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "mem/packet.hh"
+#include "sim/system.hh"
+
+/**
+ * @file
+ * Defines a dynamic instruction context for an in-order CPU model.
+ */
+
+// Forward declaration.
+class StaticInstPtr;
+class ResourceRequest;
+
+class InOrderDynInst : public FastAlloc, public RefCounted
+{
+ public:
+ // Binary machine instruction type.
+ typedef TheISA::MachInst MachInst;
+ // Extended machine instruction type
+ typedef TheISA::ExtMachInst ExtMachInst;
+ // Logical register index type.
+ typedef TheISA::RegIndex RegIndex;
+ // Integer register type.
+ typedef TheISA::IntReg IntReg;
+ // Floating point register type.
+ typedef TheISA::FloatReg FloatReg;
+ // Floating point register type.
+ typedef TheISA::MiscReg MiscReg;
+
+ typedef short int PhysRegIndex;
+
+ /** The refcounted DynInst pointer to be used. In most cases this is
+ * what should be used, and not DynInst*.
+ */
+ typedef RefCountingPtr<InOrderDynInst> DynInstPtr;
+
+ // The list of instructions iterator type.
+ typedef std::list<DynInstPtr>::iterator ListIt;
+
+ enum {
+ MaxInstSrcRegs = TheISA::MaxInstSrcRegs, /// Max source regs
+ MaxInstDestRegs = TheISA::MaxInstDestRegs, /// Max dest regs
+ };
+
+ public:
+ /** BaseDynInst constructor given a binary instruction.
+ * @param inst The binary instruction.
+ * @param PC The PC of the instruction.
+ * @param pred_PC The predicted next PC.
+ * @param seq_num The sequence number of the instruction.
+ * @param cpu Pointer to the instruction's CPU.
+ */
+ InOrderDynInst(ExtMachInst inst, Addr PC, Addr pred_PC, InstSeqNum seq_num,
+ InOrderCPU *cpu);
+
+ /** BaseDynInst constructor given a binary instruction.
+ * @param seq_num The sequence number of the instruction.
+ * @param cpu Pointer to the instruction's CPU.
+     * NOTE: The binary instruction must be set through a member function
+ */
+ InOrderDynInst(InOrderCPU *cpu, InOrderThreadState *state, InstSeqNum seq_num,
+ unsigned tid);
+
+ /** BaseDynInst constructor given a StaticInst pointer.
+ * @param _staticInst The StaticInst for this BaseDynInst.
+ */
+ InOrderDynInst(StaticInstPtr &_staticInst);
+
+ /** Skeleton Constructor. */
+ InOrderDynInst();
+
+ /** InOrderDynInst destructor. */
+ ~InOrderDynInst();
+
+ public:
+ /** The sequence number of the instruction. */
+ InstSeqNum seqNum;
+
+    /** The sequence number of the instruction in the branch delay slot. */
+ InstSeqNum bdelaySeqNum;
+
+ enum Status {
+ RegDepMapEntry, /// Instruction has been entered onto the RegDepMap
+ IqEntry, /// Instruction is in the IQ
+ RobEntry, /// Instruction is in the ROB
+ LsqEntry, /// Instruction is in the LSQ
+ Completed, /// Instruction has completed
+ ResultReady, /// Instruction has its result
+ CanIssue, /// Instruction can issue and execute
+ Issued, /// Instruction has issued
+ Executed, /// Instruction has executed
+ CanCommit, /// Instruction can commit
+ AtCommit, /// Instruction has reached commit
+ Committed, /// Instruction has committed
+ Squashed, /// Instruction is squashed
+ SquashedInIQ, /// Instruction is squashed in the IQ
+ SquashedInLSQ, /// Instruction is squashed in the LSQ
+ SquashedInROB, /// Instruction is squashed in the ROB
+ RecoverInst, /// Is a recover instruction
+ BlockingInst, /// Is a blocking instruction
+ ThreadsyncWait, /// Is a thread synchronization instruction
+ SerializeBefore, /// Needs to serialize on
+ /// instructions ahead of it
+ SerializeAfter, /// Needs to serialize instructions behind it
+ SerializeHandled, /// Serialization has been handled
+ NumStatus
+ };
+
+ /** The status of this BaseDynInst. Several bits can be set. */
+ std::bitset<NumStatus> status;
+
+ /** The thread this instruction is from. */
+ short threadNumber;
+
+ /** data address space ID, for loads & stores. */
+ short asid;
+
+ /** The virtual processor number */
+ short virtProcNumber;
+
+ /** The StaticInst used by this BaseDynInst. */
+ StaticInstPtr staticInst;
+
+    /** InstRecord that tracks this instruction. */
+ Trace::InOrderTraceRecord *traceData;
+
+ /** Pointer to the Impl's CPU object. */
+ InOrderCPU *cpu;
+
+ /** Pointer to the thread state. */
+ InOrderThreadState *thread;
+
+ /** The kind of fault this instruction has generated. */
+ Fault fault;
+
+ /** The memory request. */
+ Request *req;
+
+ /** Pointer to the data for the memory access. */
+ uint8_t *memData;
+
+    /** Data used for a load operation. */
+    uint64_t loadData;
+
+    /** Data used for a store operation. */
+ uint64_t storeData;
+
+ /** The resource schedule for this inst */
+ ThePipeline::ResSchedule resSched;
+
+ /** List of active resource requests for this instruction */
+ std::list<ResourceRequest*> reqList;
+
+    /** The effective virtual address (loads & stores only). */
+ Addr effAddr;
+
+ /** The effective physical address. */
+ Addr physEffAddr;
+
+ /** Effective virtual address for a copy source. */
+ Addr copySrcEffAddr;
+
+ /** Effective physical address for a copy source. */
+ Addr copySrcPhysEffAddr;
+
+ /** The memory request flags (from translation). */
+ unsigned memReqFlags;
+
+ /** How many source registers are ready. */
+ unsigned readyRegs;
+
+ /** An instruction src/dest has to be one of these types */
+ union InstValue {
+ uint64_t integer;
+ float fp;
+ double dbl;
+ };
+
+ /** Result of an instruction execution */
+ struct InstResult {
+ InstValue val;
+ Tick tick;
+ };
+
+    /** The source register values of the instruction. */
+ InstValue instSrc[MaxInstSrcRegs];
+
+    /** The results of the instruction, one entry per destination register. */
+ InstResult instResult[MaxInstDestRegs];
+
+ /** PC of this instruction. */
+ Addr PC;
+
+ /** Next non-speculative PC. It is not filled in at fetch, but rather
+ * once the target of the branch is truly known (either decode or
+ * execute).
+ */
+ Addr nextPC;
+
+ /** Next next non-speculative PC. It is not filled in at fetch, but rather
+ * once the target of the branch is truly known (either decode or
+ * execute).
+ */
+ Addr nextNPC;
+
+ /** Predicted next PC. */
+ Addr predPC;
+
+ /** Address to fetch from */
+ Addr fetchAddr;
+
+ /** Address to get/write data from/to */
+ Addr memAddr;
+
+ /** Whether or not the source register is ready.
+ * @todo: Not sure this should be here vs the derived class.
+ */
+ bool _readySrcRegIdx[MaxInstSrcRegs];
+
+ /** Physical register index of the destination registers of this
+ * instruction.
+ */
+ PhysRegIndex _destRegIdx[MaxInstDestRegs];
+
+ /** Physical register index of the source registers of this
+ * instruction.
+ */
+ PhysRegIndex _srcRegIdx[MaxInstSrcRegs];
+
+ /** Physical register index of the previous producers of the
+ * architected destinations.
+ */
+ PhysRegIndex _prevDestRegIdx[MaxInstDestRegs];
+
+ int nextStage;
+
+    /* Variables to keep track of InstStages - used for resource schedule definition */
+ int nextInstStageNum;
+ ThePipeline::InstStage *currentInstStage;
+ std::list<ThePipeline::InstStage*> instStageList;
+
+ private:
+ /** Function to initialize variables in the constructors. */
+ void initVars();
+
+ public:
+ Tick memTime;
+
+ ////////////////////////////////////////////////////////////
+ //
+ // BASE INSTRUCTION INFORMATION.
+ //
+ ////////////////////////////////////////////////////////////
+ void setMachInst(ExtMachInst inst);
+
+ /** Sets the StaticInst. */
+ void setStaticInst(StaticInstPtr &static_inst);
+
+ /** Sets the sequence number. */
+ void setSeqNum(InstSeqNum seq_num) { seqNum = seq_num; }
+
+ /** Sets the ASID. */
+ void setASID(short addr_space_id) { asid = addr_space_id; }
+
+ /** Reads the thread id. */
+ short readTid() { return threadNumber; }
+
+ /** Sets the thread id. */
+ void setTid(unsigned tid) { threadNumber = tid; }
+
+ void setVpn(int id) { virtProcNumber = id; }
+
+ int readVpn() { return virtProcNumber; }
+
+ /** Sets the pointer to the thread state. */
+ void setThreadState(InOrderThreadState *state) { thread = state; }
+
+ /** Returns the thread context. */
+ ThreadContext *tcBase() { return thread->getTC(); }
+
+ /** Returns the fault type. */
+ Fault getFault() { return fault; }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // INSTRUCTION TYPES - Forward checks to StaticInst object.
+ //
+ ////////////////////////////////////////////////////////////
+ bool isNop() const { return staticInst->isNop(); }
+ bool isMemRef() const { return staticInst->isMemRef(); }
+ bool isLoad() const { return staticInst->isLoad(); }
+ bool isStore() const { return staticInst->isStore(); }
+ bool isStoreConditional() const
+ { return staticInst->isStoreConditional(); }
+ bool isInstPrefetch() const { return staticInst->isInstPrefetch(); }
+ bool isDataPrefetch() const { return staticInst->isDataPrefetch(); }
+ bool isCopy() const { return staticInst->isCopy(); }
+ bool isInteger() const { return staticInst->isInteger(); }
+ bool isFloating() const { return staticInst->isFloating(); }
+ bool isControl() const { return staticInst->isControl(); }
+ bool isCall() const { return staticInst->isCall(); }
+ bool isReturn() const { return staticInst->isReturn(); }
+ bool isDirectCtrl() const { return staticInst->isDirectCtrl(); }
+ bool isIndirectCtrl() const { return staticInst->isIndirectCtrl(); }
+ bool isCondCtrl() const { return staticInst->isCondCtrl(); }
+ bool isUncondCtrl() const { return staticInst->isUncondCtrl(); }
+ bool isCondDelaySlot() const { return staticInst->isCondDelaySlot(); }
+
+ bool isThreadSync() const { return staticInst->isThreadSync(); }
+ bool isSerializing() const { return staticInst->isSerializing(); }
+ bool isSerializeBefore() const
+ { return staticInst->isSerializeBefore() || status[SerializeBefore]; }
+ bool isSerializeAfter() const
+ { return staticInst->isSerializeAfter() || status[SerializeAfter]; }
+ bool isMemBarrier() const { return staticInst->isMemBarrier(); }
+ bool isWriteBarrier() const { return staticInst->isWriteBarrier(); }
+ bool isNonSpeculative() const { return staticInst->isNonSpeculative(); }
+ bool isQuiesce() const { return staticInst->isQuiesce(); }
+ bool isIprAccess() const { return staticInst->isIprAccess(); }
+ bool isUnverifiable() const { return staticInst->isUnverifiable(); }
+
+ /////////////////////////////////////////////
+ //
+ // RESOURCE SCHEDULING
+ //
+ /////////////////////////////////////////////
+
+ void setNextStage(int stage_num) { nextStage = stage_num; }
+ int getNextStage() { return nextStage; }
+
+ ThePipeline::InstStage *addStage();
+ ThePipeline::InstStage *addStage(int stage);
+ ThePipeline::InstStage *currentStage() { return currentInstStage; }
+ void deleteStages();
+
+    /** Add An Entry To The Resource Schedule */
+ void addToSched(ThePipeline::ScheduleEntry* sched_entry)
+ { resSched.push(sched_entry); }
+
+
+ /** Print Resource Schedule */
+ void printSched()
+ {
+ using namespace ThePipeline;
+
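+        // resSched is a priority queue and cannot be iterated in place, so
+        // drain it into tempSched while printing, then restore it afterwards.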
+ ResSchedule tempSched;
+ std::cerr << "\tInst. Res. Schedule: ";
+ while (!resSched.empty()) {
+ std::cerr << '\t' << resSched.top()->stageNum << "-"
+ << resSched.top()->resNum << ", ";
+
+ tempSched.push(resSched.top());
+ resSched.pop();
+ }
+
+ std::cerr << std::endl;
+ resSched = tempSched;
+ }
+
+ /** Return Next Resource Stage To Be Used */
+ int nextResStage()
+ {
+ if (resSched.empty())
+ return -1;
+ else
+ return resSched.top()->stageNum;
+ }
+
+
+ /** Return Next Resource To Be Used */
+ int nextResource()
+ {
+ if (resSched.empty())
+ return -1;
+ else
+ return resSched.top()->resNum;
+ }
+
+ /** Remove & Deallocate a schedule entry */
+ void popSchedEntry()
+ {
+ if (!resSched.empty()) {
+ ThePipeline::ScheduleEntry* sked = resSched.top();
+ resSched.pop();
+ delete sked;
+ }
+ }
+
+ /** Release a Resource Request (Currently Unused) */
+ void releaseReq(ResourceRequest* req);
+
+ ////////////////////////////////////////////
+ //
+ // INSTRUCTION EXECUTION
+ //
+ ////////////////////////////////////////////
+ /** Returns the opclass of this instruction. */
+ OpClass opClass() const { return staticInst->opClass(); }
+
+ /** Executes the instruction.*/
+ Fault execute();
+
+ unsigned curResSlot;
+
+ unsigned getCurResSlot() { return curResSlot; }
+
+ void setCurResSlot(unsigned slot_num) { curResSlot = slot_num; }
+
+ /** Calls a syscall. */
+ void syscall(int64_t callnum);
+ void prefetch(Addr addr, unsigned flags);
+ void writeHint(Addr addr, int size, unsigned flags);
+ Fault copySrcTranslate(Addr src);
+ Fault copy(Addr dest);
+
+ ////////////////////////////////////////////////////////////
+ //
+ // MULTITHREADING INTERFACE TO CPU MODELS
+ //
+ ////////////////////////////////////////////////////////////
+ virtual void deallocateContext(int thread_num);
+
+ virtual void enableVirtProcElement(unsigned vpe);
+ virtual void disableVirtProcElement(unsigned vpe);
+
+ virtual void enableMultiThreading(unsigned vpe);
+ virtual void disableMultiThreading(unsigned vpe);
+
+ virtual void setThreadRescheduleCondition(uint32_t cond);
+
+ ////////////////////////////////////////////////////////////
+ //
+    // PROGRAM COUNTERS - PC/NPC/NNPC
+ //
+ ////////////////////////////////////////////////////////////
+ /** Read the PC of this instruction. */
+ const Addr readPC() const { return PC; }
+
+ /** Sets the PC of this instruction. */
+ void setPC(Addr pc) { PC = pc; }
+
+ /** Returns the next PC. This could be the speculative next PC if it is
+ * called prior to the actual branch target being calculated.
+ */
+ Addr readNextPC() { return nextPC; }
+
+ /** Set the next PC of this instruction (its actual target). */
+ void setNextPC(uint64_t val) { nextPC = val; }
+
+ /** Returns the next NPC. This could be the speculative next NPC if it is
+ * called prior to the actual branch target being calculated.
+ */
+ Addr readNextNPC() { return nextNPC; }
+
+    /** Set the next NPC of this instruction (its actual target). */
+ void setNextNPC(uint64_t val) { nextNPC = val; }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // BRANCH PREDICTION
+ //
+ ////////////////////////////////////////////////////////////
+ /** Set the predicted target of this current instruction. */
+ void setPredTarg(Addr predicted_PC) { predPC = predicted_PC; }
+
+ /** Returns the predicted target of the branch. */
+ Addr readPredTarg() { return predPC; }
+
+ /** Returns whether the instruction was predicted taken or not. */
+ bool predTaken() { return predictTaken; }
+
+ /** Returns whether the instruction mispredicted. */
+ bool mispredicted()
+ {
+ // Special case since a not-taken, cond. delay slot, effectively
+ // nullifies the delay slot instruction
+ if (isCondDelaySlot() && !predictTaken) {
+ return predPC != nextPC;
+ } else {
+ return predPC != nextNPC;
+ }
+ }
+
+ /** Returns whether the instruction mispredicted. */
+ bool mistargeted() { return predPC != nextNPC; }
+
+ /** Returns the branch target address. */
+ Addr branchTarget() const { return staticInst->branchTarget(PC); }
+
+ /** Checks whether or not this instruction has had its branch target
+ * calculated yet. For now it is not utilized and is hacked to be
+ * always false.
+     * @todo: Actually use this function.
+ */
+ bool doneTargCalc() { return false; }
+
+ void setBranchPred(bool prediction) { predictTaken = prediction; }
+
+ int squashingStage;
+
+ bool predictTaken;
+
+ bool procDelaySlotOnMispred;
+
+ ////////////////////////////////////////////
+ //
+ // MEMORY ACCESS
+ //
+ ////////////////////////////////////////////
+ /**
+ * Does a read to a given address.
+ * @param addr The address to read.
+ * @param data The read's data is written into this parameter.
+ * @param flags The request's flags.
+ * @return Returns any fault due to the read.
+ */
+ template <class T>
+ Fault read(Addr addr, T &data, unsigned flags);
+
+ /**
+ * Does a write to a given address.
+ * @param data The data to be written.
+ * @param addr The address to write to.
+ * @param flags The request's flags.
+ * @param res The result of the write (for load locked/store conditionals).
+ * @return Returns any fault due to the write.
+ */
+ template <class T>
+ Fault write(T data, Addr addr, unsigned flags,
+ uint64_t *res);
+
+    /** Initiates a memory access: calculates the effective address and
+     *  starts the memory access. Only valid for memory operations.
+     */
+ Fault initiateAcc();
+
+ /** Completes a memory access - Only valid for memory operations. */
+ Fault completeAcc(Packet *pkt);
+
+ /** Calculates Eff. Addr. part of a memory instruction. */
+ Fault calcEA();
+
+ /** Read Effective Address from instruction & do memory access */
+ Fault memAccess();
+
+ RequestPtr memReq;
+
+ bool memAddrReady;
+
+ bool validMemAddr()
+ { return memAddrReady; }
+
+ void setMemAddr(Addr addr)
+ { memAddr = addr; memAddrReady = true;}
+
+ void unsetMemAddr()
+ { memAddrReady = false;}
+
+ Addr getMemAddr()
+ { return memAddr; }
+
+ int getMemAccSize() { return staticInst->memAccSize(this); }
+
+ int getMemFlags() { return staticInst->memAccFlags(); }
+
+ /** Sets the effective address. */
+ void setEA(Addr &ea) { instEffAddr = ea; eaCalcDone = true; }
+
+ /** Returns the effective address. */
+ const Addr &getEA() const { return instEffAddr; }
+
+ /** Returns whether or not the eff. addr. calculation has been completed. */
+ bool doneEACalc() { return eaCalcDone; }
+
+ /** Returns whether or not the eff. addr. source registers are ready.
+ * Assume that src registers 1..n-1 are the ones that the
+ * EA calc depends on. (i.e. src reg 0 is the source of the data to be
+ * stored)
+ */
+ bool eaSrcsReady()
+ {
+ for (int i = 1; i < numSrcRegs(); ++i) {
+ if (!_readySrcRegIdx[i])
+ return false;
+ }
+
+ return true;
+ }
+
+ //////////////////////////////////////////////////
+ //
+ // SOURCE-DESTINATION REGISTER INDEXING
+ //
+ //////////////////////////////////////////////////
+ /** Returns the number of source registers. */
+ int8_t numSrcRegs() const { return staticInst->numSrcRegs(); }
+
+ /** Returns the number of destination registers. */
+ int8_t numDestRegs() const { return staticInst->numDestRegs(); }
+
+ // the following are used to track physical register usage
+ // for machines with separate int & FP reg files
+ int8_t numFPDestRegs() const { return staticInst->numFPDestRegs(); }
+ int8_t numIntDestRegs() const { return staticInst->numIntDestRegs(); }
+
+ /** Returns the logical register index of the i'th destination register. */
+ RegIndex destRegIdx(int i) const { return staticInst->destRegIdx(i); }
+
+ /** Returns the logical register index of the i'th source register. */
+ RegIndex srcRegIdx(int i) const { return staticInst->srcRegIdx(i); }
+
+ //////////////////////////////////////////////////
+ //
+ // RENAME/PHYSICAL REGISTER FILE SUPPORT
+ //
+ //////////////////////////////////////////////////
+ /** Returns the physical register index of the i'th destination
+ * register.
+ */
+ PhysRegIndex renamedDestRegIdx(int idx) const
+ {
+ return _destRegIdx[idx];
+ }
+
+ /** Returns the physical register index of the i'th source register. */
+ PhysRegIndex renamedSrcRegIdx(int idx) const
+ {
+ return _srcRegIdx[idx];
+ }
+
+ /** Returns the physical register index of the previous physical register
+ * that remapped to the same logical register index.
+ */
+ PhysRegIndex prevDestRegIdx(int idx) const
+ {
+ return _prevDestRegIdx[idx];
+ }
+
+ /** Returns if a source register is ready. */
+ bool isReadySrcRegIdx(int idx) const
+ {
+ return this->_readySrcRegIdx[idx];
+ }
+
+ /** Records that one of the source registers is ready. */
+ void markSrcRegReady()
+ {
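+        // Once every source register has been marked ready, the instruction
+        // becomes eligible to issue.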
+ if (++readyRegs == numSrcRegs()) {
+ status.set(CanIssue);
+ }
+ }
+
+ /** Marks a specific register as ready. */
+ void markSrcRegReady(RegIndex src_idx)
+ {
+ _readySrcRegIdx[src_idx] = true;
+
+ markSrcRegReady();
+ }
+
+ /** Renames a destination register to a physical register. Also records
+ * the previous physical register that the logical register mapped to.
+ */
+ void renameDestReg(int idx,
+ PhysRegIndex renamed_dest,
+ PhysRegIndex previous_rename)
+ {
+ _destRegIdx[idx] = renamed_dest;
+ _prevDestRegIdx[idx] = previous_rename;
+ }
+
+ /** Renames a source logical register to the physical register which
+ * has/will produce that logical register's result.
+ * @todo: add in whether or not the source register is ready.
+ */
+ void renameSrcReg(int idx, PhysRegIndex renamed_src)
+ {
+ _srcRegIdx[idx] = renamed_src;
+ }
+
+
+ PhysRegIndex readDestRegIdx(int idx)
+ {
+ return _destRegIdx[idx];
+ }
+
+ void setDestRegIdx(int idx, PhysRegIndex dest_idx)
+ {
+ _destRegIdx[idx] = dest_idx;
+ }
+
+ int getDestIdxNum(PhysRegIndex dest_idx)
+ {
+ for (int i=0; i < staticInst->numDestRegs(); i++) {
+ if (_destRegIdx[i] == dest_idx)
+ return i;
+ }
+
+ return -1;
+ }
+
+ PhysRegIndex readSrcRegIdx(int idx)
+ {
+ return _srcRegIdx[idx];
+ }
+
+ void setSrcRegIdx(int idx, PhysRegIndex src_idx)
+ {
+ _srcRegIdx[idx] = src_idx;
+ }
+
+ int getSrcIdxNum(PhysRegIndex src_idx)
+ {
+ for (int i=0; i < staticInst->numSrcRegs(); i++) {
+ if (_srcRegIdx[i] == src_idx)
+ return i;
+ }
+
+ return -1;
+ }
+
+ ////////////////////////////////////////////////////
+ //
+ // SOURCE-DESTINATION REGISTER VALUES
+ //
+ ////////////////////////////////////////////////////
+
+    /** Functions that set an integer or floating point
+ * source register to a value. */
+ void setIntSrc(int idx, uint64_t val);
+ void setFloatSrc(int idx, FloatReg val, int width = 32);
+ void setFloatRegBitsSrc(int idx, uint64_t val);
+
+ uint64_t* getIntSrcPtr(int idx) { return &instSrc[idx].integer; }
+ uint64_t readIntSrc(int idx) { return instSrc[idx].integer; }
+
+    /** These functions read an integer/float/misc. source register
+     * value for the instruction. The instruction's execute function
+     * calls these; they form the interface used by the ISA description
+     * language (which is why the name isn't readIntSrc(...)). Note that
+     * the source reg. value is set using the setSrcReg() function.
+     */
+ IntReg readIntRegOperand(const StaticInst *si, int idx, unsigned tid=0);
+ FloatReg readFloatRegOperand(const StaticInst *si, int idx,
+ int width = TheISA::SingleWidth);
+ FloatRegBits readFloatRegOperandBits(const StaticInst *si, int idx,
+ int width = TheISA::SingleWidth);
+ MiscReg readMiscReg(int misc_reg);
+ MiscReg readMiscRegNoEffect(int misc_reg);
+ MiscReg readMiscRegOperand(const StaticInst *si, int idx);
+ MiscReg readMiscRegOperandNoEffect(const StaticInst *si, int idx);
+
+    /** Returns the result value of the instruction. */
+ uint64_t readIntResult(int idx) { return instResult[idx].val.integer; }
+ float readFloatResult(int idx) { return instResult[idx].val.fp; }
+ double readDoubleResult(int idx) { return instResult[idx].val.dbl; }
+ Tick readResultTime(int idx) { return instResult[idx].tick; }
+
+ uint64_t* getIntResultPtr(int idx) { return &instResult[idx].val.integer; }
+
+ /** This is the interface that an instruction will use to write
+     * its destination register.
+ */
+ void setIntRegOperand(const StaticInst *si, int idx, IntReg val);
+ void setFloatRegOperand(const StaticInst *si, int idx, FloatReg val,
+ int width = TheISA::SingleWidth);
+ void setFloatRegOperandBits(const StaticInst *si, int idx, FloatRegBits val,
+ int width = TheISA::SingleWidth);
+ void setMiscReg(int misc_reg, const MiscReg &val);
+ void setMiscRegNoEffect(int misc_reg, const MiscReg &val);
+ void setMiscRegOperand(const StaticInst *si, int idx, const MiscReg &val);
+ void setMiscRegOperandNoEffect(const StaticInst *si, int idx, const MiscReg &val);
+
+ virtual uint64_t readRegOtherThread(unsigned idx, int tid = -1);
+ virtual void setRegOtherThread(unsigned idx, const uint64_t &val, int tid = -1);
+
+ //////////////////////////////////////////////////////////////
+ //
+ // INSTRUCTION STATUS FLAGS (READ/SET)
+ //
+ //////////////////////////////////////////////////////////////
+ /** Sets this instruction as entered on the CPU Reg Dep Map */
+ void setRegDepEntry() { status.set(RegDepMapEntry); }
+
+ /** Returns whether or not the entry is on the CPU Reg Dep Map */
+ bool isRegDepEntry() const { return status[RegDepMapEntry]; }
+
+ /** Sets this instruction as completed. */
+ void setCompleted() { status.set(Completed); }
+
+ /** Returns whether or not this instruction is completed. */
+ bool isCompleted() const { return status[Completed]; }
+
+ /** Marks the result as ready. */
+ void setResultReady() { status.set(ResultReady); }
+
+ /** Returns whether or not the result is ready. */
+ bool isResultReady() const { return status[ResultReady]; }
+
+ /** Sets this instruction as ready to issue. */
+ void setCanIssue() { status.set(CanIssue); }
+
+ /** Returns whether or not this instruction is ready to issue. */
+ bool readyToIssue() const { return status[CanIssue]; }
+
+ /** Sets this instruction as issued from the IQ. */
+ void setIssued() { status.set(Issued); }
+
+ /** Returns whether or not this instruction has issued. */
+ bool isIssued() const { return status[Issued]; }
+
+ /** Sets this instruction as executed. */
+ void setExecuted() { status.set(Executed); }
+
+ /** Returns whether or not this instruction has executed. */
+ bool isExecuted() const { return status[Executed]; }
+
+ /** Sets this instruction as ready to commit. */
+ void setCanCommit() { status.set(CanCommit); }
+
+ /** Clears this instruction as being ready to commit. */
+ void clearCanCommit() { status.reset(CanCommit); }
+
+ /** Returns whether or not this instruction is ready to commit. */
+ bool readyToCommit() const { return status[CanCommit]; }
+
+ void setAtCommit() { status.set(AtCommit); }
+
+ bool isAtCommit() { return status[AtCommit]; }
+
+ /** Sets this instruction as committed. */
+ void setCommitted() { status.set(Committed); }
+
+ /** Returns whether or not this instruction is committed. */
+ bool isCommitted() const { return status[Committed]; }
+
+ /** Sets this instruction as squashed. */
+ void setSquashed() { status.set(Squashed); }
+
+ /** Returns whether or not this instruction is squashed. */
+ bool isSquashed() const { return status[Squashed]; }
+
+ /** Temporarily sets this instruction as a serialize before instruction. */
+ void setSerializeBefore() { status.set(SerializeBefore); }
+
+ /** Clears the serializeBefore part of this instruction. */
+ void clearSerializeBefore() { status.reset(SerializeBefore); }
+
+ /** Checks if this serializeBefore is only temporarily set. */
+ bool isTempSerializeBefore() { return status[SerializeBefore]; }
+
+ /** Temporarily sets this instruction as a serialize after instruction. */
+ void setSerializeAfter() { status.set(SerializeAfter); }
+
+ /** Clears the serializeAfter part of this instruction.*/
+ void clearSerializeAfter() { status.reset(SerializeAfter); }
+
+ /** Checks if this serializeAfter is only temporarily set. */
+ bool isTempSerializeAfter() { return status[SerializeAfter]; }
+
+ /** Sets the serialization part of this instruction as handled. */
+ void setSerializeHandled() { status.set(SerializeHandled); }
+
+ /** Checks if the serialization part of this instruction has been
+ * handled. This does not apply to the temporary serializing
+ * state; it only applies to this instruction's own permanent
+ * serializing state.
+ */
+ bool isSerializeHandled() { return status[SerializeHandled]; }
+
+ private:
+ /** Instruction effective address.
+ * @todo: Consider if this is necessary or not.
+ */
+ Addr instEffAddr;
+
+ /** Whether or not the effective address calculation is completed.
+ * @todo: Consider if this is necessary or not.
+ */
+ bool eaCalcDone;
+
+ public:
+ /** Whether or not the memory operation is done. */
+ bool memOpDone;
+
+ public:
+ /** Load queue index. */
+ int16_t lqIdx;
+
+ /** Store queue index. */
+ int16_t sqIdx;
+
+ /** Iterator pointing to this BaseDynInst in the list of all insts. */
+ ListIt instListIt;
+
+ /** Returns iterator to this instruction in the list of all insts. */
+ ListIt &getInstListIt() { return instListIt; }
+
+ /** Sets iterator for this instruction in the list of all insts. */
+ void setInstListIt(ListIt _instListIt) { instListIt = _instListIt; }
+
+ /** Count of total number of dynamic instructions. */
+ static int instcount;
+
+#ifdef DEBUG
+ void dumpSNList();
+#endif
+
+ /** Dumps out contents of this BaseDynInst. */
+ void dump();
+
+ /** Dumps out contents of this BaseDynInst into given string. */
+ void dump(std::string &outstring);
+
+
+ //inline int curCount() { return curCount(); }
+};
+
+
+#endif // __CPU_BASE_DYN_INST_HH__
diff --git a/src/cpu/inorder/inorder_trace.cc b/src/cpu/inorder/inorder_trace.cc
new file mode 100644
index 000000000..f12a1b7a9
--- /dev/null
+++ b/src/cpu/inorder/inorder_trace.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ */
+
+#include <iomanip>
+
+#include "cpu/exetrace.hh"
+#include "cpu/inorder/inorder_trace.hh"
+#include "cpu/static_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/thread_context.hh"
+#include "params/InOrderTrace.hh"
+
+using namespace std;
+using namespace TheISA;
+
+namespace Trace {
+
+inline void
+Trace::InOrderTraceRecord::dumpTicks(std::ostream &outs)
+{
+ if (!stageTrace) {
+ ccprintf(outs, "%7d: ", when);
+ } else {
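+        // Stage-by-stage tracing: print each recorded stage cycle, separated
+        // by '-' and terminated with ':'.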
+ ccprintf(outs, "");
+ for (int i=0; i < stageCycle.size(); i++) {
+ if (i < stageCycle.size() - 1)
+ outs << dec << stageCycle[i] << "-";
+ else
+ outs << dec << stageCycle[i] << ":";
+ }
+ }
+}
+
+InOrderTraceRecord *
+InOrderTrace::getInstRecord(unsigned num_stages, bool stage_tracing,
+ ThreadContext *tc)
+{
+ if (!IsOn(ExecEnable))
+ return NULL;
+
+ if (!Trace::enabled)
+ return NULL;
+
+ return new InOrderTraceRecord(num_stages, stage_tracing, tc);
+}
+
+InOrderTraceRecord *
+InOrderTrace::getInstRecord(Tick when, ThreadContext *tc,
+ const StaticInstPtr staticInst, Addr pc,
+ const StaticInstPtr macroStaticInst, MicroPC upc)
+{
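+    // Satisfy the generic InstTracer interface by always returning a
+    // stage-tracing record sized for the full pipeline; the when/pc arguments
+    // are ignored here.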
+ return new InOrderTraceRecord(ThePipeline::NumStages, true, tc);
+}
+
+/* namespace Trace */ }
+
+////////////////////////////////////////////////////////////////////////
+//
+// ExeTracer Simulation Object
+//
+Trace::InOrderTrace *
+InOrderTraceParams::create()
+{
+ return new Trace::InOrderTrace(this);
+}
+
diff --git a/src/cpu/inorder/inorder_trace.hh b/src/cpu/inorder/inorder_trace.hh
new file mode 100644
index 000000000..4338b438c
--- /dev/null
+++ b/src/cpu/inorder/inorder_trace.hh
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ */
+
+#ifndef __INORDERTRACE_HH__
+#define __INORDERTRACE_HH__
+
+#include "base/trace.hh"
+#include "cpu/static_inst.hh"
+#include "sim/host.hh"
+#include "sim/insttracer.hh"
+#include "params/InOrderTrace.hh"
+#include "cpu/exetrace.hh"
+
+class ThreadContext;
+
+
+namespace Trace {
+
+class InOrderTraceRecord : public ExeTracerRecord
+{
+ public:
+ InOrderTraceRecord(unsigned num_stages, bool _stage_tracing,
+ ThreadContext *_thread, bool spec = false)
+ : ExeTracerRecord(0, _thread, NULL, 0, spec)
+ {
+ stageTrace = _stage_tracing;
+ stageCycle.resize(num_stages);
+ }
+
+ // Trace stage-by-stage execution of instructions.
+ bool stageTrace;
+ std::vector<Tick> stageCycle;
+
+ void dumpTicks(std::ostream &outs);
+
+ void
+ setStageCycle(int num_stage, Tick cur_cycle)
+ {
+ if (stageTrace) {
+ stageCycle[num_stage] = cur_cycle;
+ } else {
+ when = cur_cycle;
+ }
+ }
+
+ void
+ setStaticInst(const StaticInstPtr &_staticInst)
+ {
+ staticInst = _staticInst;
+ }
+ void setPC(Addr _pc) { PC = _pc; }
+};
+
+class InOrderTrace : public InstTracer
+{
+ public:
+ InOrderTrace(const InOrderTraceParams *p) : InstTracer(p)
+ {}
+
+ InOrderTraceRecord *
+ getInstRecord(unsigned num_stages, bool stage_tracing, ThreadContext *tc);
+
+ virtual InOrderTraceRecord *getInstRecord(Tick when, ThreadContext *tc,
+ const StaticInstPtr staticInst, Addr pc,
+ const StaticInstPtr macroStaticInst = NULL, MicroPC upc = 0);
+};
+
+/* namespace Trace */ }
+
+#endif // __INORDERTRACE_HH__
diff --git a/src/cpu/inorder/params.hh b/src/cpu/inorder/params.hh
new file mode 100644
index 000000000..67f8f47f0
--- /dev/null
+++ b/src/cpu/inorder/params.hh
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ */
+
+#ifndef __CPU_INORDER_PARAMS_HH__
+#define __CPU_INORDER_PARAMS_HH__
+
+#include "cpu/base.hh"
+
+//Forward declarations
+class FunctionalMemory;
+class Process;
+class MemObject;
+class MemInterface;
+
+/**
+ * This file defines the parameters that will be used for the InOrderCPU.
+ * This must be defined externally so that the Impl can have a params class
+ * defined that it can pass to all of the individual stages.
+ */
+
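+/* Illustrative sketch only: a CPU builder is expected to populate an
+ * InOrderParams instance and hand it to the CPU constructor, which forwards it
+ * to the individual stages. The constructor signature and values below are
+ * assumptions for illustration, not part of this file:
+ *
+ *     InOrderParams *params = new InOrderParams;
+ *     params->stageWidth  = 1;
+ *     params->multLatency = 3;
+ *     InOrderCPU *cpu = new InOrderCPU(params);
+ */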
+class InOrderParams : public BaseCPU::Params
+{
+ public:
+
+ // Workloads
+#if !FULL_SYSTEM
+ std::vector<Process *> workload;
+ Process *process;
+#endif // FULL_SYSTEM
+
+ //
+ // Memory System/Caches
+ //
+ unsigned cachePorts;
+ std::string fetchMemPort;
+ std::string dataMemPort;
+
+ //
+ // Branch predictor (BP & BTB)
+ //
+ std::string predType;
+ unsigned localPredictorSize;
+ unsigned localCtrBits;
+ unsigned localHistoryTableSize;
+ unsigned localHistoryBits;
+ unsigned globalPredictorSize;
+ unsigned globalCtrBits;
+ unsigned globalHistoryBits;
+ unsigned choicePredictorSize;
+ unsigned choiceCtrBits;
+ unsigned BTBEntries;
+ unsigned BTBTagSize;
+ unsigned RASSize;
+
+ // Pipeline Parameters
+ unsigned stageWidth;
+
+ // InOrderCPU Simulation Parameters
+ unsigned instShiftAmt;
+ unsigned activity;
+ unsigned deferRegistration;
+
+ //
+ // Memory Parameters
+ //
+ unsigned memBlockSize;
+
+ //
+ // Multiply Divide Unit
+ //
+    // @NOTE: If more than one MDU is needed and each MDU is to use different
+    // parameters, then the MDU must be defined as its own SimObject so that an
+    // arbitrary number can be instantiated with different parameters.
+ /** Latency & Repeat Rate for Multiply Insts */
+ unsigned multLatency;
+ unsigned multRepeatRate;
+
+ /** Latency & Repeat Rate for 8-bit Divide Insts */
+ unsigned div8Latency;
+ unsigned div8RepeatRate;
+
+ /** Latency & Repeat Rate for 16-bit Divide Insts */
+ unsigned div16Latency;
+ unsigned div16RepeatRate;
+
+ /** Latency & Repeat Rate for 24-bit Divide Insts */
+ unsigned div24Latency;
+ unsigned div24RepeatRate;
+
+ /** Latency & Repeat Rate for 32-bit Divide Insts */
+ unsigned div32Latency;
+ unsigned div32RepeatRate;
+
+
+};
+
+#endif // __CPU_INORDER_PARAMS_HH__
diff --git a/src/cpu/inorder/pipeline_stage.cc b/src/cpu/inorder/pipeline_stage.cc
new file mode 100644
index 000000000..547b42537
--- /dev/null
+++ b/src/cpu/inorder/pipeline_stage.cc
@@ -0,0 +1,1033 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "base/str.hh"
+#include "cpu/inorder/pipeline_stage.hh"
+#include "cpu/inorder/resource_pool.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace ThePipeline;
+
+PipelineStage::PipelineStage(Params *params, unsigned stage_num)
+ : numThreads(ThePipeline::MaxThreads)
+{
+ stageNum = stage_num;
+ stageWidth = ThePipeline::StageWidth;
+
+ _status = Inactive;
+
+ prevStageValid = false;
+ nextStageValid = false;
+
+ // Init. structures
+ for(int tid=0; tid < numThreads; tid++) {
+ stageStatus[tid] = Idle;
+
+ for (int stNum = 0; stNum < NumStages; stNum++) {
+ stalls[tid].stage[stNum] = false;
+ }
+ stalls[tid].resources.clear();
+
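+        // Front-end stages only watch for stall signals from other front-end
+        // stages; back-end stages watch every later stage (see
+        // readStallSignals()).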
+ if (stageNum < BackEndStartStage)
+ lastStallingStage[tid] = BackEndStartStage - 1;
+ else
+ lastStallingStage[tid] = NumStages - 1;
+ }
+
+ stageBufferMax = ThePipeline::interStageBuffSize[stage_num];
+}
+
+
+void
+PipelineStage::init(Params *params, unsigned stage_num)
+{
+ stageNum = stage_num;
+ stageWidth = ThePipeline::StageWidth;
+
+ _status = Inactive;
+
+ numThreads = ThePipeline::MaxThreads;
+
+ prevStageValid = false;
+ nextStageValid = false;
+
+ // Init. structures
+ for(int tid=0; tid < numThreads; tid++) {
+ stageStatus[tid] = Idle;
+
+ for (int stNum = 0; stNum < NumStages; stNum++) {
+ stalls[tid].stage[stNum] = false;
+ }
+ stalls[tid].resources.clear();
+
+ if (stageNum < BackEndStartStage)
+ lastStallingStage[tid] = BackEndStartStage - 1;
+ else
+ lastStallingStage[tid] = NumStages - 1;
+ }
+
+ stageBufferMax = ThePipeline::interStageBuffSize[stage_num];
+}
+
+
+std::string
+PipelineStage::name() const
+{
+ return cpu->name() + ".stage-" + to_string(stageNum);
+}
+
+
+void
+PipelineStage::regStats()
+{
+/* stageIdleCycles
+ .name(name() + ".IdleCycles")
+ .desc("Number of cycles stage is idle")
+ .prereq(stageIdleCycles);
+ stageBlockedCycles
+ .name(name() + ".BlockedCycles")
+ .desc("Number of cycles stage is blocked")
+ .prereq(stageBlockedCycles);
+ stageRunCycles
+ .name(name() + ".RunCycles")
+ .desc("Number of cycles stage is running")
+ .prereq(stageRunCycles);
+ stageUnblockCycles
+ .name(name() + ".UnblockCycles")
+ .desc("Number of cycles stage is unblocking")
+ .prereq(stageUnblockCycles);
+ stageSquashCycles
+ .name(name() + ".SquashCycles")
+ .desc("Number of cycles stage is squashing")
+ .prereq(stageSquashCycles);
+ stageProcessedInsts
+ .name(name() + ".ProcessedInsts")
+ .desc("Number of instructions handled by stage")
+ .prereq(stageProcessedInsts);
+ stageSquashedInsts
+ .name(name() + ".SquashedInsts")
+ .desc("Number of squashed instructions handled by stage")
+ .prereq(stageSquashedInsts);*/
+}
+
+
+void
+PipelineStage::setCPU(InOrderCPU *cpu_ptr)
+{
+ cpu = cpu_ptr;
+
+ dummyBufferInst = new InOrderDynInst(cpu_ptr, NULL, 0, 0);
+
+ DPRINTF(InOrderStage, "Set CPU pointer.\n");
+
+ tracer = dynamic_cast<Trace::InOrderTrace *>(cpu->getTracer());
+}
+
+
+void
+PipelineStage::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
+{
+ DPRINTF(InOrderStage, "Setting time buffer pointer.\n");
+ timeBuffer = tb_ptr;
+
+ // Setup wire to write information back to fetch.
+ toPrevStages = timeBuffer->getWire(0);
+
+ // Create wires to get information from proper places in time buffer.
+ fromNextStages = timeBuffer->getWire(-1);
+}
+
+
+void
+PipelineStage::setPrevStageQueue(TimeBuffer<InterStageStruct> *prev_stage_ptr)
+{
+ DPRINTF(InOrderStage, "Setting previous stage queue pointer.\n");
+ prevStageQueue = prev_stage_ptr;
+
+ // Setup wire to read information from fetch queue.
+ prevStage = prevStageQueue->getWire(-1);
+
+ prevStageValid = true;
+}
+
+
+
+void
+PipelineStage::setNextStageQueue(TimeBuffer<InterStageStruct> *next_stage_ptr)
+{
+ DPRINTF(InOrderStage, "Setting next stage pointer.\n");
+ nextStageQueue = next_stage_ptr;
+
+ // Setup wire to write information to proper place in stage queue.
+ nextStage = nextStageQueue->getWire(0);
+
+ nextStageValid = true;
+}
+
+
+
+void
+PipelineStage::setActiveThreads(list<unsigned> *at_ptr)
+{
+ DPRINTF(InOrderStage, "Setting active threads list pointer.\n");
+ activeThreads = at_ptr;
+}
+
+/*inline void
+PipelineStage::switchToActive()
+{
+ if (_status == Inactive) {
+ DPRINTF(Activity, "Activating stage.\n");
+
+ cpu->activateStage(stageNum);
+
+ _status = Active;
+ }
+}*/
+
+void
+PipelineStage::switchOut()
+{
+ // Stage can immediately switch out.
+ cpu->signalSwitched();
+}
+
+
+void
+PipelineStage::takeOverFrom()
+{
+ _status = Inactive;
+
+ // Be sure to reset state and clear out any old instructions.
+ for (int i = 0; i < numThreads; ++i) {
+ stageStatus[i] = Idle;
+
+ for (int stNum = 0; stNum < NumStages; stNum++) {
+ stalls[i].stage[stNum] = false;
+ }
+
+ stalls[i].resources.clear();
+
+ while (!insts[i].empty())
+ insts[i].pop();
+
+ while (!skidBuffer[i].empty())
+ skidBuffer[i].pop();
+ }
+ wroteToTimeBuffer = false;
+}
+
+
+
+bool
+PipelineStage::checkStall(unsigned tid) const
+{
+ bool ret_val = false;
+
+ // Only check pipeline stall from stage directly following this stage
+ if (stalls[tid].stage[stageNum + 1]) {
+        DPRINTF(InOrderStage,"[tid:%i]: Stall from Stage %i detected.\n",
+ tid, stageNum + 1);
+ ret_val = true;
+ }
+
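+    // The stage also stalls while any resource request it issued is still
+    // registered here via setResStall() and not yet cleared by unsetResStall().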
+ if (!stalls[tid].resources.empty()) {
+ string stall_src;
+
+ for (int i=0; i < stalls[tid].resources.size(); i++) {
+ stall_src += stalls[tid].resources[i]->res->name() + ":";
+ }
+
+        DPRINTF(InOrderStage,"[tid:%i]: Stall from resources (%s) detected.\n",
+ tid, stall_src);
+ ret_val = true;
+ }
+
+ return ret_val;
+}
+
+
+void
+PipelineStage::removeStalls(unsigned tid)
+{
+ for (int stNum = 0; stNum < NumStages; stNum++) {
+ stalls[tid].stage[stNum] = false;
+ }
+ stalls[tid].resources.clear();
+}
+
+inline bool
+PipelineStage::prevStageInstsValid()
+{
+ return prevStage->size > 0;
+}
+
+bool
+PipelineStage::isBlocked(unsigned tid)
+{
+ return stageStatus[tid] == Blocked;
+}
+
+bool
+PipelineStage::block(unsigned tid)
+{
+ DPRINTF(InOrderStage, "[tid:%d]: Blocking, sending block signal back to previous stages.\n", tid);
+
+ // Add the current inputs to the skid buffer so they can be
+ // reprocessed when this stage unblocks.
+ // skidInsert(tid);
+
+    // If the stage status is already Blocked or Unblocking, then it has not
+    // yet signalled the previous stages to unblock. In that case, there is no
+    // need to tell them to block again.
+ if (stageStatus[tid] != Blocked) {
+ // Set the status to Blocked.
+ stageStatus[tid] = Blocked;
+
+ if (stageStatus[tid] != Unblocking) {
+ if (prevStageValid)
+ toPrevStages->stageBlock[stageNum][tid] = true;
+ wroteToTimeBuffer = true;
+ }
+
+ return true;
+ }
+
+
+ return false;
+}
+
+void
+PipelineStage::blockDueToBuffer(unsigned tid)
+{
+ DPRINTF(InOrderStage, "[tid:%d]: Blocking instructions from passing to next stage.\n", tid);
+
+ if (stageStatus[tid] != Blocked) {
+ // Set the status to Blocked.
+ stageStatus[tid] = Blocked;
+
+ if (stageStatus[tid] != Unblocking) {
+ wroteToTimeBuffer = true;
+ }
+ }
+}
+
+bool
+PipelineStage::unblock(unsigned tid)
+{
+ // Stage is done unblocking only if the skid buffer is empty.
+ if (skidBuffer[tid].empty()) {
+ DPRINTF(InOrderStage, "[tid:%u]: Done unblocking.\n", tid);
+
+ if (prevStageValid)
+ toPrevStages->stageUnblock[stageNum][tid] = true;
+
+ wroteToTimeBuffer = true;
+
+ stageStatus[tid] = Running;
+
+ return true;
+ }
+
+ DPRINTF(InOrderStage, "[tid:%u]: Currently unblocking.\n", tid);
+ return false;
+}
+
+void
+PipelineStage::squashDueToBranch(DynInstPtr &inst, unsigned tid)
+{
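+    // If an older squash point was already recorded for this thread during
+    // this cycle, that squash supersedes this one and it can be ignored.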
+ if (cpu->squashSeqNum[tid] < inst->seqNum &&
+ cpu->lastSquashCycle[tid] == curTick){
+ DPRINTF(Resource, "Ignoring [sn:%i] squash signal due to another stage's squash "
+ "signal for after [sn:%i].\n", inst->seqNum, cpu->squashSeqNum[tid]);
+ } else {
+ // Send back mispredict information.
+ toPrevStages->stageInfo[stageNum][tid].branchMispredict = true;
+ toPrevStages->stageInfo[stageNum][tid].predIncorrect = true;
+ toPrevStages->stageInfo[stageNum][tid].doneSeqNum = inst->seqNum;
+ toPrevStages->stageInfo[stageNum][tid].squash = true;
+ toPrevStages->stageInfo[stageNum][tid].nextPC = inst->readPredTarg();
+ toPrevStages->stageInfo[stageNum][tid].branchTaken = inst->readNextNPC() !=
+ (inst->readNextPC() + sizeof(TheISA::MachInst));
+ toPrevStages->stageInfo[stageNum][tid].bdelayDoneSeqNum = inst->bdelaySeqNum;
+
+ DPRINTF(InOrderStage, "Target being re-set to %08p\n", inst->readPredTarg());
+ InstSeqNum squash_seq_num = inst->bdelaySeqNum;
+
+ DPRINTF(InOrderStage, "[tid:%i]: Squashing after [sn:%i], due to [sn:%i] "
+ "branch.\n", tid, squash_seq_num, inst->seqNum);
+
+ // Save squash num for later stage use
+ cpu->squashSeqNum[tid] = squash_seq_num;
+ cpu->lastSquashCycle[tid] = curTick;
+ }
+}
+
+void
+PipelineStage::squashPrevStageInsts(InstSeqNum squash_seq_num,
+ unsigned tid)
+{
+ DPRINTF(InOrderStage, "[tid:%i]: Removing instructions from "
+ "incoming stage queue.\n", tid);
+
+ for (int i=0; i < prevStage->size; i++) {
+ if (prevStage->insts[i]->threadNumber == tid &&
+ prevStage->insts[i]->seqNum > squash_seq_num) {
+ DPRINTF(InOrderStage, "[tid:%i]: Squashing instruction, "
+ "[sn:%i] PC %08p.\n",
+ tid,
+ prevStage->insts[i]->seqNum,
+ prevStage->insts[i]->readPC());
+ prevStage->insts[i]->setSquashed();
+ }
+ }
+}
+
+void
+PipelineStage::squash(InstSeqNum squash_seq_num, unsigned tid)
+{
+ // Set status to squashing.
+ stageStatus[tid] = Squashing;
+
+ squashPrevStageInsts(squash_seq_num, tid);
+
+ DPRINTF(InOrderStage, "[tid:%i]: Removing instructions from incoming stage skidbuffer.\n",
+ tid);
+ while (!skidBuffer[tid].empty()) {
+ if (skidBuffer[tid].front()->seqNum <= squash_seq_num) {
+            DPRINTF(InOrderStage, "[tid:%i]: Cannot remove skidBuffer "
+                    "instructions before delay slot [sn:%i]. %i insts "
+                    "left.\n", tid, squash_seq_num,
+                    skidBuffer[tid].size());
+ break;
+ }
+ DPRINTF(InOrderStage, "[tid:%i]: Removing instruction, [sn:%i] PC %08p.\n",
+ tid, skidBuffer[tid].front()->seqNum, skidBuffer[tid].front()->PC);
+ skidBuffer[tid].pop();
+ }
+
+}
+
+int
+PipelineStage::stageBufferAvail()
+{
+ unsigned total = 0;
+
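+    // Free space = buffer capacity minus what is already skid-buffered for
+    // every thread, minus what the previous stage has queued to deliver.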
+ for (int i=0; i < ThePipeline::MaxThreads; i++) {
+ total += skidBuffer[i].size();
+ }
+
+ int incoming_insts = (prevStageValid) ?
+ cpu->pipelineStage[stageNum-1]->nextStage->size :
+ 0;
+
+ int avail = stageBufferMax - total - incoming_insts;
+
+ assert(avail >= 0);
+
+ return avail;
+}
+
+bool
+PipelineStage::canSendInstToNextStage()
+{
+ bool buffer_avail = false;
+
+ if (nextStageValid) {
+ buffer_avail = (cpu->pipelineStage[stageNum+1]->stageBufferAvail() >= 1);
+ }
+
+ if (!buffer_avail && nextStageValid) {
+ DPRINTF(InOrderStall, "STALL: No room in stage %i buffer.\n", stageNum + 1);
+ }
+
+ return buffer_avail;
+}
+
+void
+PipelineStage::skidInsert(unsigned tid)
+{
+ DynInstPtr inst = NULL;
+
+ while (!insts[tid].empty()) {
+ inst = insts[tid].front();
+
+ insts[tid].pop();
+
+ assert(tid == inst->threadNumber);
+
+ DPRINTF(InOrderStage,"[tid:%i]: Inserting [sn:%lli] PC:%#x into stage skidBuffer %i\n",
+ tid, inst->seqNum, inst->readPC(), inst->threadNumber);
+
+ skidBuffer[tid].push(inst);
+ }
+}
+
+
+
+bool
+PipelineStage::skidsEmpty()
+{
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ while (threads != (*activeThreads).end()) {
+ if (!skidBuffer[*threads++].empty())
+ return false;
+ }
+
+ return true;
+}
+
+
+
+void
+PipelineStage::updateStatus()
+{
+ bool any_unblocking = false;
+
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ threads = (*activeThreads).begin();
+
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ if (stageStatus[tid] == Unblocking) {
+ any_unblocking = true;
+ break;
+ }
+ }
+
+ // Stage will have activity if it's unblocking.
+ if (any_unblocking) {
+ if (_status == Inactive) {
+ _status = Active;
+
+ DPRINTF(Activity, "Activating stage.\n");
+
+ cpu->activateStage(stageNum);
+ }
+ } else {
+ // If it's not unblocking, then stage will not have any internal
+ // activity. Switch it to inactive.
+ if (_status == Active) {
+ _status = Inactive;
+ DPRINTF(Activity, "Deactivating stage.\n");
+
+ cpu->deactivateStage(stageNum);
+ }
+ }
+}
+
+
+
+void
+PipelineStage::sortInsts()
+{
+ if (prevStageValid) {
+ int insts_from_prev_stage = prevStage->size;
+
+ DPRINTF(InOrderStage, "%i insts available from previous stage.\n",
+ insts_from_prev_stage);
+
+ for (int i = 0; i < insts_from_prev_stage; ++i) {
+
+ if (prevStage->insts[i]->isSquashed()) {
+ DPRINTF(InOrderStage, "[tid:%i]: Ignoring squashed [sn:%i], not inserting "
+ "into stage buffer.\n",
+ prevStage->insts[i]->readTid(),
+ prevStage->insts[i]->seqNum);
+
+ continue;
+ }
+
+ DPRINTF(InOrderStage, "[tid:%i]: Inserting [sn:%i] into stage buffer.\n",
+ prevStage->insts[i]->readTid(),
+ prevStage->insts[i]->seqNum);
+
+ int tid = prevStage->insts[i]->threadNumber;
+
+ DynInstPtr inst = prevStage->insts[i];
+
+ skidBuffer[tid].push(prevStage->insts[i]);
+
+ prevStage->insts[i] = dummyBufferInst;
+
+ }
+ }
+}
+
+
+
+void
+PipelineStage::readStallSignals(unsigned tid)
+{
+ for (int stage_idx = stageNum+1; stage_idx <= lastStallingStage[tid];
+ stage_idx++) {
+
+ // Check for Stage Blocking Signal
+ if (fromNextStages->stageBlock[stage_idx][tid]) {
+ stalls[tid].stage[stage_idx] = true;
+ }
+
+ // Check for Stage Unblocking Signal
+ if (fromNextStages->stageUnblock[stage_idx][tid]) {
+ //assert(fromNextStages->stageBlock[stage_idx][tid]);
+ stalls[tid].stage[stage_idx] = false;
+ }
+ }
+}
+
+
+
+bool
+PipelineStage::checkSignalsAndUpdate(unsigned tid)
+{
+ // Check if there's a squash signal, squash if there is.
+ // Check stall signals, block if necessary.
+ // If status was blocked
+ // Check if stall conditions have passed
+ // if so then go to unblocking
+ // If status was Squashing
+ // check if squashing is not high. Switch to running this cycle.
+
+ // Update the per thread stall statuses.
+ readStallSignals(tid);
+
+ // Check for squash from later pipeline stages
+ for (int stage_idx=stageNum; stage_idx < NumStages; stage_idx++) {
+ if (fromNextStages->stageInfo[stage_idx][tid].squash) {
+ DPRINTF(InOrderStage, "[tid:%u]: Squashing instructions due to squash "
+ "from stage %u.\n", tid, stage_idx);
+ InstSeqNum squash_seq_num = fromNextStages->
+ stageInfo[stage_idx][tid].bdelayDoneSeqNum;
+ squash(squash_seq_num, tid);
+ break; //return true;
+ }
+ }
+
+ if (checkStall(tid)) {
+ return block(tid);
+ }
+
+ if (stageStatus[tid] == Blocked) {
+ DPRINTF(InOrderStage, "[tid:%u]: Done blocking, switching to unblocking.\n",
+ tid);
+
+ stageStatus[tid] = Unblocking;
+
+ unblock(tid);
+
+ return true;
+ }
+
+ if (stageStatus[tid] == Squashing) {
+ if (!skidBuffer[tid].empty()) {
+ DPRINTF(InOrderStage, "[tid:%u]: Done squashing, switching to unblocking.\n",
+ tid);
+
+ stageStatus[tid] = Unblocking;
+ } else {
+ // Switch status to running if stage isn't being told to block or
+ // squash this cycle.
+ DPRINTF(InOrderStage, "[tid:%u]: Done squashing, switching to running.\n",
+ tid);
+
+ stageStatus[tid] = Running;
+ }
+
+ return true;
+ }
+
+ // If we've reached this point, we have not gotten any signals that
+    // cause stage to change its status. Stage remains the same as before.
+ return false;
+}
+
+
+
+void
+PipelineStage::tick()
+{
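+    // One cycle of work for this stage: pull newly arrived instructions into
+    // the skid buffers, process each active thread, then record any status
+    // change and activity.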
+ wroteToTimeBuffer = false;
+
+ bool status_change = false;
+
+ toNextStageIndex = 0;
+
+ sortInsts();
+
+ processStage(status_change);
+
+ if (status_change) {
+ updateStatus();
+ }
+
+ if (wroteToTimeBuffer) {
+ DPRINTF(Activity, "Activity this cycle.\n");
+ cpu->activityThisCycle();
+ }
+
+ DPRINTF(InOrderStage, "\n\n");
+}
+
+bool
+PipelineStage::outOfOrderValid()
+{
+ //@TODO: Define this function when OOO is valid
+ return false;
+}
+
+void
+PipelineStage::setResStall(ResReqPtr res_req, unsigned tid)
+{
+ DPRINTF(InOrderStage, "Inserting stall from %s.\n", res_req->res->name());
+ stalls[tid].resources.push_back(res_req);
+}
+
+void
+PipelineStage::unsetResStall(ResReqPtr res_req, unsigned tid)
+{
+ // Search through stalls to find stalling request and then
+ // remove it
+ vector<ResReqPtr>::iterator req_it = stalls[tid].resources.begin();
+ vector<ResReqPtr>::iterator req_end = stalls[tid].resources.end();
+
+ while (req_it != req_end) {
+ if( (*req_it)->res == res_req->res && // Same Resource
+ (*req_it)->inst == res_req->inst && // Same Instruction
+ (*req_it)->getSlot() == res_req->getSlot()) {
+ DPRINTF(InOrderStage, "[tid:%u]: Clearing stall by %s.\n",
+ tid, res_req->res->name());
+ stalls[tid].resources.erase(req_it);
+ break;
+ }
+
+ req_it++;
+ }
+
+ if (stalls[tid].resources.size() == 0) {
+ DPRINTF(InOrderStage, "[tid:%u]: There are no remaining resource stalls.\n",
+ tid);
+ }
+}
+
+// @TODO: Update how threads are handled in the CPU. Maybe threads shouldn't be
+// handled one at a time, but instead first-come, first-served by instruction?
+// Open questions: how should a pipeline stage handle thread-specific stalls &
+// pipeline squashes?
+void
+PipelineStage::processStage(bool &status_change)
+{
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ //Check stall and squash signals.
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ DPRINTF(InOrderStage,"Processing [tid:%i]\n",tid);
+ status_change = checkSignalsAndUpdate(tid) || status_change;
+
+ processThread(status_change, tid);
+ }
+
+ if (nextStageValid) {
+ DPRINTF(InOrderStage, "%i insts now available for stage %i.\n",
+ nextStage->size, stageNum + 1);
+ }
+
+ DPRINTF(InOrderStage, "%i insts left in stage buffer.\n", stageBufferMax - stageBufferAvail());
+
+}
+
+void
+PipelineStage::processThread(bool &status_change, unsigned tid)
+{
+ // If status is Running or idle,
+ // call stageInsts()
+ // If status is Unblocking,
+ // buffer any instructions coming from fetch
+ // continue trying to empty skid buffer
+ // check if stall conditions have passed
+
+ if (stageStatus[tid] == Blocked) {
+ ;//++stageBlockedCycles;
+ } else if (stageStatus[tid] == Squashing) {
+ ;//++stageSquashCycles;
+ }
+
+    // The stage should try to process as many instructions as its bandwidth
+    // will allow, as long as it is not currently blocked.
+ if (stageStatus[tid] == Running ||
+ stageStatus[tid] == Idle) {
+ DPRINTF(InOrderStage, "[tid:%u]: Not blocked, so attempting to run "
+ "stage.\n",tid);
+
+ processInsts(tid);
+ } else if (stageStatus[tid] == Unblocking) {
+ // Make sure that the skid buffer has something in it if the
+ // status is unblocking.
+ assert(!skidsEmpty());
+
+ // If the status was unblocking, then instructions from the skid
+ // buffer were used. Remove those instructions and handle
+ // the rest of unblocking.
+ processInsts(tid);
+
+ if (prevStageValid && prevStageInstsValid()) {
+ // Add the current inputs to the skid buffer so they can be
+ // reprocessed when this stage unblocks.
+ skidInsert(tid);
+ }
+
+ status_change = unblock(tid) || status_change;
+ }
+}
+
+
+void
+PipelineStage::processInsts(unsigned tid)
+{
+    // All instructions for this stage are processed out of the per-thread
+    // skid buffer; sortInsts() has already moved incoming instructions there.
+ int insts_available = skidBuffer[tid].size();
+
+ std::queue<DynInstPtr> &insts_to_stage = skidBuffer[tid];
+
+ if (insts_available == 0) {
+ DPRINTF(InOrderStage, "[tid:%u]: Nothing to do, breaking out"
+ " early.\n",tid);
+ // Should I change the status to idle?
+ //++stageIdleCycles;
+ return;
+ }
+
+ DynInstPtr inst;
+ bool last_req_completed = true;
+
+ int insts_processed = 0;
+
+ DPRINTF(InOrderStage, "[tid:%u]: Sending instructions to stage %u.\n", tid,
+ stageNum+1);
+
+    // Keep processing instructions while all of these conditions hold:
+    while (insts_available > 0 && //1. there are instructions to process
+           insts_processed < stageWidth && //2. the stage can process more this cycle
+           (canSendInstToNextStage() || !nextStageValid) && //3. there is room in the next stage
+           last_req_completed) { //4. the last instruction's requests completed
+ assert(!insts_to_stage.empty());
+
+ inst = insts_to_stage.front();
+
+ DPRINTF(InOrderStage, "[tid:%u]: Processing instruction [sn:%lli] with "
+ "PC %#x\n",
+ tid, inst->seqNum, inst->readPC());
+
+ if (inst->isSquashed()) {
+ DPRINTF(InOrderStage, "[tid:%u]: Instruction %i with PC %#x is "
+ "squashed, skipping.\n",
+ tid, inst->seqNum, inst->readPC());
+
+ //++stageSquashedInsts;
+
+ insts_to_stage.pop();
+
+ --insts_available;
+
+ continue;
+ }
+
+
+ last_req_completed = processInstSchedule(inst);
+
+
+ insts_processed++;
+
+        // Don't let the instruction pass to the next stage if it hasn't
+        // completed all of its requests for this stage.
+ if (!last_req_completed && !outOfOrderValid())
+ continue;
+
+ insts_to_stage.pop();
+
+ DPRINTF(InOrderStage, "Marking [tid:%i] [sn:%i] for insertion into next stage buffer.\n",
+ tid, inst->seqNum);
+
+ // Send to Next Stage or Break Loop
+ if (!sendInstToNextStage(inst))
+            break;
+
+
+ //++stageProcessedInsts;
+ --insts_available;
+ }
+
+ // If we didn't process all instructions, then we will need to block
+ // and put all those instructions into the skid buffer.
+ // @TODO:-IN-PROGRESS-:Evaluating when stages should block/unblock
+ // for stage stalls...
+ if (!insts_to_stage.empty()) {
+ blockDueToBuffer(tid);
+ }
+
+ // Record that stage has written to the time buffer for activity
+ // tracking.
+ if (toNextStageIndex) {
+ wroteToTimeBuffer = true;
+ }
+}
+
+bool
+PipelineStage::processInstSchedule(DynInstPtr inst)
+{
+ bool last_req_completed = true;
+ int tid;
+
+ tid = inst->readTid();
+
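+    // Walk the instruction's resource schedule, issuing a request to each
+    // resource assigned to this stage; stop at the first request that does
+    // not complete this cycle.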
+ if (inst->nextResStage() == stageNum) {
+ int res_stage_num = inst->nextResStage();
+
+ while (res_stage_num == stageNum) {
+ int res_num = inst->nextResource();
+
+
+ DPRINTF(InOrderStage, "[tid:%i]: [sn:%i]: sending request to %s.\n",
+ tid, inst->seqNum, cpu->resPool->name(res_num));
+
+ ResReqPtr req = cpu->resPool->request(res_num, inst);
+
+ if (req->isCompleted()) {
+ DPRINTF(InOrderStage, "[tid:%i]: [sn:%i] request to %s completed.\n",
+ tid, inst->seqNum, cpu->resPool->name(res_num));
+
+ if (req->fault == NoFault) {
+ inst->popSchedEntry();
+ } else {
+ panic("%i: encountered %s fault!\n",
+ curTick, req->fault->name());
+ }
+ } else {
+ DPRINTF(InOrderStage, "[tid:%i]: [sn:%i] request to %s failed.\n",
+ tid, inst->seqNum, cpu->resPool->name(res_num));
+
+ last_req_completed = false;
+
+ break;
+ }
+
+ res_stage_num = inst->nextResStage();
+ }
+ } else {
+        DPRINTF(InOrderStage, "[tid:%u]: Instruction [sn:%i] with PC %#x "
+                "needed no resources in stage %i.\n",
+                tid, inst->seqNum, inst->readPC(), stageNum);
+ }
+
+ return last_req_completed;
+}
+
+bool
+PipelineStage::nextStageQueueValid(int stage_num)
+{
+ return cpu->pipelineStage[stage_num]->nextStageValid;
+}
+
+
+bool
+PipelineStage::sendInstToNextStage(DynInstPtr inst)
+{
+ // Update Next Stage Variable in Instruction
+ if (inst->nextStage == stageNum)
+ inst->nextStage++;
+
+ bool success = false;
+ int tid = inst->readTid();
+ int next_stage = inst->nextStage;
+ int prev_stage = next_stage - 1;
+
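+    // Only insert into the next stage's queue if that queue exists, and skip
+    // the insertion if this instruction falls after the squash point of a
+    // squash signaled on this same cycle.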
+ if (nextStageQueueValid(inst->nextStage - 1)) {
+ if (inst->seqNum > cpu->squashSeqNum[tid] &&
+ curTick == cpu->lastSquashCycle[tid]) {
+ DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: squashed, skipping insertion "
+ "into stage %i queue.\n", tid, inst->seqNum, inst->nextStage);
+ } else {
+ DPRINTF(InOrderStage, "[tid:%u] %i slots available in next stage buffer.\n",
+ tid, cpu->pipelineStage[next_stage]->stageBufferAvail());
+
+ DPRINTF(InOrderStage, "[tid:%u]: [sn:%i]: being placed into "
+ "index %i stage %i queue.\n",
+ tid, inst->seqNum, toNextStageIndex, inst->nextStage);
+
+ int next_stage_idx = cpu->pipelineStage[prev_stage]->nextStage->size;
+
+ // Place instructions in inter-stage communication struct for the next
+ // pipeline stage to read next cycle
+ cpu->pipelineStage[prev_stage]->nextStage->insts[next_stage_idx] = inst;
+
+ ++(cpu->pipelineStage[prev_stage]->nextStage->size);
+
+ ++toNextStageIndex;
+
+ success = true;
+
+ // Take note of trace data for this inst & stage
+ if (inst->traceData) {
+ inst->traceData->setStageCycle(stageNum, curTick);
+ }
+
+ }
+ }
+
+ return success;
+}
+
+void
+PipelineStage::dumpInsts()
+{
+    std::cerr << "Insts in Stage " << stageNum << " skid buffers:" << std::endl;
+
+ for (int tid=0; tid < ThePipeline::MaxThreads; tid++) {
+
+ std::queue<DynInstPtr> copy_buff(skidBuffer[tid]);
+
+ while (!copy_buff.empty()) {
+ DynInstPtr inst = copy_buff.front();
+
+ cprintf("Inst. PC:%#x\n[tid:%i]\n[sn:%i]\n\n",
+ inst->readPC(), inst->threadNumber, inst->seqNum);
+
+ copy_buff.pop();
+ }
+ }
+
+}
diff --git a/src/cpu/inorder/pipeline_stage.hh b/src/cpu/inorder/pipeline_stage.hh
new file mode 100644
index 000000000..833547704
--- /dev/null
+++ b/src/cpu/inorder/pipeline_stage.hh
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_PIPELINE_STAGE_HH__
+#define __CPU_INORDER_PIPELINE_STAGE_HH__
+
+#include <queue>
+#include <vector>
+
+#include "base/statistics.hh"
+#include "base/timebuf.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/comm.hh"
+#include "params/InOrderCPU.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+
+class InOrderCPU;
+
+class PipelineStage
+{
+ protected:
+ typedef ThePipeline::Params Params;
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ /** Overall stage status. Used to determine if the CPU can
+ * deschedule itself due to a lack of activity.
+ */
+ enum StageStatus {
+ Active,
+ Inactive
+ };
+
+ /** Individual thread status. */
+ enum ThreadStatus {
+ Running,
+ Idle,
+ StartSquash,
+ Squashing,
+ Blocked,
+ Unblocking,
+ MemWaitResponse,
+ MemWaitRetry,
+ MemAccessComplete
+ };
+
+ protected:
+ /** The Number of This Pipeline Stage */
+ unsigned stageNum;
+
+    /** The width of the stage, in instructions. */
+ unsigned stageWidth;
+
+ /** Number of Threads*/
+ unsigned numThreads;
+
+ /** Stage status. */
+ StageStatus _status;
+
+ /** Per-thread status. */
+ ThreadStatus stageStatus[ThePipeline::MaxThreads];
+
+ public:
+ PipelineStage(Params *params, unsigned stage_num);
+
+ /** MUST use init() function if this constructor is used. */
+ PipelineStage() { }
+
+ virtual ~PipelineStage() { }
+
+ /** PipelineStage initialization. */
+ void init(Params *params, unsigned stage_num);
+
+    /** Returns the name of the stage. */
+ std::string name() const;
+
+ /** Registers statistics. */
+ void regStats();
+
+ /** Sets CPU pointer. */
+ virtual void setCPU(InOrderCPU *cpu_ptr);
+
+ virtual void scheduleStageStart(int delay, int tid) { }
+
+ /** Sets the main backwards communication time buffer pointer. */
+ void setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr);
+
+    /** Sets pointer to the queue coming from the previous stage. */
+ void setPrevStageQueue(TimeBuffer<InterStageStruct> *prev_stage_ptr);
+
+ /** Sets pointer to time buffer used to communicate to the next stage. */
+ void setNextStageQueue(TimeBuffer<InterStageStruct> *next_stage_ptr);
+
+ /** Sets pointer to list of active threads. */
+ void setActiveThreads(std::list<unsigned> *at_ptr);
+
+ bool nextStageQueueValid(int stage_num);
+
+ bool isBlocked(unsigned tid);
+
+ /** Changes the status of this stage to active, and indicates this
+ * to the CPU.
+ */
+ //inline void switchToActive();
+
+ /** Changes the status of this stage to inactive, and indicates
+ * this to the CPU.
+ */
+ //inline void switchToInactive();
+
+    /** Switches out the pipeline stage. */
+ void switchOut();
+
+ /** Takes over from another CPU's thread. */
+ void takeOverFrom();
+
+ /** Ticks stage, processing all input signals and executing as many
+ * instructions as possible.
+ */
+ virtual void tick();
+
+ /** Is out of order processing valid? */
+ bool outOfOrderValid();
+
+ /** Set a resource stall in the pipeline-stage */
+ void setResStall(ResReqPtr res_req, unsigned tid);
+
+ /** Unset a resource stall in the pipeline-stage */
+ void unsetResStall(ResReqPtr res_req, unsigned tid);
+
+    /** Remove all stall signals for a particular thread. */
+ virtual void removeStalls(unsigned tid);
+
+    /** Returns the amount of room left in the stage buffer. */
+ int stageBufferAvail();
+
+ protected:
+ /** Evaluate Stage Conditions and then process stage */
+ virtual void processStage(bool &status_change);
+
+ /** Determines what to do based on stage's current status.
+     * @param status_change Set by this function if there was a status
+     * change (i.e., switching from blocking to unblocking).
+ * @param tid Thread id to stage instructions from.
+ */
+ virtual void processThread(bool &status_change, unsigned tid);
+
+    /** Processes instructions from the skid buffer, running each one
+     * through the resources it has scheduled for this stage and passing
+     * completed instructions on to the next stage.
+     */
+ virtual void processInsts(unsigned tid);
+
+ /** Process all resources on an instruction's resource schedule */
+ virtual bool processInstSchedule(DynInstPtr inst);
+
+ /** Is there room in the next stage buffer for this instruction? */
+ virtual bool canSendInstToNextStage();
+
+ /** Send an instruction to the next stage buffer */
+ virtual bool sendInstToNextStage(DynInstPtr inst);
+
+ /** Inserts a thread's instructions into the skid buffer, to be staged
+ * once stage unblocks.
+ */
+ virtual void skidInsert(unsigned tid);
+
+ /** Returns if all of the skid buffers are empty. */
+ bool skidsEmpty();
+
+ /** Updates overall stage status based on all of the threads' statuses. */
+ virtual void updateStatus();
+
+    /** Separates instructions arriving from the previous stage into
+     * individual lists sorted by thread.
+     */
+ void sortInsts();
+
+ /** Reads all stall signals from the backwards communication timebuffer. */
+ virtual void readStallSignals(unsigned tid);
+
+ /** Checks all input signals and updates stage's status appropriately. */
+ virtual bool checkSignalsAndUpdate(unsigned tid);
+
+ /** Checks all stall signals, and returns if any are true. */
+ virtual bool checkStall(unsigned tid) const;
+
+    /** Returns if there are any instructions from the previous stage
+ * on this cycle.
+ */
+ inline bool prevStageInstsValid();
+
+ /** Switches stage to blocking, and signals back that stage has
+ * become blocked.
+ * @return Returns true if there is a status change.
+ */
+ virtual bool block(unsigned tid);
+
+ void blockDueToBuffer(unsigned tid);
+
+ /** Switches stage to unblocking if the skid buffer is empty, and
+ * signals back that stage has unblocked.
+ * @return Returns true if there is a status change.
+ */
+ virtual bool unblock(unsigned tid);
+
+
+ public:
+ /** Squashes if there is a PC-relative branch that was predicted
+ * incorrectly. Sends squash information back to fetch.
+ */
+ virtual void squashDueToBranch(DynInstPtr &inst, unsigned tid);
+
+ /** Squash instructions from stage buffer */
+ virtual void squashPrevStageInsts(InstSeqNum squash_seq_num, unsigned tid);
+
+ /** Squashes due to commit signalling a squash. Changes status to
+ * squashing and clears block/unblock signals as needed.
+ */
+ virtual void squash(InstSeqNum squash_num, unsigned tid);
+
+ void dumpInsts();
+
+ protected:
+ /** CPU interface. */
+ InOrderCPU *cpu;
+
+ Trace::InOrderTrace *tracer;
+
+ /** List of active thread ids */
+ std::list<unsigned> *activeThreads;
+
+ /** Queue of all instructions coming from previous stage on this cycle. */
+ std::queue<DynInstPtr> insts[ThePipeline::MaxThreads];
+
+    /** Queue of instructions that are finished processing and ready to go to
+     * the next stage. This is used to prevent an instruction from being
+     * processed more than once in any stage.
+     * NOTE: It is up to the programmer to manage this as a queue.
+     */
+ std::list<DynInstPtr> instsToNextStage;
+
+ /** Skid buffer between previous stage and this one. */
+ std::queue<DynInstPtr> skidBuffer[ThePipeline::MaxThreads];
+
+    /** Instruction used to signify that there is no *real* instruction in a buffer slot */
+ DynInstPtr dummyBufferInst;
+
+ /** SeqNum of Squashing Branch Delay Instruction (used for MIPS) */
+ Addr bdelayDoneSeqNum[ThePipeline::MaxThreads];
+
+ /** Instruction used for squashing branch (used for MIPS) */
+ DynInstPtr squashInst[ThePipeline::MaxThreads];
+
+    /** Tells when there is a pending delay slot instruction to send
+     *  to the next stage. If there is, then wait to squash until after
+     *  the next instruction (used for MIPS).
+ */
+ bool squashAfterDelaySlot[ThePipeline::MaxThreads];
+
+ /** Maximum size of the inter-stage buffer connecting the previous stage to
+ * this stage (which we call a skid buffer) */
+ unsigned stageBufferMax;
+
+ /** Variable that tracks if stage has written to the time buffer this
+ * cycle. Used to tell CPU if there is activity this cycle.
+ */
+ bool wroteToTimeBuffer;
+
+ /** Index of instructions being sent to the next stage. */
+ unsigned toNextStageIndex;
+
+    /** The last stage that this particular stage should check for stalls. */
+ int lastStallingStage[ThePipeline::MaxThreads];
+
+ /** Time buffer interface. */
+ TimeBuffer<TimeStruct> *timeBuffer;
+
+ public:
+    /** Wire to read information from the next stages via the backwards time buffer. */
+ TimeBuffer<TimeStruct>::wire fromNextStages;
+
+    /** Wire to write information to the previous stages via the backwards time buffer. */
+ TimeBuffer<TimeStruct>::wire toPrevStages;
+
+ /** Instruction queue linking previous stage */
+ TimeBuffer<InterStageStruct> *prevStageQueue;
+
+    /** Wire to read the previous stage's instructions. */
+ TimeBuffer<InterStageStruct>::wire prevStage;
+
+ /** Instruction queue linking next stage */
+ TimeBuffer<InterStageStruct> *nextStageQueue;
+
+ /** Wire to write to the next stage */
+ TimeBuffer<InterStageStruct>::wire nextStage;
+
+ /** Is Previous Stage Valid? */
+ bool prevStageValid;
+
+ /** Is Next Stage Valid? */
+ bool nextStageValid;
+
+ /** Source of possible stalls. */
+ struct Stalls {
+ bool stage[ThePipeline::NumStages];
+ std::vector<ResReqPtr> resources;
+ };
+
+    /** Tracks which stages are telling this stage to stall. */
+ Stalls stalls[ThePipeline::MaxThreads];
+
+ //@TODO: Use Stats for the pipeline stages
+ /** Stat for total number of idle cycles. */
+ //Stats::Scalar<> stageIdleCycles;
+ /** Stat for total number of blocked cycles. */
+ //Stats::Scalar<> stageBlockedCycles;
+ /** Stat for total number of normal running cycles. */
+ //Stats::Scalar<> stageRunCycles;
+ /** Stat for total number of unblocking cycles. */
+ //Stats::Scalar<> stageUnblockCycles;
+ /** Stat for total number of squashing cycles. */
+ //Stats::Scalar<> stageSquashCycles;
+ /** Stat for total number of staged instructions. */
+ //Stats::Scalar<> stageProcessedInsts;
+ /** Stat for total number of squashed instructions. */
+ //Stats::Scalar<> stageSquashedInsts;
+};
+
+#endif
diff --git a/src/cpu/inorder/pipeline_traits.5stage.cc b/src/cpu/inorder/pipeline_traits.5stage.cc
new file mode 100644
index 000000000..50c30af1e
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.5stage.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+
+using namespace std;
+
+namespace ThePipeline {
+
+//@TODO: create my own Instruction Schedule Class
+//that operates as a Priority QUEUE
+int getNextPriority(DynInstPtr &inst, int stage_num)
+{
+ int cur_pri = 20;
+
+ /*
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_it = inst->resSched.begin();
+
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_end = inst->resSched.end();
+
+ while (sked_it != sked_end) {
+
+ if (sked_it.top()->stageNum == stage_num) {
+ cur_pri = sked_it.top()->priority;
+ }
+
+ sked_it++;
+ }
+ */
+
+ return cur_pri;
+}
+
+void createFrontEndSchedule(DynInstPtr &inst)
+{
+ int stNum = 0;
+ int stPri = 0;
+    // Get Pointer to Instruction's Schedule
+ ResSchedule *inst_sched = &inst->resSched;
+
+ //
+ // IF - Stage 0
+ // ---------------------------------------
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, FetchSeq, FetchSeqUnit::AssignNextPC));
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, ITLB, TLBUnit::FetchLookup));
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, ICache, CacheUnit::InitiateFetch));
+
+ //
+ // DE - Stage 1
+ // ---------------------------------------
+ stNum++; stPri = 0;
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, ICache, CacheUnit::CompleteFetch));
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, Decode, DecodeUnit::DecodeInst));
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, BPred, BranchPredictor::PredictBranch));
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, FetchSeq, FetchSeqUnit::UpdateTargetPC));
+
+}
+
+bool createBackEndSchedule(DynInstPtr &inst)
+{
+ if (!inst->staticInst) {
+ return false;
+ }
+
+ int stNum = BackEndStartStage;
+ int stPri = 0;
+
+    // Get Pointer to Instruction's Schedule
+ ResSchedule *inst_sched = &inst->resSched;
+
+ //
+ // EX - Stage 2
+ // ---------------------------------------
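+    // Read source registers here in EX. For stores, only source register 0
+    // is read now; source register 1 is read later, in the MEM stage below.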
+ for (int idx=0; idx < inst->numSrcRegs(); idx++) {
+ if (!idx || !inst->isStore())
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, RegManager, UseDefUnit::ReadSrcReg, idx));
+ }
+
+ if ( inst->isNonSpeculative() ) {
+ // skip execution of non speculative insts until later
+ } else if (inst->isMemRef()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, AGEN, AGENUnit::GenerateAddr));
+ if ( inst->isLoad() ) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, DTLB, TLBUnit::DataLookup));
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, DCache, CacheUnit::InitiateReadData));
+ }
+ } else {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, ExecUnit, ExecutionUnit::ExecuteInst));
+ }
+
+ //
+ // MEM - Stage 3
+ // ---------------------------------------
+ stPri = 0; stNum++;
+ if ( inst->isStore() ) { // for store, need src reg at this point
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, RegManager, UseDefUnit::ReadSrcReg, 1));
+ }
+ if ( inst->isLoad() ) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, DCache, CacheUnit::CompleteReadData));
+ } else if ( inst->isStore() ) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, DTLB, TLBUnit::DataLookup));
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, DCache, CacheUnit::InitiateWriteData));
+ }
+
+ //
+ // WB - Stage 4
+ // ---------------------------------------
+ stPri = 0; stNum++;
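+    // Non-speculative instructions, whose execution was skipped in EX above,
+    // execute here instead.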
+ if (inst->isNonSpeculative()) {
+ if (inst->isMemRef())
+ fatal("Schedule doesnt handle Non-Speculative Memory Instructions.\n");
+
+ if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, MDU, MultDivUnit::MultDiv));
+ } else {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, ExecUnit, ExecutionUnit::ExecuteInst));
+ }
+ }
+
+ if ( inst->isStore() )
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, DCache, CacheUnit::CompleteWriteData));
+
+ // Write Back to Register File
+ for (int idx=0; idx < inst->numDestRegs(); idx++) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, RegManager, UseDefUnit::WriteDestReg, idx));
+ }
+
+ // Graduate Instructions
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, Grad, GraduationUnit::GraduateInst));
+
+ return true;
+}
+
+};
diff --git a/src/cpu/inorder/pipeline_traits.5stage.hh b/src/cpu/inorder/pipeline_traits.5stage.hh
new file mode 100644
index 000000000..aea6eff37
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.5stage.hh
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_PIPELINE_IMPL_HH__
+#define __CPU_INORDER_PIPELINE_IMPL_HH__
+
+#include <list>
+#include <queue>
+#include <vector>
+
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/params.hh"
+
+
+class InOrderDynInst;
+
+/* This Namespace contains constants, typedefs, functions and
+ * objects specific to the Pipeline Implementation.
+ */
+namespace ThePipeline {
+ // Pipeline Constants
+ const unsigned NumStages = 5;
+ const unsigned MaxThreads = 3;
+ const unsigned StageWidth = 1;
+ const unsigned BackEndStartStage = 2;
+
+ // Enumerated List of Resources The Pipeline Uses
+ enum ResourceList {
+ FetchSeq = 0,
+ ITLB,
+ ICache,
+ Decode,
+ BPred,
+ FetchBuff,
+ RegManager,
+ AGEN,
+ ExecUnit,
+ DTLB,
+ DCache,
+ Grad,
+ FetchBuff2
+ };
+
+ // Expand this as necessary for your inter stage buffer sizes
+ static const unsigned interStageBuffSize[] = {
+ StageWidth, /* Stage 0 - 1 */
+ StageWidth, /* Stage 1 - 2 */
+ StageWidth, /* Stage 2 - 3 */
+ StageWidth, /* Stage 3 - 4 */
+ StageWidth, /* Stage 4 - 5 */
+ StageWidth, /* Stage 5 - 6 */
+ StageWidth, /* Stage 6 - 7 */
+ StageWidth, /* Stage 7 - 8 */
+ StageWidth /* Stage 8 - 9 */
+ };
+
+ typedef InOrderCPUParams Params;
+ typedef RefCountingPtr<InOrderDynInst> DynInstPtr;
+
+ //////////////////////////
+ // RESOURCE SCHEDULING
+ //////////////////////////
+ struct ScheduleEntry {
+ ScheduleEntry(int stage_num, int _priority, int res_num, int _cmd = 0,
+ int _idx = 0) :
+ stageNum(stage_num), resNum(res_num), cmd(_cmd),
+ idx(_idx), priority(_priority)
+ { }
+ virtual ~ScheduleEntry(){}
+
+ // Stage number to perform this service.
+ int stageNum;
+
+ // Resource ID to access
+ int resNum;
+
+ // See specific resource for meaning
+ unsigned cmd;
+
+ // See specific resource for meaning
+ unsigned idx;
+
+ // Some Resources May Need Priority?
+ int priority;
+ };
+
+ struct entryCompare {
+ bool operator()(const ScheduleEntry* lhs, const ScheduleEntry* rhs) const
+ {
+ // Prioritize first by stage number that the resource is needed
+ if (lhs->stageNum > rhs->stageNum) {
+ return true;
+ } else if (lhs->stageNum == rhs->stageNum) {
+ /*if (lhs->resNum > rhs->resNum) {
+ return true;
+ } else {
+ return false;
+ }*/
+
+ if (lhs->priority > rhs->priority) {
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+ };
+
+
+ typedef std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare> ResSchedule;
+
+ void createFrontEndSchedule(DynInstPtr &inst);
+ bool createBackEndSchedule(DynInstPtr &inst);
+ int getNextPriority(DynInstPtr &inst, int stage_num);
+};
+#endif
diff --git a/src/cpu/inorder/pipeline_traits.9stage.cc b/src/cpu/inorder/pipeline_traits.9stage.cc
new file mode 100644
index 000000000..d686bb3bc
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.9stage.cc
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+
+using namespace std;
+
+namespace ThePipeline {
+
+
+//@TODO: create my own Instruction Schedule Class
+//that operates as a Priority QUEUE
+int getNextPriority(DynInstPtr &inst, int stage_num)
+{
+ int cur_pri = 20;
+
+ /*
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_it = inst->resSched.begin();
+
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_end = inst->resSched.end();
+
+ while (sked_it != sked_end) {
+
+ if (sked_it.top()->stageNum == stage_num) {
+ cur_pri = sked_it.top()->priority;
+ }
+
+ sked_it++;
+ }
+ */
+
+ return cur_pri;
+}
+
+void createFrontEndSchedule(DynInstPtr &inst)
+{
+ int stNum = 0;
+ int stPri = 0;
+    // Get Pointer to Instruction's Schedule
+ ResSchedule *inst_sched = &inst->resSched;
+
+ //
+ // Stage 0
+ // ---------------------------------------
+ inst_sched->push(new ScheduleEntry(stNum, stPri, FetchSeq, FetchSeqUnit::AssignNextPC));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ITLB, TLBUnit::FetchLookup));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ICache, CacheUnit::InitiateFetch));
+ stPri++;
+
+ // Reset Priority / Update Next Stage Number
+ stNum++;
+ stPri = 0;
+
+ //
+ // Stage 1
+ // ---------------------------------------
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ICache, CacheUnit::CompleteFetch));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, Decode, DecodeUnit::DecodeInst));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, BPred, BranchPredictor::PredictBranch));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, FetchSeq, FetchSeqUnit::UpdateTargetPC));
+ stPri++;
+
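+    // Each thread uses its own fetch buffer resource: FetchBuff for
+    // thread 0, FetchBuff2 otherwise.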
+ if (inst->readTid() == 0)
+ inst_sched->push(new ScheduleEntry(stNum, stPri, FetchBuff, InstBuffer::ScheduleOrBypass));
+ else //if (inst->readTid() == 1)
+ inst_sched->push(new ScheduleEntry(stNum, stPri, FetchBuff2, InstBuffer::ScheduleOrBypass));
+ stPri++;
+
+ // Reset Priority / Update Next Stage Number
+ stNum++;
+ stPri = 0;
+
+ //
+ // Stage 2
+ // ---------------------------------------
+ // Reset Priority / Update Next Stage Number
+ stNum++;
+ stPri = 0;
+}
+
+bool createBackEndSchedule(DynInstPtr &inst)
+{
+ if (!inst->staticInst) {
+ return false;
+ }
+
+ std::string name = inst->staticInst->getName();
+
+ int stNum = BackEndStartStage;
+ int stPri = 0;
+
+    // Get Pointer to Instruction's Schedule
+ ResSchedule *inst_sched = &inst->resSched;
+
+ //
+ // Stage 3
+ // ---------------------------------------
+    // Set When Source Registers Should Be Read
+ for (int idx=0; idx < inst->numSrcRegs(); idx++) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, RegManager, UseDefUnit::ReadSrcReg, idx));
+ }
+ stPri++;
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 4
+ // ---------------------------------------
+ if (inst->isMemRef()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, AGEN, AGENUnit::GenerateAddr));
+ }
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 5
+ // ---------------------------------------
+ // Execution Unit
+ if (!inst->isNonSpeculative() && !inst->isMemRef()) {
+ if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri++, MDU, MultDivUnit::MultDiv));
+ } else {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ExecUnit, ExecutionUnit::ExecuteInst));
+ }
+ }
+ stPri++;
+
+ // DCache Initiate Access
+ if (inst->isMemRef()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DTLB, TLBUnit::DataLookup));
+ stPri++;
+
+ if (inst->isLoad()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::InitiateReadData));
+ } else if (inst->isStore()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::InitiateWriteData));
+ }
+ }
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 6
+ // ---------------------------------------
+ // DCache Complete Access
+ if (inst->isMemRef()) {
+ if (inst->isLoad()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::CompleteReadData));
+ } else if (inst->isStore()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::CompleteWriteData));
+ }
+ }
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 7
+ // ---------------------------------------
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 8
+ // ---------------------------------------
+ // NonSpeculative Execution
+ if (inst->isNonSpeculative() ) {
+ if (inst->isMemRef())
+ fatal("Schedule doesnt handle Non-Speculative Memory Instructions.\n");
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ExecUnit, ExecutionUnit::ExecuteInst));
+ stPri++;
+ }
+
+ // Write Back to Register File
+ for (int idx=0; idx < inst->numDestRegs(); idx++) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, RegManager, UseDefUnit::WriteDestReg, idx));
+ stPri++;
+ }
+
+ // Graduate Instructions
+ inst_sched->push(new ScheduleEntry(stNum, stPri, Grad, GraduationUnit::GraduateInst));
+ stPri++;
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ return true;
+}
+
+};
diff --git a/src/cpu/inorder/pipeline_traits.9stage.hh b/src/cpu/inorder/pipeline_traits.9stage.hh
new file mode 100644
index 000000000..91e537366
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.9stage.hh
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_PIPELINE_IMPL_HH__
+#define __CPU_INORDER_PIPELINE_IMPL_HH__
+
+#include <list>
+#include <queue>
+#include <vector>
+#include <map>
+
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/params.hh"
+
+
+class InOrderDynInst;
+
+/* This Namespace contains constants, typedefs, functions and
+ * objects specific to the Pipeline Implementation.
+ */
+namespace ThePipeline {
+ // Pipeline Constants
+ const unsigned NumStages = 9;
+ const unsigned MaxThreads = 3;
+ const unsigned StageWidth = 2;
+ const unsigned BackEndStartStage = 3;
+
+ // Use this to over-ride default stage widths
+ static std::map<unsigned, unsigned> stageBufferSizes;
+
+ //static unsigned interStageBuffSize[NumStages];
+
+ static const unsigned interStageBuffSize[NumStages] = {
+ StageWidth, /* Stage 0 - 1 */
+ StageWidth, /* Stage 1 - 2 */
+ 4, /* Stage 2 - 3 */
+ StageWidth, /* Stage 3 - 4 */
+ StageWidth, /* Stage 4 - 5 */
+ StageWidth, /* Stage 5 - 6 */
+ StageWidth, /* Stage 6 - 7 */
+ StageWidth, /* Stage 7 - 8 */
+ StageWidth /* Stage 8 - 9 */
+ };
+
+
+ // Enumerated List of Resources The Pipeline Uses
+ enum ResourceList {
+ FetchSeq = 0,
+ ITLB,
+ ICache,
+ Decode,
+ BPred,
+ FetchBuff,
+ RegManager,
+ AGEN,
+ ExecUnit,
+ DTLB,
+ DCache,
+ Grad,
+ FetchBuff2
+ };
+
+ typedef InOrderCPUParams Params;
+ typedef RefCountingPtr<InOrderDynInst> DynInstPtr;
+
+//void initPipelineTraits();
+
+ //////////////////////////
+ // RESOURCE SCHEDULING
+ //////////////////////////
+ struct ScheduleEntry {
+ ScheduleEntry(int stage_num, int _priority, int res_num, int _cmd = 0,
+ int _idx = 0) :
+ stageNum(stage_num), resNum(res_num), cmd(_cmd),
+ idx(_idx), priority(_priority)
+ { }
+ virtual ~ScheduleEntry(){}
+
+ // Stage number to perform this service.
+ int stageNum;
+
+ // Resource ID to access
+ int resNum;
+
+ // See specific resource for meaning
+ unsigned cmd;
+
+ // See specific resource for meaning
+ unsigned idx;
+
+ // Some Resources May Need Priority?
+ int priority;
+ };
+
+ struct entryCompare {
+ bool operator()(const ScheduleEntry* lhs, const ScheduleEntry* rhs) const
+ {
+ // Prioritize first by stage number that the resource is needed
+ if (lhs->stageNum > rhs->stageNum) {
+ return true;
+ } else if (lhs->stageNum == rhs->stageNum) {
+ /*if (lhs->resNum > rhs->resNum) {
+ return true;
+ } else {
+ return false;
+ }*/
+
+ if (lhs->priority > rhs->priority) {
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+ };
+
+
+ typedef std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare> ResSchedule;
+
+ void createFrontEndSchedule(DynInstPtr &inst);
+ bool createBackEndSchedule(DynInstPtr &inst);
+ int getNextPriority(DynInstPtr &inst, int stage_num);
+};
+#endif
diff --git a/src/cpu/inorder/pipeline_traits.9stage.smt2.cc b/src/cpu/inorder/pipeline_traits.9stage.smt2.cc
new file mode 100644
index 000000000..9d2ed8e61
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.9stage.smt2.cc
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+
+using namespace std;
+
+namespace ThePipeline {
+
+
+//@TODO: create my own Instruction Schedule Class
+//that operates as a Priority QUEUE
+int getNextPriority(DynInstPtr &inst, int stage_num)
+{
+ int cur_pri = 20;
+
+ /*
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_it = inst->resSched.begin();
+
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_end = inst->resSched.end();
+
+ while (sked_it != sked_end) {
+
+ if (sked_it.top()->stageNum == stage_num) {
+ cur_pri = sked_it.top()->priority;
+ }
+
+ sked_it++;
+ }
+ */
+
+ return cur_pri;
+}
+
+void createFrontEndSchedule(DynInstPtr &inst)
+{
+ int stNum = 0;
+ int stPri = 0;
+    // Get Pointer to Instruction's Schedule
+ ResSchedule *inst_sched = &inst->resSched;
+
+ //
+ // Stage 0
+ // ---------------------------------------
+ inst_sched->push(new ScheduleEntry(stNum, stPri, FetchSeq, FetchSeqUnit::AssignNextPC));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ITLB, TLBUnit::FetchLookup));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ICache, CacheUnit::InitiateFetch));
+ stPri++;
+
+ // Reset Priority / Update Next Stage Number
+ stNum++;
+ stPri = 0;
+
+ //
+ // Stage 1
+ // ---------------------------------------
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ICache, CacheUnit::CompleteFetch));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, Decode, DecodeUnit::DecodeInst));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, BPred, BranchPredictor::PredictBranch));
+ stPri++;
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, FetchSeq, FetchSeqUnit::UpdateTargetPC));
+ stPri++;
+
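+    // FetchBuff and FetchBuff2 are adjacent in this build's ResourceList,
+    // so the per-thread fetch buffer is selected by offsetting FetchBuff
+    // with the thread id.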
+ int fetch_buff_num = FetchBuff + inst->readTid();
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, fetch_buff_num, InstBuffer::ScheduleOrBypass));
+
+ // Reset Priority / Update Next Stage Number
+ stNum++;
+ stPri = 0;
+
+ //
+ // Stage 2
+ // ---------------------------------------
+ // Reset Priority / Update Next Stage Number
+ stNum++;
+ stPri = 0;
+}
+
+bool createBackEndSchedule(DynInstPtr &inst)
+{
+ if (!inst->staticInst) {
+ return false;
+ }
+
+ std::string name = inst->staticInst->getName();
+
+ int stNum = BackEndStartStage;
+ int stPri = 0;
+
+    // Get Pointer to Instruction's Schedule
+ ResSchedule *inst_sched = &inst->resSched;
+
+ //
+ // Stage 3
+ // ---------------------------------------
+    // Set When Source Registers Should Be Read
+ for (int idx=0; idx < inst->numSrcRegs(); idx++) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, RegManager, UseDefUnit::ReadSrcReg, idx));
+ }
+ stPri++;
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 4
+ // ---------------------------------------
+ if (inst->isMemRef()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, AGEN, AGENUnit::GenerateAddr));
+ }
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 5
+ // ---------------------------------------
+ // Execution Unit
+ if (!inst->isNonSpeculative() && !inst->isMemRef()) {
+ //if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
+ //inst_sched->push(new ScheduleEntry(stNum, stPri++, MDU, MultDivUnit::MultDiv));
+ //} else {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ExecUnit, ExecutionUnit::ExecuteInst));
+ //}
+ }
+ stPri++;
+
+ // DCache Initiate Access
+ if (inst->isMemRef()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DTLB, TLBUnit::DataLookup));
+ stPri++;
+
+ if (inst->isLoad()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::InitiateReadData));
+ } else if (inst->isStore()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::InitiateWriteData));
+ }
+ }
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 6
+ // ---------------------------------------
+ // DCache Complete Access
+ if (inst->isMemRef()) {
+ if (inst->isLoad()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::CompleteReadData));
+ } else if (inst->isStore()) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, DCache, CacheUnit::CompleteWriteData));
+ }
+ }
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 7
+ // ---------------------------------------
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ //
+ // Stage 8
+ // ---------------------------------------
+ // NonSpeculative Execution
+ if (inst->isNonSpeculative() ) {
+ if (inst->isMemRef())
+ fatal("Schedule doesnt handle Non-Speculative Memory Instructions.\n");
+
+ inst_sched->push(new ScheduleEntry(stNum, stPri, ExecUnit, ExecutionUnit::ExecuteInst));
+ stPri++;
+ }
+
+ // Write Back to Register File
+ for (int idx=0; idx < inst->numDestRegs(); idx++) {
+ inst_sched->push(new ScheduleEntry(stNum, stPri, RegManager, UseDefUnit::WriteDestReg, idx));
+ stPri++;
+ }
+
+ // Graduate Instructions
+ inst_sched->push(new ScheduleEntry(stNum, stPri, Grad, GraduationUnit::GraduateInst));
+ stPri++;
+
+ // Reset Priority / Update Next Stage Number
+ stPri = 0;
+ stNum++;
+
+ return true;
+}
+
+};
diff --git a/src/cpu/inorder/pipeline_traits.9stage.smt2.hh b/src/cpu/inorder/pipeline_traits.9stage.smt2.hh
new file mode 100644
index 000000000..22da4ea0f
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.9stage.smt2.hh
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_PIPELINE_IMPL_HH__
+#define __CPU_INORDER_PIPELINE_IMPL_HH__
+
+#include <list>
+#include <queue>
+#include <vector>
+#include <map>
+
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/params.hh"
+
+
+class InOrderDynInst;
+
+/* This Namespace contains constants, typedefs, functions and
+ * objects specific to the Pipeline Implementation.
+ */
+namespace ThePipeline {
+ // Pipeline Constants
+ const unsigned NumStages = 9;
+ const unsigned MaxThreads = 2;
+ const unsigned StageWidth = 1;
+ const unsigned BackEndStartStage = 3;
+
+ // Use this to over-ride default stage widths
+ static std::map<unsigned, unsigned> stageBufferSizes;
+
+ //static unsigned interStageBuffSize[NumStages];
+
+ static const unsigned interStageBuffSize[NumStages] = {
+ StageWidth, /* Stage 0 - 1 */
+ StageWidth, /* Stage 1 - 2 */
+ MaxThreads * 4, /* Stage 2 - 3 */
+ StageWidth, /* Stage 3 - 4 */
+ MaxThreads * 4, /* Stage 4 - 5 */
+ StageWidth, /* Stage 5 - 6 */
+ StageWidth, /* Stage 6 - 7 */
+ StageWidth, /* Stage 7 - 8 */
+ MaxThreads /* Stage 8 - 9 */
+ };
+
+
+ // Enumerated List of Resources The Pipeline Uses
+ enum ResourceList {
+ FetchSeq = 0,
+ ITLB,
+ ICache,
+ Decode,
+ BPred,
+ RegManager,
+ AGEN,
+ ExecUnit,
+ DTLB,
+ DCache,
+ Grad,
+ FetchBuff,
+ FetchBuff2
+ };
+
+ typedef InOrderCPUParams Params;
+ typedef RefCountingPtr<InOrderDynInst> DynInstPtr;
+
+//void initPipelineTraits();
+
+ //////////////////////////
+ // RESOURCE SCHEDULING
+ //////////////////////////
+ struct ScheduleEntry {
+ ScheduleEntry(int stage_num, int _priority, int res_num, int _cmd = 0,
+ int _idx = 0) :
+ stageNum(stage_num), resNum(res_num), cmd(_cmd),
+ idx(_idx), priority(_priority)
+ { }
+ virtual ~ScheduleEntry(){}
+
+ // Stage number to perform this service.
+ int stageNum;
+
+ // Resource ID to access
+ int resNum;
+
+ // See specific resource for meaning
+ unsigned cmd;
+
+ // See specific resource for meaning
+ unsigned idx;
+
+ // Some Resources May Need Priority?
+ int priority;
+ };
+
+ struct entryCompare {
+ bool operator()(const ScheduleEntry* lhs, const ScheduleEntry* rhs) const
+ {
+ // Prioritize first by stage number that the resource is needed
+ if (lhs->stageNum > rhs->stageNum) {
+ return true;
+ } else if (lhs->stageNum == rhs->stageNum) {
+ /*if (lhs->resNum > rhs->resNum) {
+ return true;
+ } else {
+ return false;
+ }*/
+
+ if (lhs->priority > rhs->priority) {
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+ };
+
+
+ typedef std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare> ResSchedule;
+
+ void createFrontEndSchedule(DynInstPtr &inst);
+ bool createBackEndSchedule(DynInstPtr &inst);
+ int getNextPriority(DynInstPtr &inst, int stage_num);
+};
+#endif
diff --git a/src/cpu/inorder/pipeline_traits.cc b/src/cpu/inorder/pipeline_traits.cc
new file mode 100644
index 000000000..eb899452a
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.cc
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+
+using namespace std;
+
+namespace ThePipeline {
+
+//@TODO: create my own Instruction Schedule Class
+//that operates as a Priority QUEUE
+int getNextPriority(DynInstPtr &inst, int stage_num)
+{
+ int cur_pri = 20;
+
+ /*
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_it = inst->resSched.begin();
+
+ std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare>::iterator sked_end = inst->resSched.end();
+
+ while (sked_it != sked_end) {
+
+ if (sked_it.top()->stageNum == stage_num) {
+ cur_pri = sked_it.top()->priority;
+ }
+
+ sked_it++;
+ }
+ */
+
+ return cur_pri;
+}
+
+void createFrontEndSchedule(DynInstPtr &inst)
+{
+ InstStage *I = inst->addStage();
+ InstStage *E = inst->addStage();
+
+ I->needs(FetchSeq, FetchSeqUnit::AssignNextPC);
+ I->needs(ITLB, TLBUnit::FetchLookup);
+ I->needs(ICache, CacheUnit::InitiateFetch);
+
+ E->needs(ICache, CacheUnit::CompleteFetch);
+ E->needs(Decode, DecodeUnit::DecodeInst);
+ E->needs(BPred, BranchPredictor::PredictBranch);
+ E->needs(FetchSeq, FetchSeqUnit::UpdateTargetPC);
+}
+
+bool createBackEndSchedule(DynInstPtr &inst)
+{
+ if (!inst->staticInst) {
+ return false;
+ }
+
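+    // Continue filling the current stage (E) created by the front end, then
+    // add three more stages (M, A, W) to complete the 5-stage schedule.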
+ InstStage *E = inst->currentStage();
+ InstStage *M = inst->addStage();
+ InstStage *A = inst->addStage();
+ InstStage *W = inst->addStage();
+
+ for (int idx=0; idx < inst->numSrcRegs(); idx++) {
+ if (!idx || !inst->isStore()) {
+ E->needs(RegManager, UseDefUnit::ReadSrcReg, idx);
+ }
+ }
+
+
+ if ( inst->isNonSpeculative() ) {
+ // skip execution of non speculative insts until later
+ } else if ( inst->isMemRef() ) {
+ E->needs(AGEN, AGENUnit::GenerateAddr);
+ if ( inst->isLoad() ) {
+ E->needs(DTLB, TLBUnit::DataLookup);
+ E->needs(DCache, CacheUnit::InitiateReadData);
+ }
+ } else if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
+ E->needs(MDU, MultDivUnit::StartMultDiv);
+
+ // ZERO-LATENCY Multiply:
+ // E->needs(MDU, MultDivUnit::MultDiv);
+ } else {
+ E->needs(ExecUnit, ExecutionUnit::ExecuteInst);
+ }
+
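+    // A multiply/divide started in E (StartMultDiv) completes in M
+    // (EndMultDiv) below.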
+ if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
+ M->needs(MDU, MultDivUnit::EndMultDiv);
+ }
+
+ if ( inst->isLoad() ) {
+ M->needs(DCache, CacheUnit::CompleteReadData);
+ } else if ( inst->isStore() ) {
+ M->needs(RegManager, UseDefUnit::ReadSrcReg, 1);
+ M->needs(DTLB, TLBUnit::DataLookup);
+ M->needs(DCache, CacheUnit::InitiateWriteData);
+ }
+
+ if ( inst->isStore() ) {
+ A->needs(DCache, CacheUnit::CompleteWriteData);
+ }
+
+ if ( inst->isNonSpeculative() ) {
+ if ( inst->isMemRef() ) fatal("Non-Speculative Memory Instruction");
+ W->needs(ExecUnit, ExecutionUnit::ExecuteInst);
+ }
+
+ for (int idx=0; idx < inst->numDestRegs(); idx++) {
+ W->needs(RegManager, UseDefUnit::WriteDestReg, idx);
+ }
+
+ W->needs(Grad, GraduationUnit::GraduateInst);
+
+ return true;
+}
+
+InstStage::InstStage(DynInstPtr inst, int stage_num)
+{
+ stageNum = stage_num;
+ nextTaskPriority = 0;
+ instSched = &inst->resSched;
+}
+
+};
diff --git a/src/cpu/inorder/pipeline_traits.hh b/src/cpu/inorder/pipeline_traits.hh
new file mode 100644
index 000000000..c5e4bb228
--- /dev/null
+++ b/src/cpu/inorder/pipeline_traits.hh
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_PIPELINE_IMPL_HH__
+#define __CPU_INORDER_PIPELINE_IMPL_HH__
+
+#include <list>
+#include <queue>
+#include <vector>
+
+#include "arch/isa_traits.hh"
+#include "cpu/base.hh"
+//#include "cpu/inorder/params.hh"
+
+#include "params/InOrderCPU.hh"
+
+class InOrderDynInst;
+
+/* This Namespace contains constants, typedefs, functions and
+ * objects specific to the Pipeline Implementation.
+ */
+namespace ThePipeline {
+ // Pipeline Constants
+ const unsigned NumStages = 5;
+ const unsigned MaxThreads = 8;
+ const unsigned StageWidth = 1;
+ const unsigned BackEndStartStage = 2;
+
+ // Enumerated List of Resources The Pipeline Uses
+ enum ResourceList {
+ FetchSeq = 0,
+ ITLB,
+ ICache,
+ Decode,
+ BPred,
+ FetchBuff,
+ RegManager,
+ AGEN,
+ ExecUnit,
+ MDU,
+ DTLB,
+ DCache,
+ Grad,
+ FetchBuff2
+ };
+
+ // Expand this as necessary for your inter stage buffer sizes
+ static const unsigned interStageBuffSize[] = {
+ StageWidth, /* Stage 0 - 1 */
+ StageWidth, /* Stage 1 - 2 */
+ StageWidth, /* Stage 2 - 3 */
+ StageWidth, /* Stage 3 - 4 */
+ StageWidth, /* Stage 4 - 5 */
+ StageWidth, /* Stage 5 - 6 */
+ StageWidth, /* Stage 6 - 7 */
+ StageWidth, /* Stage 7 - 8 */
+ StageWidth /* Stage 8 - 9 */
+ };
+
+ typedef InOrderCPUParams Params;
+ typedef RefCountingPtr<InOrderDynInst> DynInstPtr;
+
+ //////////////////////////
+ // RESOURCE SCHEDULING
+ //////////////////////////
+ struct ScheduleEntry {
+ ScheduleEntry(int stage_num, int _priority, int res_num, int _cmd = 0,
+ int _idx = 0) :
+ stageNum(stage_num), resNum(res_num), cmd(_cmd),
+ idx(_idx), priority(_priority)
+ { }
+ virtual ~ScheduleEntry(){}
+
+ // Stage number to perform this service.
+ int stageNum;
+
+ // Resource ID to access
+ int resNum;
+
+ // See specific resource for meaning
+ unsigned cmd;
+
+ // See specific resource for meaning
+ unsigned idx;
+
+ // Some Resources May Need Priority?
+ int priority;
+ };
+
+    struct entryCompare {
+        bool operator()(const ScheduleEntry* lhs, const ScheduleEntry* rhs) const
+        {
+            // Order first by the stage in which the resource is needed,
+            // then by priority within that stage.
+            if (lhs->stageNum != rhs->stageNum)
+                return lhs->stageNum > rhs->stageNum;
+
+            return lhs->priority > rhs->priority;
+        }
+    };
+
+
+ typedef std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
+ entryCompare> ResSchedule;
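+
+    // Illustrative example: because entryCompare defines a "greater-than"
+    // ordering, the priority_queue above pops entries in ascending stageNum
+    // order and, within a stage, in ascending priority order. For instance,
+    // pushing
+    //     sched.push(new ScheduleEntry(1, 0, Decode));   // stage 1, pri 0
+    //     sched.push(new ScheduleEntry(0, 1, ICache));   // stage 0, pri 1
+    //     sched.push(new ScheduleEntry(0, 0, FetchSeq)); // stage 0, pri 0
+    // yields FetchSeq, then ICache, then Decode from successive top()/pop()
+    // calls.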
+
+ void createFrontEndSchedule(DynInstPtr &inst);
+ bool createBackEndSchedule(DynInstPtr &inst);
+ int getNextPriority(DynInstPtr &inst, int stage_num);
+
+ class InstStage {
+ private:
+ int nextTaskPriority;
+ int stageNum;
+ ResSchedule *instSched;
+
+ public:
+ InstStage(DynInstPtr inst, int stage_num);
+
+ void needs(int unit, int request) {
+ instSched->push( new ScheduleEntry(
+ stageNum, nextTaskPriority++, unit, request
+ ));
+ }
+
+ void needs(int unit, int request, int param) {
+ instSched->push( new ScheduleEntry(
+ stageNum, nextTaskPriority++, unit, request, param
+ ));
+ }
+
+ };
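+
+    // Illustrative use of InstStage (the command values below are
+    // placeholders; the real command encodings are defined by each resource):
+    //
+    //     InstStage *F = new InstStage(inst, 0);  // front-end stage 0
+    //     F->needs(FetchSeq, 0);                  // e.g., assign next PC
+    //     F->needs(ICache, 0);                    // e.g., initiate fetch
+    //
+    // Each needs() call pushes a ScheduleEntry onto the instruction's
+    // resource schedule with an increasing priority within this stage.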
+};
+
+
+
+
+#endif // __CPU_INORDER_PIPELINE_IMPL_HH__
diff --git a/src/cpu/inorder/reg_dep_map.cc b/src/cpu/inorder/reg_dep_map.cc
new file mode 100644
index 000000000..a405b1fb9
--- /dev/null
+++ b/src/cpu/inorder/reg_dep_map.cc
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/reg_dep_map.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+RegDepMap::RegDepMap(int size)
+{
+ regMap.resize(size);
+}
+
+string
+RegDepMap::name()
+{
+ return cpu->name() + ".RegDepMap";
+}
+
+void
+RegDepMap::setCPU(InOrderCPU *_cpu)
+{
+ cpu = _cpu;
+}
+
+void
+RegDepMap::clear()
+{
+ regMap.clear();
+}
+
+void
+RegDepMap::insert(DynInstPtr inst)
+{
+ int dest_regs = inst->numDestRegs();
+
+ DPRINTF(RegDepMap, "Setting Output Dependencies for [sn:%i] "
+ ", %s (dest. regs = %i).\n",
+ inst->seqNum,
+ inst->staticInst->getName(),
+ dest_regs);
+
+ for (int i = 0; i < dest_regs; i++) {
+ int idx = inst->destRegIdx(i);
+
+ //if (inst->numFPDestRegs())
+ // idx += TheISA::FP_Base_DepTag;
+
+ insert(idx, inst);
+ }
+}
+
+
+void
+RegDepMap::insert(unsigned idx, DynInstPtr inst)
+{
+ DPRINTF(RegDepMap, "Inserting [sn:%i] onto dep. list for reg. idx %i.\n",
+ inst->seqNum, idx);
+
+ regMap[idx].push_back(inst);
+
+ inst->setRegDepEntry();
+}
+
+void
+RegDepMap::remove(DynInstPtr inst)
+{
+ if (inst->isRegDepEntry()) {
+ DPRINTF(RegDepMap, "Removing [sn:%i]'s entries from reg. dep. map.\n",
+ inst->seqNum);
+
+ int dest_regs = inst->numDestRegs();
+
+ for (int i = 0; i < dest_regs; i++) {
+ int idx = inst->destRegIdx(i);
+ remove(idx, inst);
+ }
+ }
+}
+
+void
+RegDepMap::remove(unsigned idx, DynInstPtr inst)
+{
+ std::list<DynInstPtr>::iterator list_it = regMap[idx].begin();
+ std::list<DynInstPtr>::iterator list_end = regMap[idx].end();
+
+ while (list_it != list_end) {
+ if((*list_it) == inst) {
+ regMap[idx].erase(list_it);
+ break;
+ }
+
+ list_it++;
+ }
+}
+
+void
+RegDepMap::removeFront(unsigned idx, DynInstPtr inst)
+{
+ std::list<DynInstPtr>::iterator list_it = regMap[idx].begin();
+
+ DPRINTF(RegDepMap, "[tid:%u]: Removing dependency entry on phys. reg."
+ "%i for [sn:%i].\n", inst->readTid(), idx, inst->seqNum);
+
+ assert(list_it != regMap[idx].end());
+
+ assert(inst == (*list_it));
+
+ regMap[idx].erase(list_it);
+}
+
+bool
+RegDepMap::canRead(unsigned idx, DynInstPtr inst)
+{
+ if (regMap[idx].size() == 0)
+ return true;
+
+ std::list<DynInstPtr>::iterator list_it = regMap[idx].begin();
+
+ if (inst->seqNum <= (*list_it)->seqNum) {
+ return true;
+ } else {
+ DPRINTF(RegDepMap, "[sn:%i] Can't read from RegFile, [sn:%i] has not written"
+ " it's value back yet.\n", inst->seqNum, (*list_it)->seqNum);
+ return false;
+ }
+}
+
+ThePipeline::DynInstPtr
+RegDepMap::canForward(unsigned reg_idx, unsigned src_idx, DynInstPtr inst)
+{
+ std::list<DynInstPtr>::iterator list_it = regMap[reg_idx].begin();
+ std::list<DynInstPtr>::iterator list_end = regMap[reg_idx].end();
+
+ DynInstPtr forward_inst = NULL;
+
+    // Find the youngest producer that is still older than this instruction
+ while (list_it != list_end &&
+ (*list_it)->seqNum < inst->seqNum) {
+ forward_inst = (*list_it);
+ list_it++;
+ }
+
+ if (forward_inst) {
+ if (forward_inst->isExecuted() &&
+ forward_inst->readResultTime(src_idx) < curTick) {
+ return forward_inst;
+ } else {
+ DPRINTF(RegDepMap, "[sn:%i] Can't get value through forwarding, "
+ " [sn:%i] has not been executed yet.\n",
+ inst->seqNum, forward_inst->seqNum);
+ return NULL;
+ }
+ } else {
+ DPRINTF(RegDepMap, "[sn:%i] No instruction found to forward from.\n",
+ inst->seqNum);
+ return NULL;
+ }
+}
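+
+// Illustrative caller-side check (a sketch; in practice the register-file
+// manager resource performs these checks before sourcing an operand):
+//
+//     if (regDepMap->canRead(reg_idx, inst)) {
+//         // safe to read the architected value from the register file
+//     } else {
+//         DynInstPtr prod = regDepMap->canForward(reg_idx, src_idx, inst);
+//         if (prod) {
+//             // bypass the value from the producing instruction
+//         } else {
+//             // stall: the producer has not written or forwarded yet
+//         }
+//     }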
+
+bool
+RegDepMap::canWrite(unsigned idx, DynInstPtr inst)
+{
+ if (regMap[idx].size() == 0)
+ return true;
+
+ std::list<DynInstPtr>::iterator list_it = regMap[idx].begin();
+
+ if (inst->seqNum <= (*list_it)->seqNum) {
+ return true;
+ } else {
+ DPRINTF(RegDepMap, "[sn:%i] Can't write from RegFile: [sn:%i] has not written"
+ " it's value back yet.\n", inst->seqNum, (*list_it)->seqNum);
+ }
+
+ return false;
+}
+
+int
+RegDepMap::depSize(unsigned idx)
+{
+ return regMap[idx].size();
+}
+
+ThePipeline::DynInstPtr
+RegDepMap::findBypassInst(unsigned idx)
+{
+    if (depSize(idx) <= 1)
+        return NULL;
+
+    std::list<DynInstPtr>::iterator list_it = regMap[idx].begin();
+
+    // Skip the first entry and search the remaining producers for one
+    // that has already executed.
+    list_it++;
+
+    while (list_it != regMap[idx].end()) {
+        if ((*list_it)->isExecuted())
+            return *list_it;
+
+        list_it++;
+    }
+
+ return NULL;
+}
diff --git a/src/cpu/inorder/reg_dep_map.hh b/src/cpu/inorder/reg_dep_map.hh
new file mode 100644
index 000000000..ba2a8c8a3
--- /dev/null
+++ b/src/cpu/inorder/reg_dep_map.hh
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_REG_DEP_MAP_HH__
+#define __CPU_INORDER_REG_DEP_MAP_HH__
+
+#include <list>
+#include <vector>
+
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+
+class InOrderCPU;
+
+class RegDepMap
+{
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ RegDepMap(int size = TheISA::TotalNumRegs);
+
+ ~RegDepMap() { }
+
+ std::string name();
+
+ void setCPU(InOrderCPU *_cpu);
+
+ /** Clear the Entire Map */
+ void clear();
+
+    /** Insert all of an instruction's destination registers into the map */
+ void insert(DynInstPtr inst);
+
+    /** Insert an instruction onto the dependency list for a specific destination register index */
+ void insert(unsigned idx, DynInstPtr inst);
+
+    /** Remove all of an instruction's destination registers from the map */
+ void remove(DynInstPtr inst);
+
+ /** Remove a specific instruction and destination register index from map */
+ void remove(unsigned idx, DynInstPtr inst);
+
+    /** Remove the front (oldest) instruction from a destination register's list */
+ void removeFront(unsigned idx, DynInstPtr inst);
+
+ /** Is the current instruction able to read from this destination register? */
+ bool canRead(unsigned idx, DynInstPtr inst);
+
+ /** Is the current instruction able to get a forwarded value from another instruction
+ * for this destination register? */
+ DynInstPtr canForward(unsigned reg_idx, unsigned src_idx, DynInstPtr inst);
+
+    /** Find an instruction to forward/bypass a value from */
+ DynInstPtr findBypassInst(unsigned idx);
+
+ /** Is the current instruction able to write to this destination register? */
+ bool canWrite(unsigned idx, DynInstPtr inst);
+
+    /** Number of instructions currently on the dependency list for a register */
+ int depSize(unsigned idx);
+
+ protected:
+    // @todo: Eventually make this a map of lists for efficiency's sake.
+ std::vector<std::list<DynInstPtr> > regMap;
+
+ InOrderCPU *cpu;
+};
+
+#endif //__CPU_INORDER_REG_DEP_MAP_HH__
+
+
+
+
+
+
+
diff --git a/src/cpu/inorder/resource.cc b/src/cpu/inorder/resource.cc
new file mode 100644
index 000000000..3106628f0
--- /dev/null
+++ b/src/cpu/inorder/resource.cc
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/cpu.hh"
+using namespace std;
+
+Resource::Resource(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu)
+ : resName(res_name), id(res_id),
+ width(res_width), latency(res_latency), cpu(_cpu)
+{
+    // Used to deny an instruction a resource.
+ deniedReq = new ResourceRequest(this, NULL, 0, 0, 0, 0);
+}
+
+void
+Resource::init()
+{
+    // Set up one resource event per slot of resource bandwidth
+ resourceEvent = new ResourceEvent[width];
+
+ initSlots();
+}
+
+void
+Resource::initSlots()
+{
+ // Add available slot numbers for resource
+ for (int slot_idx = 0; slot_idx < width; slot_idx++) {
+ availSlots.push_back(slot_idx);
+ resourceEvent[slot_idx].init(this, slot_idx);
+ }
+}
+
+std::string
+Resource::name()
+{
+ return cpu->name() + "." + resName;
+}
+
+void
+Resource::regStats()
+{
+ instReqsProcessed
+ .name(name() + ".instReqsProcessed")
+ .desc("Number of Instructions Requests that completed in this resource.");
+}
+
+int
+Resource::slotsAvail()
+{
+ return availSlots.size();
+}
+
+int
+Resource::slotsInUse()
+{
+ return width - availSlots.size();
+}
+
+void
+Resource::freeSlot(int slot_idx)
+{
+ DPRINTF(RefCount, "Removing [tid:%i] [sn:%i]'s request from resource [slot:%i].\n",
+ reqMap[slot_idx]->inst->readTid(),
+ reqMap[slot_idx]->inst->seqNum,
+ slot_idx);
+
+ // Put slot number on this resource's free list
+ availSlots.push_back(slot_idx);
+
+ // Erase Request Pointer From Request Map
+ std::map<int, ResReqPtr>::iterator req_it = reqMap.find(slot_idx);
+
+ assert(req_it != reqMap.end());
+ reqMap.erase(req_it);
+
+}
+
+// TODO: More efficiently search for instruction's slot within
+// resource.
+int
+Resource::findSlot(DynInstPtr inst)
+{
+ map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+ map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+ int slot_num = -1;
+
+ while (map_it != map_end) {
+ if ((*map_it).second->getInst()->seqNum ==
+ inst->seqNum) {
+ slot_num = (*map_it).second->getSlot();
+ }
+ map_it++;
+ }
+
+ return slot_num;
+}
+
+int
+Resource::getSlot(DynInstPtr inst)
+{
+ int slot_num;
+
+ if (slotsAvail() != 0) {
+ slot_num = availSlots[0];
+
+ vector<int>::iterator vect_it = availSlots.begin();
+
+ assert(slot_num == *vect_it);
+
+ availSlots.erase(vect_it);
+ } else {
+ DPRINTF(Resource, "[tid:%i]: No slots in resource "
+ "available to service [sn:%i].\n", inst->readTid(),
+ inst->seqNum);
+ slot_num = -1;
+
+ map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+ map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+ while (map_it != map_end) {
+ if ((*map_it).second) {
+ DPRINTF(Resource, "Currently Serving request from: [tid:%i] [sn:%i].\n",
+ (*map_it).second->getInst()->readTid(),
+ (*map_it).second->getInst()->seqNum);
+ }
+ map_it++;
+ }
+ }
+
+ return slot_num;
+}
+
+ResReqPtr
+Resource::request(DynInstPtr inst)
+{
+ // See if the resource is already serving this instruction.
+ // If so, use that request;
+ bool try_request = false;
+ int slot_num;
+ int stage_num;
+ ResReqPtr inst_req = findRequest(inst);
+
+ if (inst_req) {
+        // If some preprocessing has to be done on an instruction
+        // that has already requested once, handle it here and update
+        // the 'try_request' variable if we should re-execute the request.
+ requestAgain(inst, try_request);
+
+ slot_num = inst_req->getSlot();
+ stage_num = inst_req->getStageNum();
+ } else {
+ // Get new slot # for instruction
+ slot_num = getSlot(inst);
+
+ if (slot_num != -1) {
+ // Get Stage # from Schedule Entry
+ stage_num = inst->resSched.top()->stageNum;
+ unsigned cmd = inst->resSched.top()->cmd;
+
+ // Generate Resource Request
+ inst_req = getRequest(inst, stage_num, id, slot_num, cmd);
+
+ if (inst->staticInst) {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i] requesting this resource.\n",
+ inst->readTid(), inst->seqNum);
+ } else {
+ DPRINTF(Resource, "[tid:%i]: instruction requesting this resource.\n",
+ inst->readTid());
+ }
+
+ reqMap[slot_num] = inst_req;
+
+ try_request = true;
+ }
+ }
+
+ if (try_request) {
+ // Schedule execution of resource
+ scheduleExecution(slot_num);
+ } else {
+ inst_req = deniedReq;
+ rejectRequest(inst);
+ }
+
+ return inst_req;
+}
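+
+// Illustrative caller-side flow (a sketch; the real stage logic lives in
+// pipeline_stage.cc):
+//
+//     ResReqPtr req = resource->request(inst);
+//     if (req->isCompleted()) {
+//         // zero-latency resource: the request finished this cycle
+//     } else {
+//         // either a slot was granted and a ResourceEvent will call
+//         // execute() after the resource's latency elapses, or the
+//         // request was denied and the stage must retry later
+//     }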
+
+void
+Resource::requestAgain(DynInstPtr inst, bool &do_request)
+{
+ do_request = true;
+
+ if (inst->staticInst) {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i] requesting this resource again.\n",
+ inst->readTid(), inst->seqNum);
+ } else {
+ DPRINTF(Resource, "[tid:%i]: requesting this resource again.\n",
+ inst->readTid());
+ }
+}
+
+ResReqPtr
+Resource::getRequest(DynInstPtr inst, int stage_num, int res_idx,
+ int slot_num, unsigned cmd)
+{
+ return new ResourceRequest(this, inst, stage_num, id, slot_num,
+ cmd);
+}
+
+ResReqPtr
+Resource::findRequest(DynInstPtr inst)
+{
+ map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+ map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+ while (map_it != map_end) {
+ if ((*map_it).second &&
+ (*map_it).second->getInst() == inst) {
+ return (*map_it).second;
+ }
+ map_it++;
+ }
+
+ return NULL;
+}
+
+void
+Resource::rejectRequest(DynInstPtr inst)
+{
+ DPRINTF(RefCount, "[tid:%i]: Unable to grant request for [sn:%i].\n",
+ inst->readTid(), inst->seqNum);
+}
+
+void
+Resource::execute(int slot_idx)
+{
+ DPRINTF(Resource, "[tid:%i]: Executing %s resource.\n",
+ reqMap[slot_idx]->getTid(), name());
+ reqMap[slot_idx]->setCompleted(true);
+ reqMap[slot_idx]->fault = NoFault;
+ reqMap[slot_idx]->done();
+}
+
+void
+Resource::deactivateThread(unsigned tid)
+{
+ // In the most basic case, deactivation means squashing everything
+ // from a particular thread
+ DynInstPtr dummy_inst = new InOrderDynInst(cpu, NULL, 0, tid);
+ squash(dummy_inst, 0, 0, tid);
+}
+
+void
+Resource::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num, unsigned tid)
+{
+ std::vector<int> slot_remove_list;
+
+ map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+ map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+ while (map_it != map_end) {
+ ResReqPtr req_ptr = (*map_it).second;
+
+ if (req_ptr &&
+ req_ptr->getInst()->readTid() == tid &&
+ req_ptr->getInst()->seqNum > squash_seq_num) {
+
+ DPRINTF(Resource, "[tid:%i]: Squashing [sn:%i].\n",
+ req_ptr->getInst()->readTid(),
+ req_ptr->getInst()->seqNum);
+
+ int req_slot_num = req_ptr->getSlot();
+
+ unscheduleEvent(req_slot_num);
+
+ // Mark request for later removal
+ cpu->reqRemoveList.push(req_ptr);
+
+ // Mark slot for removal from resource
+ slot_remove_list.push_back(req_ptr->getSlot());
+ }
+
+ map_it++;
+ }
+
+ // Now Delete Slot Entry from Req. Map
+ for (int i = 0; i < slot_remove_list.size(); i++) {
+ freeSlot(slot_remove_list[i]);
+ }
+}
+
+
+Tick
+Resource::ticks(int num_cycles)
+{
+ return cpu->ticks(num_cycles);
+}
+
+
+void
+Resource::scheduleExecution(int slot_num)
+{
+ int res_latency = getLatency(slot_num);
+
+ if (res_latency >= 1) {
+ scheduleEvent(slot_num, res_latency);
+ } else {
+ execute(slot_num);
+ }
+}
+
+void
+Resource::scheduleEvent(int slot_idx, int delay)
+{
+ DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n",
+ reqMap[slot_idx]->inst->readTid(),
+ reqMap[slot_idx]->inst->seqNum,
+ cpu->ticks(delay) + curTick);
+ resourceEvent[slot_idx].scheduleEvent(delay);
+}
+
+bool
+Resource::scheduleEvent(DynInstPtr inst, int delay)
+{
+ int slot_idx = findSlot(inst);
+
+    if (slot_idx != -1)
+        resourceEvent[slot_idx].scheduleEvent(delay);
+
+    return slot_idx != -1;
+}
+
+void
+Resource::unscheduleEvent(int slot_idx)
+{
+ resourceEvent[slot_idx].unscheduleEvent();
+}
+
+bool
+Resource::unscheduleEvent(DynInstPtr inst)
+{
+ int slot_idx = findSlot(inst);
+
+    if (slot_idx != -1)
+        resourceEvent[slot_idx].unscheduleEvent();
+
+    return slot_idx != -1;
+}
+
+int ResourceRequest::resReqID = 0;
+
+int ResourceRequest::resReqCount = 0;
+
+void
+ResourceRequest::done(bool completed)
+{
+ DPRINTF(Resource, "%s done with request from [sn:%i] [tid:%i].\n",
+ res->name(), inst->seqNum, inst->readTid());
+
+ setCompleted(completed);
+
+ // Add to remove list
+ res->cpu->reqRemoveList.push(res->reqMap[slotNum]);
+
+ // Free Slot So Another Instruction Can Use This Resource
+ res->freeSlot(slotNum);
+
+ res->instReqsProcessed++;
+}
+
+ResourceEvent::ResourceEvent()
+ : Event((Event::Priority)Resource_Event_Pri)
+{ }
+
+ResourceEvent::ResourceEvent(Resource *res, int slot_idx)
+ : Event((Event::Priority)Resource_Event_Pri), resource(res),
+ slotIdx(slot_idx)
+{ }
+
+void
+ResourceEvent::init(Resource *res, int slot_idx)
+{
+ resource = res;
+ slotIdx = slot_idx;
+}
+
+void
+ResourceEvent::process()
+{
+ resource->execute(slotIdx);
+}
+
+const char *
+ResourceEvent::description()
+{
+    // Returning c_str() of a local string would be a dangling pointer,
+    // so return a static string instead.
+    return "Resource event";
+}
diff --git a/src/cpu/inorder/resource.hh b/src/cpu/inorder/resource.hh
new file mode 100644
index 000000000..e3536258d
--- /dev/null
+++ b/src/cpu/inorder/resource.hh
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_RESOURCE_HH__
+#define __CPU_INORDER_RESOURCE_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inst_seq.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "sim/eventq.hh"
+#include "sim/sim_object.hh"
+
+class Event;
+class InOrderCPU;
+class ResourceEvent;
+class ResourceRequest;
+
+typedef ResourceRequest ResReq;
+typedef ResourceRequest* ResReqPtr;
+
+class Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ friend class ResourceEvent;
+ friend class ResourceRequest;
+
+ public:
+ Resource(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu);
+ virtual ~Resource() {}
+
+ /** Return name of this resource */
+ virtual std::string name();
+
+    /** Define this function if the resource has a port to connect to an
+     * outside simulation object.
+ */
+ virtual Port* getPort(const std::string &if_name, int idx) { return NULL; }
+
+ /** Return ID for this resource */
+ int getId() { return id; }
+
+    /** Any extra initialization can be done in this function, which is
+     * called before the simulation starts (tick 0).
+ */
+ virtual void init();
+ virtual void initSlots();
+
+ /** Tasks to perform when simulation starts */
+ //virtual void startup() { }
+
+ /** Register Stats for this resource */
+ virtual void regStats();
+
+ /** Resources that care about thread activation override this. */
+ virtual void activateThread(unsigned tid) { }
+
+ /** Deactivate Thread. Default action is to squash all instructions
+ * from deactivated thread.
+ */
+ virtual void deactivateThread(unsigned tid);
+
+ /** Resources that care when an instruction has been graduated
+ * can override this
+ */
+ virtual void instGraduated(InstSeqNum seq_num,unsigned tid) { }
+
+ /** Request usage of this resource. Returns a ResourceRequest object
+ * with all the necessary resource information
+ */
+ virtual ResourceRequest* request(DynInstPtr inst);
+
+ /** Get the next available slot in this resource. Instruction is passed
+ * so that resources can check the instruction before allocating a slot
+ * if necessary.
+ */
+ virtual int getSlot(DynInstPtr inst);
+
+ /** Find the slot that this instruction is using in a resource */
+ virtual int findSlot(DynInstPtr inst);
+
+ /** Free a resource slot */
+ virtual void freeSlot(int slot_idx);
+
+    /** Request usage of a resource for this instruction. If this instruction
+     * has already made this request to this resource and that request is
+     * uncompleted, this function will simply return that request.
+ */
+ virtual ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,
+ int res_idx, int slot_num,
+ unsigned cmd);
+
+ /** Schedule Execution of This Resource For A Given Slot*/
+ virtual void scheduleExecution(int slot_idx);
+
+    /** Execute the function of this resource. The default action simply
+     * marks the request complete. More specific models derive from this
+     * class and define their own execute function.
+ */
+ virtual void execute(int slot_idx);
+
+ /** Fetch on behalf of an instruction. Will check to see
+ * if instruction is actually in resource before
+ * trying to fetch. Needs to be defined for derived units.
+ */
+ virtual Fault doFetchAccess(DynInstPtr inst)
+ { panic("doFetchAccess undefined for %s", name()); return NoFault; }
+
+ /** Read/Write on behalf of an instruction. Will check to see
+ * if instruction is actually in resource before
+     * trying to do the access. Needs to be defined for derived units.
+ */
+ virtual Fault doDataAccess(DynInstPtr inst)
+ { panic("doDataAccess undefined for %s", name()); return NoFault; }
+
+ /** Squash All Requests After This Seq Num */
+ virtual void squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num, unsigned tid);
+
+    /** The number of additional instructions that this resource can
+     * still process.
+ */
+ int slotsAvail();
+
+ /** The number of instructions using this resource */
+ int slotsInUse();
+
+ /** Schedule resource event, regardless of its current state. */
+ void scheduleEvent(int slot_idx, int delay);
+
+    /** Find the slot used by an instruction, then schedule its resource event, regardless of its current state. */
+ bool scheduleEvent(DynInstPtr inst, int delay);
+
+ /** Unschedule resource event, regardless of its current state. */
+ void unscheduleEvent(int slot_idx);
+
+ /** Unschedule resource event, regardless of its current state. */
+ bool unscheduleEvent(DynInstPtr inst);
+
+ /** Return the number of cycles in 'Tick' format */
+ Tick ticks(int numCycles);
+
+ /** Find the request that corresponds to this instruction */
+ virtual ResReqPtr findRequest(DynInstPtr inst);
+
+    /** Default action when a request for this resource cannot be granted */
+ virtual void rejectRequest(DynInstPtr inst);
+
+ /** Request a Resource again. Some resources have to special process this
+ * in subsequent accesses.
+ */
+ virtual void requestAgain(DynInstPtr inst, bool &try_request);
+
+ /** Return Latency of Resource */
+ /* Can be overridden for complex cases */
+ virtual int getLatency(int slot_num) { return latency; }
+
+ protected:
+ /** The name of this resource */
+ std::string resName;
+
+ /** ID of the resource. The Resource Pool uses this # to identify this
+ * resource.
+ */
+ int id;
+
+ /** The number of instructions the resource can simultaneously
+ * process.
+ */
+ int width;
+
+ /** Constant latency for this resource.
+ * Note: Dynamic latency resources set this to 0 and
+ * manage the latency themselves
+ */
+ const int latency;
+
+ public:
+ /** Mapping of slot-numbers to the resource-request pointers */
+ std::map<int, ResReqPtr> reqMap;
+
+ /** A list of all the available execution slots for this resource.
+ * This correlates with the actual resource event idx.
+ */
+ std::vector<int> availSlots;
+
+    /** The CPU that this resource interacts with */
+ InOrderCPU *cpu;
+
+ protected:
+ /** The resource event used for scheduling resource slots on the
+ * event queue
+ */
+ ResourceEvent *resourceEvent;
+
+ /** Default denied resource request pointer*/
+ ResReqPtr deniedReq;
+
+ public:
+ /////////////////////////////////////////////////////////////////
+ //
+ // DEFAULT RESOURCE STATISTICS
+ //
+ /////////////////////////////////////////////////////////////////
+ /** Number of Instruction Requests the Resource Processes */
+ Stats::Scalar<> instReqsProcessed;
+};
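+
+// Sketch of a minimal derived resource (the class name is illustrative; see
+// the units under cpu/inorder/resources/ for real examples):
+//
+//     class MyUnit : public Resource {
+//       public:
+//         MyUnit(std::string name, int id, int width, int latency,
+//                InOrderCPU *cpu)
+//             : Resource(name, id, width, latency, cpu)
+//         { }
+//
+//         void execute(int slot_idx)
+//         {
+//             ResReqPtr req = reqMap[slot_idx];
+//             // ... unit-specific work on req->getInst() goes here ...
+//             req->done();  // completes the request and frees the slot
+//         }
+//     };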
+
+class ResourceEvent : public Event
+{
+ public:
+    /** Pointer to the resource that this event services. */
+ Resource *resource;
+
+
+ /// Resource events that come before other associated CPU events
+ /// (for InOrderCPU model).
+ /// check src/sim/eventq.hh for more event priorities.
+ enum InOrderPriority {
+ Resource_Event_Pri = 45,
+ };
+
+ /** The Resource Slot that this event is servicing */
+ int slotIdx;
+
+ /** Constructs a resource event. */
+ ResourceEvent();
+ ResourceEvent(Resource *res, int slot_idx);
+ virtual ~ResourceEvent() { }
+
+ /** Initialize data for this resource event. */
+ virtual void init(Resource *res, int slot_idx);
+
+ /** Processes a resource event. */
+ virtual void process();
+
+ /** Returns the description of the resource event. */
+ const char *description();
+
+ /** Set slot idx for event */
+ void setSlot(int slot) { slotIdx = slot; }
+
+ /** Schedule resource event, regardless of its current state. */
+ void scheduleEvent(int delay)
+ {
+ if (squashed())
+ mainEventQueue.reschedule(this, curTick + resource->ticks(delay));
+ else if (!scheduled())
+ mainEventQueue.schedule(this, curTick + resource->ticks(delay));
+ }
+
+ /** Unschedule resource event, regardless of its current state. */
+ void unscheduleEvent()
+ {
+ if (scheduled())
+ squash();
+ }
+
+};
+
+class ResourceRequest
+{
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ static int resReqID;
+
+ static int resReqCount;
+
+ public:
+ ResourceRequest(Resource *_res, DynInstPtr _inst, int stage_num,
+ int res_idx, int slot_num, unsigned _cmd)
+ : res(_res), inst(_inst), cmd(_cmd), stageNum(stage_num),
+ resIdx(res_idx), slotNum(slot_num), completed(false),
+ squashed(false), processing(false), waiting(false)
+ {
+ reqID = resReqID++;
+ resReqCount++;
+ DPRINTF(ResReqCount, "Res. Req %i created. resReqCount=%i.\n", reqID, resReqCount);
+
+ if (resReqCount > 100) {
+ fatal("Too many undeleted resource requests. Memory leak?\n");
+ }
+ }
+
+ virtual ~ResourceRequest()
+ {
+ resReqCount--;
+ DPRINTF(ResReqCount, "Res. Req %i deleted. resReqCount=%i.\n", reqID, resReqCount);
+ }
+
+ int reqID;
+
+    /** Acknowledge that this request is done and remove it
+     * from the resource.
+ */
+ void done(bool completed = true);
+
+ /////////////////////////////////////////////
+ //
+ // GET RESOURCE REQUEST IDENTIFICATION / INFO
+ //
+ /////////////////////////////////////////////
+ /** Get Resource Index */
+ int getResIdx() { return resIdx; }
+
+ /** Get Slot Number */
+ int getSlot() { return slotNum; }
+
+ /** Get Stage Number */
+ int getStageNum() { return stageNum; }
+
+ /** Set/Get Thread Ids */
+ void setTid(unsigned _tid) { tid = _tid; }
+ int getTid() { return tid; }
+
+ /** Instruction this request is for */
+ DynInstPtr getInst() { return inst; }
+
+ /** Data from this request. Overridden by Resource-Specific Request
+ * Objects
+ */
+ virtual PacketDataPtr getData() { return NULL; }
+
+ /** Pointer to Resource that is being used */
+ Resource *res;
+
+ /** Instruction being used */
+ DynInstPtr inst;
+
+ /** Fault Associated With This Resource Request */
+ Fault fault;
+
+ /** Command For This Resource */
+ unsigned cmd;
+
+ ////////////////////////////////////////
+ //
+ // GET RESOURCE REQUEST STATUS FROM VARIABLES
+ //
+ ////////////////////////////////////////
+ /** Get/Set Completed variables */
+ bool isCompleted() { return completed; }
+ void setCompleted(bool cond = true) { completed = cond; }
+
+ /** Get/Set Squashed variables */
+ bool isSquashed() { return squashed; }
+ void setSquashed() { squashed = true; }
+
+ /** Get/Set IsProcessing variables */
+ bool isProcessing() { return processing; }
+ void setProcessing() { processing = true; }
+
+ /** Get/Set IsWaiting variables */
+ bool isWaiting() { return waiting; }
+ void setWaiting() { waiting = true; }
+
+ protected:
+ /** Resource Identification */
+ int tid;
+ int stageNum;
+ int resIdx;
+ int slotNum;
+
+ /** Resource Status */
+ bool completed;
+ bool squashed;
+ bool processing;
+ bool waiting;
+};
+
+#endif //__CPU_INORDER_RESOURCE_HH__
diff --git a/src/cpu/inorder/resource_pool.9stage.cc b/src/cpu/inorder/resource_pool.9stage.cc
new file mode 100644
index 000000000..4a0258e71
--- /dev/null
+++ b/src/cpu/inorder/resource_pool.9stage.cc
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/resource_pool.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+
+#include <vector>
+#include <list>
+
+using namespace std;
+using namespace ThePipeline;
+
+ResourcePool::ResourcePool(InOrderCPU *_cpu, InOrderCPUParams *params)
+ : cpu(_cpu)
+{
+ //@todo: use this function to instantiate the resources in resource pool. This will help in the
+ //auto-generation of this pipeline model.
+ //ThePipeline::addResources(resources, memObjects);
+
+ // Declare Resource Objects
+ // name - id - bandwidth - latency - CPU - Parameters
+ // --------------------------------------------------
+ resources.push_back(new FetchSeqUnit("Fetch-Seq-Unit", FetchSeq, StageWidth * 2, 0, _cpu, params));
+
+ resources.push_back(new TLBUnit("I-TLB", ITLB, StageWidth, 0, _cpu, params));
+
+ memObjects.push_back(ICache);
+ resources.push_back(new CacheUnit("icache_port", ICache, StageWidth * MaxThreads, 0, _cpu, params));
+
+ resources.push_back(new DecodeUnit("Decode-Unit", Decode, StageWidth, 0, _cpu, params));
+
+ resources.push_back(new BranchPredictor("Branch-Predictor", BPred, StageWidth, 0, _cpu, params));
+
+ for (int i = 0; i < params->numberOfThreads; i++) {
+ char fbuff_name[20];
+ sprintf(fbuff_name, "Fetch-Buffer-T%i", i);
+ resources.push_back(new InstBuffer(fbuff_name, FetchBuff + i, 4, 0, _cpu, params));
+ }
+
+ resources.push_back(new UseDefUnit("RegFile-Manager", RegManager, StageWidth * MaxThreads, 0, _cpu, params));
+
+ resources.push_back(new AGENUnit("AGEN-Unit", AGEN, StageWidth, 0, _cpu, params));
+
+ resources.push_back(new ExecutionUnit("Execution-Unit", ExecUnit, StageWidth, 0, _cpu, params));
+
+ resources.push_back(new MultDivUnit("Mult-Div-Unit", MDU, 5, 0, _cpu, params));
+
+ resources.push_back(new TLBUnit("D-TLB", DTLB, StageWidth, 0, _cpu, params));
+
+ memObjects.push_back(DCache);
+ resources.push_back(new CacheUnit("dcache_port", DCache, StageWidth * MaxThreads, 0, _cpu, params));
+
+ resources.push_back(new GraduationUnit("Graduation-Unit", Grad, StageWidth * MaxThreads, 0, _cpu, params));
+}
+
+void
+ResourcePool::init()
+{
+ for (int i=0; i < resources.size(); i++) {
+ resources[i]->init();
+ }
+}
+
+string
+ResourcePool::name()
+{
+ return cpu->name() + ".ResourcePool";
+}
+
+
+void
+ResourcePool::regStats()
+{
+ DPRINTF(Resource, "Registering Stats Throughout Resource Pool.\n");
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->regStats();
+ }
+}
+
+Port *
+ResourcePool::getPort(const std::string &if_name, int idx)
+{
+ for (int i = 0; i < memObjects.size(); i++) {
+ int obj_idx = memObjects[i];
+ Port *port = resources[obj_idx]->getPort(if_name, idx);
+ if (port != NULL) {
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
+unsigned
+ResourcePool::getPortIdx(const std::string &port_name)
+{
+ for (int i = 0; i < memObjects.size(); i++) {
+ unsigned obj_idx = memObjects[i];
+ Port *port = resources[obj_idx]->getPort(port_name, obj_idx);
+ if (port != NULL) {
+ return obj_idx;
+ }
+ }
+
+ return 0;
+}
+
+ResReqPtr
+ResourcePool::request(int res_idx, DynInstPtr inst)
+{
+ //Make Sure This is a valid resource ID
+ assert(res_idx >= 0 && res_idx < resources.size());
+
+ return resources[res_idx]->request(inst);
+}
+
+void
+ResourcePool::squash(DynInstPtr inst, int res_idx, InstSeqNum done_seq_num, int tid)
+{
+ resources[res_idx]->squash(inst, ThePipeline::NumStages-1, done_seq_num, tid);
+}
+
+int
+ResourcePool::slotsAvail(int res_idx)
+{
+ return resources[res_idx]->slotsAvail();
+}
+
+int
+ResourcePool::slotsInUse(int res_idx)
+{
+ return resources[res_idx]->slotsInUse();
+}
+
+void
+ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
+ int delay, int res_idx, int tid)
+{
+ assert(delay >= 0);
+
+ ResPoolEvent *res_pool_event = new ResPoolEvent(this);
+
+ switch (e_type)
+ {
+ case InOrderCPU::ActivateThread:
+ {
+ DPRINTF(Resource, "Scheduling Activate Thread Resource Pool Event for tick %i.\n",
+ curTick + delay);
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->bdelaySeqNum,
+ inst->readTid());
+ res_pool_event->schedule(curTick + cpu->cycles(delay));
+
+ }
+ break;
+
+ case InOrderCPU::SuspendThread:
+ case InOrderCPU::DeallocateThread:
+ {
+ DPRINTF(Resource, "Scheduling Deactivate Thread Resource Pool Event for tick %i.\n",
+ curTick + delay);
+
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->bdelaySeqNum,
+ tid);
+
+ res_pool_event->schedule(curTick + cpu->cycles(delay));
+
+ }
+ break;
+
+ case ResourcePool::InstGraduated:
+ {
+ DPRINTF(Resource, "Scheduling Inst-Graduated Resource Pool Event for tick %i.\n",
+ curTick + delay);
+
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->seqNum,
+ inst->readTid());
+ res_pool_event->schedule(curTick + cpu->cycles(delay));
+
+ }
+ break;
+
+ case ResourcePool::SquashAll:
+ {
+ DPRINTF(Resource, "Scheduling Squash Resource Pool Event for tick %i.\n",
+ curTick + delay);
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->bdelaySeqNum,
+ inst->readTid());
+ res_pool_event->schedule(curTick + cpu->cycles(delay));
+
+ }
+ break;
+
+ default:
+ DPRINTF(Resource, "Ignoring Unrecognized CPU Event Type #%i.\n", e_type);
+        ; // If the Resource Pool doesn't recognize the event, we ignore it.
+ }
+}
+
+void
+ResourcePool::unscheduleEvent(int res_idx, DynInstPtr inst)
+{
+ resources[res_idx]->unscheduleEvent(inst);
+}
+
+void
+ResourcePool::squashAll(DynInstPtr inst, int stage_num, InstSeqNum done_seq_num, unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Stage %i squashing all instructions above [sn:%i].\n",
+ stage_num, tid, done_seq_num);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->squash(inst, stage_num, done_seq_num, tid);
+ }
+}
+
+void
+ResourcePool::activateAll(unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Broadcasting Thread Activation to all resources.\n",
+ tid);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->activateThread(tid);
+ }
+}
+
+void
+ResourcePool::deactivateAll(unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Broadcasting Thread Deactivation to all resources.\n",
+ tid);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->deactivateThread(tid);
+ }
+}
+
+void
+ResourcePool::instGraduated(InstSeqNum seq_num,unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Broadcasting [sn:%i] graduation to all resources.\n",
+ tid, seq_num);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->instGraduated(seq_num, tid);
+ }
+}
+
+ResourcePool::ResPoolEvent::ResPoolEvent(ResourcePool *_resPool)
+ : Event(&mainEventQueue, CPU_Tick_Pri),
+ resPool(_resPool)
+{ eventType = (InOrderCPU::CPUEventType) Default; }
+
+void
+ResourcePool::ResPoolEvent::process()
+{
+ switch (eventType)
+ {
+ case InOrderCPU::ActivateThread:
+ resPool->activateAll(tid);
+ break;
+
+ case InOrderCPU::SuspendThread:
+ case InOrderCPU::DeallocateThread:
+ resPool->deactivateAll(tid);
+ break;
+
+ case ResourcePool::InstGraduated:
+ resPool->instGraduated(seqNum, tid);
+ break;
+
+ case ResourcePool::SquashAll:
+ resPool->squashAll(inst, stageNum, seqNum, tid);
+ break;
+
+ default:
+ fatal("Unrecognized Event Type");
+ }
+
+ resPool->cpu->cpuEventRemoveList.push(this);
+}
+
+
+const char *
+ResourcePool::ResPoolEvent::description()
+{
+ return "Resource Pool event";
+}
+
+/** Schedule resource event, regardless of its current state. */
+void
+ResourcePool::ResPoolEvent::scheduleEvent(int delay)
+{
+ if (squashed())
+ reschedule(curTick + resPool->cpu->cycles(delay));
+ else if (!scheduled())
+ schedule(curTick + resPool->cpu->cycles(delay));
+}
+
+/** Unschedule resource event, regardless of its current state. */
+void
+ResourcePool::ResPoolEvent::unscheduleEvent()
+{
+ if (scheduled())
+ squash();
+}
diff --git a/src/cpu/inorder/resource_pool.cc b/src/cpu/inorder/resource_pool.cc
new file mode 100644
index 000000000..94af68c7a
--- /dev/null
+++ b/src/cpu/inorder/resource_pool.cc
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/resource_pool.hh"
+#include "cpu/inorder/resources/resource_list.hh"
+
+#include <vector>
+#include <list>
+
+using namespace std;
+using namespace ThePipeline;
+
+ResourcePool::ResourcePool(InOrderCPU *_cpu, ThePipeline::Params *params)
+ : cpu(_cpu)
+{
+ //@todo: use this function to instantiate the resources in resource pool. This will help in the
+ //auto-generation of this pipeline model.
+ //ThePipeline::addResources(resources, memObjects);
+
+ // Declare Resource Objects
+ // name - id - bandwidth - latency - CPU - Parameters
+ // --------------------------------------------------
+ resources.push_back(new FetchSeqUnit("Fetch-Seq-Unit", FetchSeq, StageWidth * 2, 0, _cpu, params));
+
+ resources.push_back(new TLBUnit("I-TLB", ITLB, StageWidth, 0, _cpu, params));
+
+ memObjects.push_back(ICache);
+ resources.push_back(new CacheUnit("icache_port", ICache, StageWidth * MaxThreads, 0, _cpu, params));
+
+ resources.push_back(new DecodeUnit("Decode-Unit", Decode, StageWidth, 0, _cpu, params));
+
+ resources.push_back(new BranchPredictor("Branch-Predictor", BPred, StageWidth, 0, _cpu, params));
+
+ resources.push_back(new InstBuffer("Fetch-Buffer-T0", FetchBuff, 4, 0, _cpu, params));
+
+ resources.push_back(new UseDefUnit("RegFile-Manager", RegManager, StageWidth * MaxThreads, 0, _cpu, params));
+
+ resources.push_back(new AGENUnit("AGEN-Unit", AGEN, StageWidth, 0, _cpu, params));
+
+ resources.push_back(new ExecutionUnit("Execution-Unit", ExecUnit, StageWidth, 0, _cpu, params));
+
+ resources.push_back(new MultDivUnit("Mult-Div-Unit", MDU, 5, 0, _cpu, params));
+
+ resources.push_back(new TLBUnit("D-TLB", DTLB, StageWidth, 0, _cpu, params));
+
+ memObjects.push_back(DCache);
+ resources.push_back(new CacheUnit("dcache_port", DCache, StageWidth * MaxThreads, 0, _cpu, params));
+
+ resources.push_back(new GraduationUnit("Graduation-Unit", Grad, StageWidth * MaxThreads, 0, _cpu, params));
+
+ resources.push_back(new InstBuffer("Fetch-Buffer-T1", FetchBuff2, 4, 0, _cpu, params));
+}
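+
+// Note: the push_back order above is assumed to mirror the ResourceList enum
+// in pipeline_traits.hh, since the pool indexes resources by that enum, e.g.:
+//
+//     Resource *icache = resources[ICache];  // relies on matching order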
+
+void
+ResourcePool::init()
+{
+ for (int i=0; i < resources.size(); i++) {
+ DPRINTF(Resource, "Initializing resource: %s.\n", resources[i]->name());
+
+ resources[i]->init();
+ }
+}
+
+string
+ResourcePool::name()
+{
+ return cpu->name() + ".ResourcePool";
+}
+
+
+void
+ResourcePool::regStats()
+{
+ DPRINTF(Resource, "Registering Stats Throughout Resource Pool.\n");
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->regStats();
+ }
+}
+
+Port *
+ResourcePool::getPort(const std::string &if_name, int idx)
+{
+ DPRINTF(Resource, "Binding %s in Resource Pool.\n", if_name);
+
+ for (int i = 0; i < memObjects.size(); i++) {
+ int obj_idx = memObjects[i];
+ Port *port = resources[obj_idx]->getPort(if_name, idx);
+ if (port != NULL) {
+ DPRINTF(Resource, "%s set to resource %s(#%i) in Resource Pool.\n", if_name,
+ resources[obj_idx]->name(), obj_idx);
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
+unsigned
+ResourcePool::getPortIdx(const std::string &port_name)
+{
+ DPRINTF(Resource, "Finding Port Idx for %s.\n", port_name);
+
+ for (int i = 0; i < memObjects.size(); i++) {
+ unsigned obj_idx = memObjects[i];
+ Port *port = resources[obj_idx]->getPort(port_name, obj_idx);
+ if (port != NULL) {
+ DPRINTF(Resource, "Returning Port Idx %i for %s.\n", obj_idx, port_name);
+ return obj_idx;
+ }
+ }
+
+ return 0;
+}
+
+ResReqPtr
+ResourcePool::request(int res_idx, DynInstPtr inst)
+{
+ //Make Sure This is a valid resource ID
+ assert(res_idx >= 0 && res_idx < resources.size());
+
+ return resources[res_idx]->request(inst);
+}
+
+void
+ResourcePool::squash(DynInstPtr inst, int res_idx, InstSeqNum done_seq_num, int tid)
+{
+ resources[res_idx]->squash(inst, ThePipeline::NumStages-1, done_seq_num, tid);
+}
+
+int
+ResourcePool::slotsAvail(int res_idx)
+{
+ return resources[res_idx]->slotsAvail();
+}
+
+int
+ResourcePool::slotsInUse(int res_idx)
+{
+ return resources[res_idx]->slotsInUse();
+}
+
+void
+ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
+ int delay, int res_idx, int tid)
+{
+ assert(delay >= 0);
+
+ ResPoolEvent *res_pool_event = new ResPoolEvent(this);
+
+ switch (e_type)
+ {
+ case InOrderCPU::ActivateThread:
+ {
+ DPRINTF(Resource, "Scheduling Activate Thread Resource Pool Event for tick %i.\n",
+ curTick + delay);
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->bdelaySeqNum,
+ inst->readTid());
+ mainEventQueue.schedule(res_pool_event, curTick + cpu->ticks(delay));
+
+ }
+ break;
+
+ case InOrderCPU::SuspendThread:
+ case InOrderCPU::DeallocateThread:
+ {
+ DPRINTF(Resource, "Scheduling Deactivate Thread Resource Pool Event for tick %i.\n",
+ curTick + delay);
+
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->bdelaySeqNum,
+ tid);
+
+ mainEventQueue.schedule(res_pool_event, curTick + cpu->ticks(delay));
+
+ }
+ break;
+
+ case ResourcePool::InstGraduated:
+ {
+ DPRINTF(Resource, "Scheduling Inst-Graduated Resource Pool Event for tick %i.\n",
+ curTick + delay);
+
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->seqNum,
+ inst->readTid());
+ mainEventQueue.schedule(res_pool_event, curTick + cpu->ticks(delay));
+
+ }
+ break;
+
+ case ResourcePool::SquashAll:
+ {
+ DPRINTF(Resource, "Scheduling Squash Resource Pool Event for tick %i.\n",
+ curTick + delay);
+ res_pool_event->setEvent(e_type,
+ inst,
+ inst->squashingStage,
+ inst->bdelaySeqNum,
+ inst->readTid());
+ mainEventQueue.schedule(res_pool_event, curTick + cpu->ticks(delay));
+
+ }
+ break;
+
+ default:
+ DPRINTF(Resource, "Ignoring Unrecognized CPU Event Type #%i.\n", e_type);
+        ; // If the Resource Pool doesn't recognize the event, we ignore it.
+ }
+}
+
+void
+ResourcePool::unscheduleEvent(int res_idx, DynInstPtr inst)
+{
+ resources[res_idx]->unscheduleEvent(inst);
+}
+
+void
+ResourcePool::squashAll(DynInstPtr inst, int stage_num, InstSeqNum done_seq_num, unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Stage %i squashing all instructions above [sn:%i].\n",
+ stage_num, tid, done_seq_num);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->squash(inst, stage_num, done_seq_num, tid);
+ }
+}
+
+void
+ResourcePool::activateAll(unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Broadcasting Thread Activation to all resources.\n",
+ tid);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->activateThread(tid);
+ }
+}
+
+void
+ResourcePool::deactivateAll(unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Broadcasting Thread Deactivation to all resources.\n",
+ tid);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->deactivateThread(tid);
+ }
+}
+
+void
+ResourcePool::instGraduated(InstSeqNum seq_num,unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i] Broadcasting [sn:%i] graduation to all resources.\n",
+ tid, seq_num);
+
+ int num_resources = resources.size();
+
+ for (int idx = 0; idx < num_resources; idx++) {
+ resources[idx]->instGraduated(seq_num, tid);
+ }
+}
+
+ResourcePool::ResPoolEvent::ResPoolEvent(ResourcePool *_resPool)
+ : Event(CPU_Tick_Pri),
+ resPool(_resPool)
+{ eventType = (InOrderCPU::CPUEventType) Default; }
+
+void
+ResourcePool::ResPoolEvent::process()
+{
+ switch (eventType)
+ {
+ case InOrderCPU::ActivateThread:
+ resPool->activateAll(tid);
+ break;
+
+ case InOrderCPU::SuspendThread:
+ case InOrderCPU::DeallocateThread:
+ resPool->deactivateAll(tid);
+ break;
+
+ case ResourcePool::InstGraduated:
+ resPool->instGraduated(seqNum, tid);
+ break;
+
+ case ResourcePool::SquashAll:
+ resPool->squashAll(inst, stageNum, seqNum, tid);
+ break;
+
+ default:
+ fatal("Unrecognized Event Type");
+ }
+
+ resPool->cpu->cpuEventRemoveList.push(this);
+}
+
+
+const char *
+ResourcePool::ResPoolEvent::description()
+{
+ return "Resource Pool event";
+}
+
+/** Schedule resource event, regardless of its current state. */
+void
+ResourcePool::ResPoolEvent::scheduleEvent(int delay)
+{
+ if (squashed())
+ mainEventQueue.reschedule(this,curTick + resPool->cpu->ticks(delay));
+ else if (!scheduled())
+ mainEventQueue.schedule(this,curTick + resPool->cpu->ticks(delay));
+}
+
+/** Unschedule resource event, regardless of its current state. */
+void
+ResourcePool::ResPoolEvent::unscheduleEvent()
+{
+ if (scheduled())
+ squash();
+}
diff --git a/src/cpu/inorder/resource_pool.hh b/src/cpu/inorder/resource_pool.hh
new file mode 100644
index 000000000..35fce7db7
--- /dev/null
+++ b/src/cpu/inorder/resource_pool.hh
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_RESOURCE_POOL_HH__
+#define __CPU_INORDER_RESOURCE_POOL_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inst_seq.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/params.hh"
+#include "params/InOrderCPU.hh"
+#include "cpu/inorder/cpu.hh"
+#include "sim/eventq.hh"
+#include "sim/sim_object.hh"
+
+class Event;
+class InOrderCPU;
+class Resource;
+class ResourceEvent;
+
+class ResourcePool {
+ public:
+ typedef InOrderDynInst::DynInstPtr DynInstPtr;
+
+ public:
+    // List of Resource Pool events that extends the CPUEventType list
+    // started by the CPU (defined in inorder/cpu.hh). The Resource Pool
+    // schedules both kinds of events through the same interface.
+ enum ResPoolEventType {
+ InstGraduated = InOrderCPU::NumCPUEvents,
+ SquashAll,
+ Default
+ };
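+
+    // Illustrative: because ResPoolEventType values continue where the CPU's
+    // CPUEventType enum ends, pool-specific events can be scheduled through
+    // the same interface by casting, e.g. (caller-side sketch):
+    //
+    //     resPool->scheduleEvent(
+    //         (InOrderCPU::CPUEventType) ResourcePool::SquashAll, inst);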
+
+ class ResPoolEvent : public Event
+ {
+ protected:
+ /** Resource Pool */
+ ResourcePool *resPool;
+
+ public:
+ InOrderCPU::CPUEventType eventType;
+
+ DynInstPtr inst;
+
+ InstSeqNum seqNum;
+
+ int stageNum;
+
+ unsigned tid;
+
+ public:
+ /** Constructs a resource event. */
+ ResPoolEvent(ResourcePool *_resPool);
+
+ /** Set Type of Event To Be Scheduled */
+ void setEvent(InOrderCPU::CPUEventType e_type,
+ DynInstPtr _inst,
+ int stage_num,
+ InstSeqNum seq_num,
+ unsigned _tid)
+ {
+ eventType = e_type;
+ inst = _inst;
+ seqNum = seq_num;
+ stageNum = stage_num;
+ tid = _tid;
+ }
+
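+        // Note: setEvent() only records the event parameters; nothing is
+        // placed on the event queue until scheduleEvent() is called.
+        // process() dispatches on eventType and then hands this object to
+        // the CPU's cpuEventRemoveList for later reclamation.
+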
+ /** Processes a resource event. */
+ virtual void process();
+
+ /** Returns the description of the resource event. */
+ const char *description();
+
+ /** Schedule Event */
+ void scheduleEvent(int delay);
+
+ /** Unschedule This Event */
+ void unscheduleEvent();
+ };
+
+ public:
+ ResourcePool(InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~ResourcePool() {}
+
+ std::string name();
+
+ std::string name(int res_idx) { return resources[res_idx]->name(); }
+
+ void init();
+
+ /** Register Statistics in All Resources */
+ void regStats();
+
+ /** Returns a specific port. */
+ Port* getPort(const std::string &if_name, int idx);
+
+    /** Returns the index of a specific port. */
+    unsigned getPortIdx(const std::string &if_name);
+
+ Resource* getResource(int res_idx) { return resources[res_idx]; }
+
+    /** Request usage of this resource on behalf of the given
+     *  instruction.
+ */
+ ResReqPtr request(int res_idx, DynInstPtr inst);
+
+ /** Squash The Resource */
+ void squash(DynInstPtr inst, int res_idx, InstSeqNum done_seq_num, int tid);
+
+ /** Squash All Resources in Pool after Done Seq. Num */
+ void squashAll(DynInstPtr inst, int stage_num,
+ InstSeqNum done_seq_num, unsigned tid);
+
+ /** Activate Thread in all resources */
+ void activateAll(unsigned tid);
+
+ /** De-Activate Thread in all resources */
+ void deactivateAll(unsigned tid);
+
+ /** Broadcast graduation to all resources */
+ void instGraduated(InstSeqNum seq_num,unsigned tid);
+
+    /** The number of instruction slots that a resource still has
+     *  available.
+ */
+ int slotsAvail(int res_idx);
+
+ /** The number of instructions using a resource */
+ int slotsInUse(int res_idx);
+
+ /** Schedule resource event, regardless of its current state. */
+ void scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst = NULL,
+ int delay = 0, int res_idx = 0, int tid = 0);
+
+ /** UnSchedule resource event, regardless of its current state. */
+ void unscheduleEvent(int res_idx, DynInstPtr inst);
+
+ /** Tasks to perform when simulation starts */
+ virtual void startup() { }
+
+ /** The CPU(s) that this resource interacts with */
+ InOrderCPU *cpu;
+
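+    /** Per-thread placeholder instructions (used, e.g., when scheduleEvent()
+     *  is called with its default NULL instruction). */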
+ DynInstPtr dummyInst[ThePipeline::MaxThreads];
+
+ private:
+ std::vector<Resource *> resources;
+
+ std::vector<int> memObjects;
+
+};
+
+#endif //__CPU_INORDER_RESOURCE_POOL_HH__
diff --git a/src/cpu/inorder/resources/agen_unit.cc b/src/cpu/inorder/resources/agen_unit.cc
new file mode 100644
index 000000000..8e3d25656
--- /dev/null
+++ b/src/cpu/inorder/resources/agen_unit.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/resources/agen_unit.hh"
+
+AGENUnit::AGENUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{ }
+
+void
+AGENUnit::execute(int slot_num)
+{
+ ResourceRequest* agen_req = reqMap[slot_num];
+ DynInstPtr inst = reqMap[slot_num]->inst;
+ Fault fault = reqMap[slot_num]->fault;
+ int tid;
+ int seq_num = inst->seqNum;
+
+ tid = inst->readTid();
+ agen_req->fault = NoFault;
+
+ switch (agen_req->cmd)
+ {
+ case GenerateAddr:
+ {
+ // Load/Store Instruction
+ if (inst->isMemRef()) {
+ DPRINTF(Resource, "[tid:%i] Generating Address for [sn:%i] (%s).\n",
+ tid, inst->seqNum, inst->staticInst->getName());
+
+
+            // We are not handling prefetches quite yet
+            if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
+                panic("Prefetches aren't handled yet.\n");
+ } else {
+ if (inst->isLoad()) {
+ fault = inst->calcEA();
+ inst->setMemAddr(inst->getEA());
+ //inst->setExecuted();
+
+ DPRINTF(Resource, "[tid:%i] [sn:%i] Effective address calculated to be: "
+ "%#x.\n", tid, inst->seqNum, inst->getEA());
+ } else if (inst->isStore()) {
+ fault = inst->calcEA();
+ inst->setMemAddr(inst->getEA());
+
+ DPRINTF(Resource, "[tid:%i] [sn:%i] Effective address calculated to be: "
+ "%#x.\n", tid, inst->seqNum, inst->getEA());
+ } else {
+ panic("Unexpected memory type!\n");
+ }
+
+ if (fault == NoFault) {
+ agen_req->done();
+ } else {
+ fatal("%s encountered @ [sn:%i]",fault->name(), seq_num);
+ }
+ }
+ } else {
+            DPRINTF(Resource, "[tid:%i] Ignoring non-memory instruction [sn:%i].\n", tid, seq_num);
+ agen_req->done();
+ }
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
diff --git a/src/cpu/inorder/resources/agen_unit.hh b/src/cpu/inorder/resources/agen_unit.hh
new file mode 100644
index 000000000..2010c9fa6
--- /dev/null
+++ b/src/cpu/inorder/resources/agen_unit.hh
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_AGEN_UNIT_HH__
+#define __CPU_INORDER_AGEN_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+#include "cpu/inorder/params.hh"
+
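+/** The address-generation resource: for memory-reference instructions it
+ *  computes the effective address (calcEA()) and records it in the
+ *  instruction (setMemAddr()) before any cache access is made. */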
+class AGENUnit : public Resource {
+ public:
+ typedef InOrderDynInst::DynInstPtr DynInstPtr;
+
+ public:
+ AGENUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~AGENUnit() {}
+
+ enum Command {
+ GenerateAddr
+ };
+
+ virtual void execute(int slot_num);
+
+ protected:
+ /** @todo: Add Resource Stats Here */
+};
+
+#endif //__CPU_INORDER_AGEN_UNIT_HH__
diff --git a/src/cpu/inorder/resources/bpred_unit.cc b/src/cpu/inorder/resources/bpred_unit.cc
new file mode 100644
index 000000000..66d0779a2
--- /dev/null
+++ b/src/cpu/inorder/resources/bpred_unit.cc
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Kevin Lim
+ */
+
+#include <list>
+#include <vector>
+
+#include "base/trace.hh"
+#include "base/traceflags.hh"
+#include "cpu/inorder/resources/bpred_unit.hh"
+
+using namespace std;
+using namespace ThePipeline;
+
+BPredUnit::BPredUnit(ThePipeline::Params *params)
+ : BTB(params->BTBEntries,
+ params->BTBTagSize,
+ params->instShiftAmt)
+{
+ // Setup the selected predictor.
+ if (params->predType == "local") {
+ localBP = new LocalBP(params->localPredictorSize,
+ params->localCtrBits,
+ params->instShiftAmt);
+ predictor = Local;
+ } else if (params->predType == "tournament") {
+ tournamentBP = new TournamentBP(params->localPredictorSize,
+ params->localCtrBits,
+ params->localHistoryTableSize,
+ params->localHistoryBits,
+ params->globalPredictorSize,
+ params->globalHistoryBits,
+ params->globalCtrBits,
+ params->choicePredictorSize,
+ params->choiceCtrBits,
+ params->instShiftAmt);
+ predictor = Tournament;
+ } else {
+ fatal("Invalid BP selected!");
+ }
+
+ for (int i=0; i < ThePipeline::MaxThreads; i++)
+ RAS[i].init(params->RASSize);
+}
+
+
+void
+BPredUnit::regStats()
+{
+ lookups
+ .name(name() + ".BPredUnit.lookups")
+ .desc("Number of BP lookups")
+ ;
+
+ condPredicted
+ .name(name() + ".BPredUnit.condPredicted")
+ .desc("Number of conditional branches predicted")
+ ;
+
+ condIncorrect
+ .name(name() + ".BPredUnit.condIncorrect")
+ .desc("Number of conditional branches incorrect")
+ ;
+
+ BTBLookups
+ .name(name() + ".BPredUnit.BTBLookups")
+ .desc("Number of BTB lookups")
+ ;
+
+ BTBHits
+ .name(name() + ".BPredUnit.BTBHits")
+ .desc("Number of BTB hits")
+ ;
+
+ BTBCorrect
+ .name(name() + ".BPredUnit.BTBCorrect")
+ .desc("Number of correct BTB predictions (this stat may not "
+ "work properly.")
+ ;
+
+ usedRAS
+ .name(name() + ".BPredUnit.usedRAS")
+ .desc("Number of times the RAS was used to get a target.")
+ ;
+
+ RASIncorrect
+ .name(name() + ".BPredUnit.RASInCorrect")
+ .desc("Number of incorrect RAS predictions.")
+ ;
+}
+
+
+void
+BPredUnit::switchOut()
+{
+ // Clear any state upon switch out.
+ for (int i = 0; i < ThePipeline::MaxThreads; ++i) {
+ squash(0, i);
+ }
+}
+
+
+void
+BPredUnit::takeOverFrom()
+{
+ // Can reset all predictor state, but it's not necessarily better
+ // than leaving it be.
+/*
+ for (int i = 0; i < ThePipeline::MaxThreads; ++i)
+ RAS[i].reset();
+
+ BP.reset();
+ BTB.reset();
+*/
+}
+
+
+bool
+BPredUnit::predict(DynInstPtr &inst, Addr &PC, unsigned tid)
+{
+ // See if branch predictor predicts taken.
+ // If so, get its target addr either from the BTB or the RAS.
+ // Save off record of branch stuff so the RAS can be fixed
+ // up once it's done.
+
+ using TheISA::MachInst;
+
+ bool pred_taken = false;
+ Addr target;
+
+ ++lookups;
+
+ void *bp_history = NULL;
+
+ if (inst->isUncondCtrl()) {
+ DPRINTF(Resource, "BranchPred: [tid:%i] Unconditional control.\n", tid);
+ pred_taken = true;
+ // Tell the BP there was an unconditional branch.
+ BPUncond(bp_history);
+
+ if (inst->isReturn() && RAS[tid].empty()) {
+ DPRINTF(Resource, "BranchPred: [tid:%i] RAS is empty, predicting "
+ "false.\n", tid);
+ pred_taken = false;
+ }
+ } else {
+ ++condPredicted;
+
+ pred_taken = BPLookup(PC, bp_history);
+
+ DPRINTF(Resource, "BranchPred: [tid:%i]: Branch predictor predicted %i "
+ "for PC %#x\n",
+ tid, pred_taken, inst->readPC());
+ }
+
+ PredictorHistory predict_record(inst->seqNum, PC, pred_taken,
+ bp_history, tid);
+
+ // Now lookup in the BTB or RAS.
+ if (pred_taken) {
+ if (inst->isReturn()) {
+ ++usedRAS;
+
+ // If it's a function return call, then look up the address
+ // in the RAS.
+ target = RAS[tid].top();
+
+ // Record the top entry of the RAS, and its index.
+ predict_record.usedRAS = true;
+ predict_record.RASIndex = RAS[tid].topIdx();
+ predict_record.RASTarget = target;
+
+ assert(predict_record.RASIndex < 16);
+
+ RAS[tid].pop();
+
+ DPRINTF(Resource, "BranchPred: [tid:%i]: Instruction %#x is a return, "
+ "RAS predicted target: %#x, RAS index: %i.\n",
+ tid, inst->readPC(), target, predict_record.RASIndex);
+ } else {
+ ++BTBLookups;
+
+ if (inst->isCall()) {
+ RAS[tid].push(PC + sizeof(MachInst));
+
+ // Record that it was a call so that the top RAS entry can
+ // be popped off if the speculation is incorrect.
+ predict_record.wasCall = true;
+
+ DPRINTF(Resource, "BranchPred: [tid:%i] Instruction %#x was a call"
+ ", adding %#x to the RAS.\n",
+ tid, inst->readPC(), PC + sizeof(MachInst));
+ }
+
+ if (inst->isCall() &&
+ inst->isUncondCtrl() &&
+ inst->isDirectCtrl()) {
+ target = inst->branchTarget();
+
+                DPRINTF(Resource, "BranchPred: [tid:%i]: Setting %#x predicted"
+ " target to %#x.\n",
+ tid, inst->readPC(), target);
+ } else if (BTB.valid(PC, tid)) {
+ ++BTBHits;
+
+ // If it's not a return, use the BTB to get the target addr.
+ target = BTB.lookup(PC, tid);
+
+ DPRINTF(Resource, "BranchPred: [tid:%i]: Instruction %#x predicted"
+ " target is %#x.\n",
+ tid, inst->readPC(), target);
+ } else {
+ DPRINTF(Resource, "BranchPred: [tid:%i]: BTB doesn't have a "
+ "valid entry.\n",tid);
+ pred_taken = false;
+ }
+ }
+ }
+
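+    // Propagate the prediction back through the PC reference argument: a
+    // taken prediction redirects PC to the predicted target, otherwise PC
+    // advances to the next sequential instruction.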
+ if (pred_taken) {
+ // Set the PC and the instruction's predicted target.
+ PC = target;
+ inst->setPredTarg(target);
+ } else {
+ PC = PC + sizeof(MachInst);
+ inst->setPredTarg(PC);
+ }
+
+ predHist[tid].push_front(predict_record);
+
+ DPRINTF(Resource, "[tid:%i] predHist.size(): %i\n", tid, predHist[tid].size());
+
+ inst->setBranchPred(pred_taken);
+
+ return pred_taken;
+}
+
+
+void
+BPredUnit::update(const InstSeqNum &done_sn, unsigned tid)
+{
+ DPRINTF(Resource, "BranchPred: [tid:%i]: Commiting branches until sequence"
+ "number %lli.\n", tid, done_sn);
+
+ while (!predHist[tid].empty() &&
+ predHist[tid].back().seqNum <= done_sn) {
+ // Update the branch predictor with the correct results.
+ BPUpdate(predHist[tid].back().PC,
+ predHist[tid].back().predTaken,
+ predHist[tid].back().bpHistory);
+
+ predHist[tid].pop_back();
+ }
+}
+
+
+void
+BPredUnit::squash(const InstSeqNum &squashed_sn, unsigned tid)
+{
+ History &pred_hist = predHist[tid];
+
+ while (!pred_hist.empty() &&
+ pred_hist.front().seqNum > squashed_sn) {
+ if (pred_hist.front().usedRAS) {
+ DPRINTF(Resource, "BranchPred: [tid:%i]: Restoring top of RAS to: %i,"
+ " target: %#x.\n",
+ tid,
+ pred_hist.front().RASIndex,
+ pred_hist.front().RASTarget);
+
+ RAS[tid].restore(pred_hist.front().RASIndex,
+ pred_hist.front().RASTarget);
+
+ } else if (pred_hist.front().wasCall) {
+ DPRINTF(Resource, "BranchPred: [tid:%i]: Removing speculative entry "
+ "added to the RAS.\n",tid);
+
+ RAS[tid].pop();
+ }
+
+ // This call should delete the bpHistory.
+ BPSquash(pred_hist.front().bpHistory);
+
+ pred_hist.pop_front();
+ }
+
+}
+
+
+void
+BPredUnit::squash(const InstSeqNum &squashed_sn,
+ const Addr &corr_target,
+ const bool actually_taken,
+ unsigned tid)
+{
+ // Now that we know that a branch was mispredicted, we need to undo
+ // all the branches that have been seen up until this branch and
+ // fix up everything.
+
+ History &pred_hist = predHist[tid];
+
+ ++condIncorrect;
+
+ DPRINTF(Resource, "BranchPred: [tid:%i]: Squashing from sequence number %i, "
+ "setting target to %#x.\n",
+ tid, squashed_sn, corr_target);
+
+ squash(squashed_sn, tid);
+
+ // If there's a squash due to a syscall, there may not be an entry
+ // corresponding to the squash. In that case, don't bother trying to
+ // fix up the entry.
+ if (!pred_hist.empty()) {
+ assert(pred_hist.front().seqNum == squashed_sn);
+ if (pred_hist.front().usedRAS) {
+ ++RASIncorrect;
+ }
+
+ BPUpdate(pred_hist.front().PC, actually_taken,
+ pred_hist.front().bpHistory);
+
+ BTB.update(pred_hist.front().PC, corr_target, tid);
+ pred_hist.pop_front();
+ }
+}
+
+
+void
+BPredUnit::BPUncond(void * &bp_history)
+{
+ // Only the tournament predictor cares about unconditional branches.
+ if (predictor == Tournament) {
+ tournamentBP->uncondBr(bp_history);
+ }
+}
+
+
+void
+BPredUnit::BPSquash(void *bp_history)
+{
+ if (predictor == Local) {
+ localBP->squash(bp_history);
+ } else if (predictor == Tournament) {
+ tournamentBP->squash(bp_history);
+ } else {
+ panic("Predictor type is unexpected value!");
+ }
+}
+
+
+bool
+BPredUnit::BPLookup(Addr &inst_PC, void * &bp_history)
+{
+ if (predictor == Local) {
+ return localBP->lookup(inst_PC, bp_history);
+ } else if (predictor == Tournament) {
+ return tournamentBP->lookup(inst_PC, bp_history);
+ } else {
+ panic("Predictor type is unexpected value!");
+ }
+}
+
+
+void
+BPredUnit::BPUpdate(Addr &inst_PC, bool taken, void *bp_history)
+{
+ if (predictor == Local) {
+ localBP->update(inst_PC, taken, bp_history);
+ } else if (predictor == Tournament) {
+ tournamentBP->update(inst_PC, taken, bp_history);
+ } else {
+ panic("Predictor type is unexpected value!");
+ }
+}
+
+
+void
+BPredUnit::dump()
+{
+ /*typename History::iterator pred_hist_it;
+
+ for (int i = 0; i < ThePipeline::MaxThreads; ++i) {
+ if (!predHist[i].empty()) {
+ pred_hist_it = predHist[i].begin();
+
+ cprintf("predHist[%i].size(): %i\n", i, predHist[i].size());
+
+ while (pred_hist_it != predHist[i].end()) {
+ cprintf("[sn:%lli], PC:%#x, tid:%i, predTaken:%i, "
+ "bpHistory:%#x\n",
+ (*pred_hist_it).seqNum, (*pred_hist_it).PC,
+ (*pred_hist_it).tid, (*pred_hist_it).predTaken,
+ (*pred_hist_it).bpHistory);
+ pred_hist_it++;
+ }
+
+ cprintf("\n");
+ }
+ }*/
+}
diff --git a/src/cpu/inorder/resources/bpred_unit.hh b/src/cpu/inorder/resources/bpred_unit.hh
new file mode 100644
index 000000000..b945922a7
--- /dev/null
+++ b/src/cpu/inorder/resources/bpred_unit.hh
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Kevin Lim
+ * Korey Sewell
+ */
+
+#ifndef __CPU_INORDER_BPRED_UNIT_HH__
+#define __CPU_INORDER_BPRED_UNIT_HH__
+
+// For Addr type.
+#include "arch/isa_traits.hh"
+#include "base/statistics.hh"
+#include "cpu/inst_seq.hh"
+
+//#include "cpu/inorder/params.hh"
+#include "cpu/o3/2bit_local_pred.hh"
+#include "cpu/o3/btb.hh"
+#include "cpu/o3/ras.hh"
+#include "cpu/o3/tournament_pred.hh"
+#include "params/InOrderCPU.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+
+#include <list>
+
+/**
+ * Basically a wrapper class to hold both the branch predictor
+ * and the BTB.
+ */
+class BPredUnit
+{
+ private:
+
+ enum PredType {
+ Local,
+ Tournament
+ };
+
+ PredType predictor;
+
+ public:
+
+ /**
+ * @param params The params object, that has the size of the BP and BTB.
+ */
+ BPredUnit(ThePipeline::Params *params);
+
+ /**
+ * Registers statistics.
+ */
+ void regStats();
+
+ void switchOut();
+
+ void takeOverFrom();
+
+ /**
+ * Predicts whether or not the instruction is a taken branch, and the
+ * target of the branch if it is taken.
+ * @param inst The branch instruction.
+ * @param PC The predicted PC is passed back through this parameter.
+ * @param tid The thread id.
+ * @return Returns if the branch is taken or not.
+ */
+ bool predict(ThePipeline::DynInstPtr &inst, Addr &PC, unsigned tid);
+
+ // @todo: Rename this function.
+ void BPUncond(void * &bp_history);
+
+ /**
+ * Tells the branch predictor to commit any updates until the given
+ * sequence number.
+ * @param done_sn The sequence number to commit any older updates up until.
+ * @param tid The thread id.
+ */
+ void update(const InstSeqNum &done_sn, unsigned tid);
+
+ /**
+ * Squashes all outstanding updates until a given sequence number.
+ * @param squashed_sn The sequence number to squash any younger updates up
+ * until.
+ * @param tid The thread id.
+ */
+ void squash(const InstSeqNum &squashed_sn, unsigned tid);
+
+ /**
+ * Squashes all outstanding updates until a given sequence number, and
+ * corrects that sn's update with the proper address and taken/not taken.
+ * @param squashed_sn The sequence number to squash any younger updates up
+ * until.
+ * @param corr_target The correct branch target.
+ * @param actually_taken The correct branch direction.
+ * @param tid The thread id.
+ */
+ void squash(const InstSeqNum &squashed_sn, const Addr &corr_target,
+ bool actually_taken, unsigned tid);
+
+ /**
+ * @param bp_history Pointer to the history object. The predictor
+ * will need to update any state and delete the object.
+ */
+ void BPSquash(void *bp_history);
+
+ /**
+ * Looks up a given PC in the BP to see if it is taken or not taken.
+ * @param inst_PC The PC to look up.
+ * @param bp_history Pointer that will be set to an object that
+ * has the branch predictor state associated with the lookup.
+ * @return Whether the branch is taken or not taken.
+ */
+ bool BPLookup(Addr &inst_PC, void * &bp_history);
+
+ /**
+ * Looks up a given PC in the BTB to see if a matching entry exists.
+ * @param inst_PC The PC to look up.
+ * @return Whether the BTB contains the given PC.
+ */
+ bool BTBValid(Addr &inst_PC)
+ { return BTB.valid(inst_PC, 0); }
+
+ /**
+ * Looks up a given PC in the BTB to get the predicted target.
+ * @param inst_PC The PC to look up.
+ * @return The address of the target of the branch.
+ */
+ Addr BTBLookup(Addr &inst_PC)
+ { return BTB.lookup(inst_PC, 0); }
+
+ /**
+ * Updates the BP with taken/not taken information.
+ * @param inst_PC The branch's PC that will be updated.
+ * @param taken Whether the branch was taken or not taken.
+ * @param bp_history Pointer to the branch predictor state that is
+ * associated with the branch lookup that is being updated.
+ * @todo Make this update flexible enough to handle a global predictor.
+ */
+ void BPUpdate(Addr &inst_PC, bool taken, void *bp_history);
+
+ /**
+ * Updates the BTB with the target of a branch.
+ * @param inst_PC The branch's PC that will be updated.
+ * @param target_PC The branch's target that will be added to the BTB.
+ */
+ void BTBUpdate(Addr &inst_PC, Addr &target_PC)
+ { BTB.update(inst_PC, target_PC,0); }
+
+ void dump();
+
+ private:
+ struct PredictorHistory {
+ /**
+ * Makes a predictor history struct that contains any
+ * information needed to update the predictor, BTB, and RAS.
+ */
+ PredictorHistory(const InstSeqNum &seq_num, const Addr &inst_PC,
+ const bool pred_taken, void *bp_history,
+ const unsigned _tid)
+ : seqNum(seq_num), PC(inst_PC), RASTarget(0),
+ RASIndex(0), tid(_tid), predTaken(pred_taken), usedRAS(0),
+ wasCall(0), bpHistory(bp_history)
+ { }
+
+ /** The sequence number for the predictor history entry. */
+ InstSeqNum seqNum;
+
+ /** The PC associated with the sequence number. */
+ Addr PC;
+
+ /** The RAS target (only valid if a return). */
+ Addr RASTarget;
+
+ /** The RAS index of the instruction (only valid if a call). */
+ unsigned RASIndex;
+
+ /** The thread id. */
+ unsigned tid;
+
+ /** Whether or not it was predicted taken. */
+ bool predTaken;
+
+ /** Whether or not the RAS was used. */
+ bool usedRAS;
+
+ /** Whether or not the instruction was a call. */
+ bool wasCall;
+
+ /** Pointer to the history object passed back from the branch
+ * predictor. It is used to update or restore state of the
+ * branch predictor.
+ */
+ void *bpHistory;
+ };
+
+ typedef std::list<PredictorHistory> History;
+
+ /**
+ * The per-thread predictor history. This is used to update the predictor
+ * as instructions are committed, or restore it to the proper state after
+ * a squash.
+ */
+ History predHist[ThePipeline::MaxThreads];
+
+ /** The local branch predictor. */
+ LocalBP *localBP;
+
+ /** The tournament branch predictor. */
+ TournamentBP *tournamentBP;
+
+ /** The BTB. */
+ DefaultBTB BTB;
+
+ /** The per-thread return address stack. */
+ ReturnAddrStack RAS[ThePipeline::MaxThreads];
+
+ /** Stat for number of BP lookups. */
+ Stats::Scalar<> lookups;
+ /** Stat for number of conditional branches predicted. */
+ Stats::Scalar<> condPredicted;
+ /** Stat for number of conditional branches predicted incorrectly. */
+ Stats::Scalar<> condIncorrect;
+ /** Stat for number of BTB lookups. */
+ Stats::Scalar<> BTBLookups;
+ /** Stat for number of BTB hits. */
+ Stats::Scalar<> BTBHits;
+ /** Stat for number of times the BTB is correct. */
+ Stats::Scalar<> BTBCorrect;
+ /** Stat for number of times the RAS is used to get a target. */
+ Stats::Scalar<> usedRAS;
+ /** Stat for number of times the RAS is incorrect. */
+ Stats::Scalar<> RASIncorrect;
+};
+
+#endif // __CPU_INORDER_BPRED_UNIT_HH__
diff --git a/src/cpu/inorder/resources/branch_predictor.cc b/src/cpu/inorder/resources/branch_predictor.cc
new file mode 100644
index 000000000..b563a3057
--- /dev/null
+++ b/src/cpu/inorder/resources/branch_predictor.cc
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/resources/branch_predictor.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+BranchPredictor::BranchPredictor(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu),
+ branchPred(params)
+{
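+    // instSize is used in execute() to step past the branch delay slot
+    // when a conditional-delay-slot branch is predicted not taken.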
+ instSize = sizeof(MachInst);
+}
+
+void
+BranchPredictor::regStats()
+{
+ predictedTaken
+ .name(name() + ".predictedTaken")
+ .desc("Number of Branches Predicted As Taken (True).");
+
+ predictedNotTaken
+ .name(name() + ".predictedNotTaken")
+ .desc("Number of Branches Predicted As Not Taken (False).");
+
+ Resource::regStats();
+}
+
+void
+BranchPredictor::execute(int slot_num)
+{
+ // After this is working, change this to a reinterpret cast
+ // for performance considerations
+ ResourceRequest* bpred_req = reqMap[slot_num];
+
+ DynInstPtr inst = bpred_req->inst;
+ int tid = inst->readTid();
+ int seq_num = inst->seqNum;
+ //int stage_num = bpred_req->getStageNum();
+
+ bpred_req->fault = NoFault;
+
+ switch (bpred_req->cmd)
+ {
+ case PredictBranch:
+ {
+ Addr pred_PC = inst->readNextPC();
+
+ if (inst->isControl()) {
+              // If predicted taken, pred_PC will be updated to the new
+              // target; otherwise it will be updated to PC + 8.
+ bool predict_taken = branchPred.predict(inst, pred_PC, tid);
+
+ if (predict_taken) {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: Branch predicted true.\n",
+ tid, seq_num);
+
+ inst->setPredTarg(pred_PC);
+
+ predictedTaken++;
+ } else {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: Branch predicted false.\n",
+ tid, seq_num);
+
+ if (inst->isCondDelaySlot())
+ {
+ inst->setPredTarg(inst->readPC() + (2 * instSize));
+ } else {
+ inst->setPredTarg(pred_PC);
+ }
+
+ predictedNotTaken++;
+ }
+
+ inst->setBranchPred(predict_taken);
+
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: Predicted PC is %08p.\n",
+ tid, seq_num, pred_PC);
+
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Ignoring [sn:%i] because this isn't "
+ "a control instruction.\n", tid, seq_num);
+ }
+
+ bpred_req->done();
+ }
+ break;
+
+ case UpdatePredictor:
+ {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: Updating Branch Predictor.\n",
+ tid, seq_num);
+
+
+ branchPred.update(seq_num, tid);
+
+ bpred_req->done();
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
+
+void
+BranchPredictor::squash(DynInstPtr inst, int squash_stage,
+ InstSeqNum squash_seq_num, unsigned tid)
+{
+ DPRINTF(Resource, "Squashing...\n");
+ branchPred.squash(squash_seq_num, tid);
+}
+
+void
+BranchPredictor::instGraduated(InstSeqNum seq_num,unsigned tid)
+{
+ branchPred.update(seq_num, tid);
+}
diff --git a/src/cpu/inorder/resources/branch_predictor.hh b/src/cpu/inorder/resources/branch_predictor.hh
new file mode 100644
index 000000000..66eb1afe8
--- /dev/null
+++ b/src/cpu/inorder/resources/branch_predictor.hh
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_BRANCH_PREDICTOR_HH__
+#define __CPU_INORDER_BRANCH_PREDICTOR_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/resources/bpred_unit.hh"
+#include "cpu/inorder/cpu.hh"
+//#include "cpu/inorder/params.hh"
+
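+/** Pipeline-resource wrapper around BPredUnit: PredictBranch predicts
+ *  control instructions and sets their predicted targets, while
+ *  UpdatePredictor/instGraduated() commit prediction history once
+ *  branches are known to be correct, and squash() rolls it back. */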
+class BranchPredictor : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ enum Command {
+ PredictBranch,
+ UpdatePredictor
+ };
+
+ public:
+ BranchPredictor(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+
+ virtual void regStats();
+
+ virtual void execute(int slot_num);
+
+ virtual void squash(DynInstPtr inst, int stage_num,
+ InstSeqNum squash_seq_num, unsigned tid);
+
+ virtual void instGraduated(InstSeqNum seq_num,unsigned tid);
+
+ protected:
+    /** The wrapped branch predictor and BTB used by this resource. */
+    BPredUnit branchPred;
+
+ int instSize;
+
+ /////////////////////////////////////////////////////////////////
+ //
+ // RESOURCE STATISTICS
+ //
+ /////////////////////////////////////////////////////////////////
+ Stats::Scalar<> predictedTaken;
+ Stats::Scalar<> predictedNotTaken;
+
+};
+
+#endif //__CPU_INORDER_BRANCH_PREDICTOR_HH__
diff --git a/src/cpu/inorder/resources/cache_unit.cc b/src/cpu/inorder/resources/cache_unit.cc
new file mode 100644
index 000000000..79ca7a096
--- /dev/null
+++ b/src/cpu/inorder/resources/cache_unit.cc
@@ -0,0 +1,609 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "arch/isa_traits.hh"
+#include "arch/mips/locked_mem.hh"
+#include "arch/utility.hh"
+#include "cpu/inorder/resources/cache_unit.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+#include "mem/request.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+
+Tick
+CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
+{
+ panic("DefaultFetch doesn't expect recvAtomic callback!");
+ return curTick;
+}
+
+void
+CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
+{
+ panic("DefaultFetch doesn't expect recvFunctional callback!");
+}
+
+void
+CacheUnit::CachePort::recvStatusChange(Status status)
+{
+ if (status == RangeChange)
+ return;
+
+ panic("DefaultFetch doesn't expect recvStatusChange callback!");
+}
+
+bool
+CacheUnit::CachePort::recvTiming(Packet *pkt)
+{
+ cachePortUnit->processCacheCompletion(pkt);
+ return true;
+}
+
+void
+CacheUnit::CachePort::recvRetry()
+{
+ cachePortUnit->recvRetry();
+}
+
+CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu),
+ retryPkt(NULL), retrySlot(-1)
+{
+ //cacheData.resize(res_width);
+ //slotStatus = new CachePortStatus[width];
+ //fetchPC = new Addr[width];
+ cachePort = new CachePort(this);
+
+ cacheBlocked = false;
+}
+
+
+Port *
+CacheUnit::getPort(const std::string &if_name, int idx)
+{
+ if (if_name == resName)
+ return cachePort;
+ else
+ return NULL;
+}
+
+int
+CacheUnit::getSlot(DynInstPtr inst)
+{
+ if (!inst->validMemAddr()) {
+ panic("Mem. Addr. must be set before requesting cache access.\n");
+ }
+
+ Addr req_addr = inst->getMemAddr();
+
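+    // Data accesses to an address with an outstanding request are denied a
+    // slot until that request completes; instruction-cache requests skip
+    // this per-address serialization.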
+ if (resName == "icache_port" ||
+ find(addrList.begin(), addrList.end(), req_addr) == addrList.end()) {
+
+ int new_slot = Resource::getSlot(inst);
+
+ if (new_slot != -1) {
+ inst->memTime = curTick;
+ addrList.push_back(req_addr);
+ addrMap[req_addr] = inst->seqNum;
+ DPRINTF(InOrderCachePort, "[tid:%i]: [sn:%i]: Address %08p added to dependency list.\n",
+ inst->readTid(), inst->seqNum, req_addr);
+ return new_slot;
+ } else {
+ return -1;
+ }
+
+
+ } else {
+ DPRINTF(InOrderCachePort,"Denying request because there is an outstanding"
+ " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
+ req_addr, addrMap[req_addr], inst->memTime);
+ return -1;
+ }
+}
+
+void
+CacheUnit::freeSlot(int slot_num)
+{
+ std::vector<Addr>::iterator vect_it = find(addrList.begin(), addrList.end(),
+ reqMap[slot_num]->inst->getMemAddr());
+ assert(vect_it != addrList.end());
+
+ DPRINTF(InOrderCachePort, "[tid:%i]: Address %08p removed from dependency list.\n",
+ reqMap[slot_num]->inst->readTid(), (*vect_it));
+
+ addrList.erase(vect_it);
+
+ Resource::freeSlot(slot_num);
+}
+
+ResReqPtr
+CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
+ int slot_num, unsigned cmd)
+{
+ ScheduleEntry* sched_entry = inst->resSched.top();
+
+ if (!inst->validMemAddr()) {
+ panic("Mem. Addr. must be set before requesting cache access.\n");
+ }
+
+ int req_size = 0;
+ MemCmd::Command pkt_cmd;
+
+ if (sched_entry->cmd == InitiateReadData) {
+ pkt_cmd = MemCmd::ReadReq;
+ req_size = inst->getMemAccSize();
+
+ DPRINTF(InOrderCachePort, "[tid:%i]: %i byte Read request from [sn:%i] for addr %08p.\n",
+ inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
+ } else if (sched_entry->cmd == InitiateWriteData) {
+ pkt_cmd = MemCmd::WriteReq;
+ req_size = inst->getMemAccSize();
+
+
+ DPRINTF(InOrderCachePort, "[tid:%i]: %i byte Write request from [sn:%i] for addr %08p.\n",
+ inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
+ } else if (sched_entry->cmd == InitiateFetch){
+ pkt_cmd = MemCmd::ReadReq;
+ req_size = sizeof(MachInst); //@TODO: mips16e
+
+ DPRINTF(InOrderCachePort, "[tid:%i]: %i byte Fetch request from [sn:%i] for addr %08p.\n",
+ inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
+ } else {
+ panic("%i: Unexpected request type (%i) to %s", curTick, sched_entry->cmd, name());
+ }
+
+ return new CacheRequest(this, inst, stage_num, id, slot_num,
+ sched_entry->cmd, req_size, pkt_cmd,
+ 0/*flags*/, this->cpu->readCpuId());
+}
+
+void
+CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
+{
+ //service_request = false;
+
+ CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
+ assert(cache_req);
+
+ // Check to see if this instruction is requesting the same command
+ // or a different one
+ if (cache_req->cmd != inst->resSched.top()->cmd) {
+ // If different, then update command in the request
+ cache_req->cmd = inst->resSched.top()->cmd;
+ DPRINTF(InOrderCachePort, "[tid:%i]: [sn:%i]: Updating the command for this "
+ "instruction.\n", inst->readTid(), inst->seqNum);
+
+ service_request = true;
+ } else {
+ // If same command, just check to see if memory access was completed
+        // but don't try to re-execute
+ DPRINTF(InOrderCachePort, "[tid:%i]: [sn:%i]: requesting this resource again.\n",
+ inst->readTid(), inst->seqNum);
+
+ service_request = true;
+ }
+
+}
+
+void
+CacheUnit::execute(int slot_num)
+{
+ if (cacheBlocked) {
+ DPRINTF(InOrderCachePort, "Cache Blocked. Cannot Access.\n");
+ return;
+ }
+
+ CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
+ assert(cache_req);
+
+ DynInstPtr inst = cache_req->inst;
+ int tid;
+ tid = inst->readTid();
+ int seq_num;
+ seq_num = inst->seqNum;
+ //int stage_num = cache_req->getStageNum();
+
+ cache_req->fault = NoFault;
+
+ switch (cache_req->cmd)
+ {
+ case InitiateFetch:
+ {
+ DPRINTF(InOrderCachePort,
+ "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
+ tid, name(), cache_req->inst->getMemAddr());
+
+ DPRINTF(InOrderCachePort,
+ "[tid:%u]: Fetching new cache block from addr: %08p\n",
+ tid, cache_req->memReq->getVaddr());
+
+ inst->setCurResSlot(slot_num);
+ doDataAccess(inst);
+ }
+ break;
+
+ case CompleteFetch:
+ {
+ if (cache_req->isMemAccComplete()) {
+ DPRINTF(InOrderCachePort,
+ "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
+ tid, inst->seqNum);
+
+ MachInst mach_inst = cache_req->dataPkt->get<MachInst>();
+
+ //@TODO: May Need This Function for Endianness-Compatibility
+ //mach_inst = gtoh(*reinterpret_cast<MachInst *>(&cacheData[tid][offset]));
+
+ DPRINTF(InOrderCachePort,
+ "[tid:%i]: Fetched instruction is %08p\n",
+ tid, mach_inst);
+
+ //ExtMachInst ext_inst
+ // = TheISA::makeExtMI(mach_inst, cpu->tcBase(tid));
+
+ inst->setMachInst(mach_inst);
+ inst->setASID(tid);
+ inst->setThreadState(cpu->thread[tid]);
+
+ DPRINTF(InOrderStage, "[tid:%i]: Instruction [sn:%i] is: %s\n",
+ tid, seq_num, inst->staticInst->disassemble(inst->PC));
+
+ // Set Up More TraceData info
+ if (inst->traceData) {
+ inst->traceData->setStaticInst(inst->staticInst);
+ inst->traceData->setPC(inst->readPC());
+ }
+
+ cache_req->done();
+ } else {
+ DPRINTF(InOrderCachePort, "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access.\n",
+ tid, inst->seqNum);
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: Fetch miss from %08p.\n",
+ tid, cache_req->inst->readPC());
+ cache_req->setCompleted(false);
+ }
+ }
+ break;
+
+ case InitiateReadData:
+ case InitiateWriteData:
+ {
+ DPRINTF(InOrderCachePort, "[tid:%u]: Initiating data access to %s "
+ "for addr. %08p\n",
+ tid, name(), cache_req->inst->getMemAddr());
+
+ inst->setCurResSlot(slot_num);
+ //inst->memAccess();
+ inst->initiateAcc();
+ }
+ break;
+
+ case CompleteReadData:
+ case CompleteWriteData:
+ {
+ DPRINTF(InOrderCachePort,
+ "[tid:%i]: [sn:%i]: Trying to Complete Data Access.\n",
+ tid, inst->seqNum);
+ if (cache_req->isMemAccComplete()) {
+ cache_req->done();
+ } else {
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
+ tid, cache_req->inst->getMemAddr());
+ cache_req->setCompleted(false);
+ }
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
+
+Fault
+CacheUnit::doDataAccess(DynInstPtr inst)
+{
+ Fault fault = NoFault;
+ int tid = 0;
+
+ tid = inst->readTid();
+
+ CacheReqPtr cache_req
+ = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
+ assert(cache_req);
+
+ cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
+ Packet::Broadcast);
+
+ if (cache_req->dataPkt->isRead()) {
+ cache_req->dataPkt->dataStatic(cache_req->reqData);
+ } else if (cache_req->dataPkt->isWrite()) {
+ cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);
+
+ }
+
+ cache_req->dataPkt->time = curTick;
+
+ bool do_access = true; // flag to suppress cache access
+
+ Request *memReq = cache_req->dataPkt->req;
+
+ if (cache_req->dataPkt->isWrite() && memReq->isLocked()) {
+ assert(cache_req->inst->isStoreConditional());
+ DPRINTF(InOrderCachePort, "Evaluating Store Conditional access.\n");
+ do_access = TheISA::handleLockedWrite(cpu, memReq);
+ }
+
+
+ DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] attempting to access cache..\n", tid, inst->seqNum);
+
+ //@TODO: If you want to ignore failed store conditional accesses, then
+ // enable this. However, this might skew memory stats because
+ // the failed store conditional access will get ignored.
+ // - Remove optionality here ...
+ if (1/*do_access*/) {
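+        // If the port refuses the packet, remember it (retryPkt/retryReq/
+        // retrySlot) for recvRetry() and leave the request incomplete so
+        // this slot is retried; otherwise mark the access as pending and
+        // wait for the cache response.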
+ if (!cachePort->sendTiming(cache_req->dataPkt)) {
+ DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] is waiting to retry request.\n", tid, inst->seqNum);
+
+ retrySlot = cache_req->getSlot();
+ retryReq = cache_req;
+ retryPkt = cache_req->dataPkt;
+
+ cacheStatus = cacheWaitRetry;
+
+ //cacheBlocked = true;
+
+            DPRINTF(InOrderStall, "STALL: [tid:%i]: cache request for [sn:%i] "
+                    "could not be sent this cycle.\n", tid, inst->seqNum);
+
+ cache_req->setCompleted(false);
+ } else {
+ DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] is now waiting for cache response.\n", tid, inst->seqNum);
+ cache_req->setCompleted();
+ cache_req->setMemAccPending();
+ cacheStatus = cacheWaitResponse;
+ cacheBlocked = false;
+ }
+ } else if (!do_access && memReq->isLocked()){
+ // Store-Conditional instructions complete even if they "failed"
+ assert(cache_req->inst->isStoreConditional());
+ cache_req->setCompleted(true);
+
+ DPRINTF(LLSC, "[tid:%i]: T%i Ignoring Failed Store Conditional Access.\n",
+ tid, tid);
+
+ cache_req->dataPkt->req->setExtraData(0);
+
+ processCacheCompletion(cache_req->dataPkt);
+
+ // Automatically set these since we ignored the memory access
+ //cache_req->setMemAccPending(false);
+ //cache_req->setMemAccCompleted();
+ } else {
+        // The access could not be made this cycle; leave the request
+        // incomplete so the cache request will be attempted again.
+        DPRINTF(InOrderStall, "STALL: [tid:%i]: cache access for [sn:%i] "
+                "could not be made this cycle.\n", tid, inst->seqNum);
+ cache_req->setCompleted(false);
+ }
+
+ return fault;
+}
+
+void
+CacheUnit::processCacheCompletion(PacketPtr pkt)
+{
+ // Cast to correct packet type
+ CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
+ assert(cache_pkt);
+
+ if (cache_pkt->cacheReq->isSquashed()) {
+ DPRINTF(InOrderCachePort,
+ "Ignoring completion of squashed access, [tid:%i] [sn:%i].\n",
+ cache_pkt->cacheReq->getInst()->readTid(),
+ cache_pkt->cacheReq->getInst()->seqNum);
+
+ cache_pkt->cacheReq->done();
+ return;
+ } else {
+ DPRINTF(InOrderCachePort,
+ "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p.\n",
+ cache_pkt->cacheReq->getInst()->readTid(),
+ cache_pkt->cacheReq->getInst()->seqNum,
+ cache_pkt->cacheReq->getInst()->getMemAddr());
+ }
+
+ // Cast to correct request type
+ CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
+ findRequest(cache_pkt->cacheReq->getInst()));
+ assert(cache_req);
+
+#if TRACING_ON
+ // Get resource request info
+ unsigned tid = 0;
+#endif
+
+ //tid = pkt->req->getThreadNum();
+ unsigned stage_num = cache_req->getStageNum();
+ DynInstPtr inst = cache_req->inst;
+
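+    // For a still-valid request, hand the response back to the instruction
+    // (completeAcc() for memory references), mark the memory access
+    // complete, and wake the pipeline stage that owns this instruction.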
+ if (!cache_req->isSquashed()) {
+ if (inst->resSched.top()->cmd == CompleteFetch) {
+ DPRINTF(InOrderCachePort,
+ "[tid:%u]: [sn:%i]: Processing fetch access.\n",
+ tid, inst->seqNum);
+ } else if (inst->staticInst && inst->isMemRef()) {
+ DPRINTF(InOrderCachePort,
+ "[tid:%u]: [sn:%i]: Processing cache access.\n",
+ tid, inst->seqNum);
+
+ inst->completeAcc(pkt);
+
+ if (inst->isLoad()) {
+ assert(cache_pkt->isRead());
+
+ if (cache_pkt->req->isLocked()) {
+ DPRINTF(InOrderCachePort, "[tid:%u]: Handling Load-Linked "
+ "access for [sn:%u].\n", tid, inst->seqNum);
+ TheISA::handleLockedRead(cpu, cache_pkt->req);
+ }
+
+                // @TODO: Hardcoded for load instructions. Assumes that
+ // the dest. idx 0 is always where the data is loaded to.
+ DPRINTF(InOrderCachePort,
+ "[tid:%u]: [sn:%i]: Data loaded was: %08p.\n",
+ tid, inst->seqNum, inst->readIntResult(0));
+ } else if(inst->isStore()) {
+ assert(cache_pkt->isWrite());
+
+ DPRINTF(InOrderCachePort,
+ "[tid:%u]: [sn:%i]: Data stored was: %08p.\n",
+ tid, inst->seqNum,
+ getMemData(cache_pkt));
+
+ }
+ }
+
+ cache_req->setMemAccPending(false);
+ cache_req->setMemAccCompleted();
+
+ // Wake up the CPU (if it went to sleep and was waiting on this
+ // completion event).
+ cpu->wakeCPU();
+
+ DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
+ tid, cpu->pipelineStage[stage_num]->name());
+
+ cpu->switchToActive(stage_num);
+ } else {
+ DPRINTF(InOrderCachePort,
+ "[tid:%u] Cache miss on memory access to block @ %08p "
+ "completed, but squashed.\n", tid, cache_req->inst->readPC());
+ cache_req->setMemAccCompleted();
+ }
+
+ inst->unsetMemAddr();
+}
+
+void
+CacheUnit::recvRetry()
+{
+ DPRINTF(InOrderCachePort, "Retrying Request for [tid:%i] [sn:%i].\n",
+ retryReq->inst->readTid(), retryReq->inst->seqNum);
+
+ assert(retryPkt != NULL);
+ assert(cacheBlocked);
+ assert(cacheStatus == cacheWaitRetry);
+
+ if (cachePort->sendTiming(retryPkt)) {
+ cacheStatus = cacheWaitResponse;
+ retryPkt = NULL;
+ cacheBlocked = false;
+ } else {
+ DPRINTF(InOrderCachePort, "Retry Request for [tid:%i] [sn:%i] failed.\n",
+ retryReq->inst->readTid(), retryReq->inst->seqNum);
+ }
+
+}
+
+void
+CacheUnit::squash(DynInstPtr inst, int stage_num,
+ InstSeqNum squash_seq_num, unsigned tid)
+{
+ std::vector<int> slot_remove_list;
+
+ std::map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+ std::map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+ while (map_it != map_end) {
+ ResReqPtr req_ptr = (*map_it).second;
+
+ if (req_ptr &&
+ req_ptr->getInst()->readTid() == tid &&
+ req_ptr->getInst()->seqNum > squash_seq_num) {
+
+ DPRINTF(InOrderCachePort,
+ "[tid:%i] Squashing request from [sn:%i].\n",
+ req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
+
+ req_ptr->setSquashed();
+
+ req_ptr->getInst()->setSquashed();
+
+ CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
+ assert(cache_req);
+
+ if (!cache_req->isMemAccPending()) {
+ // Mark request for later removal
+ cpu->reqRemoveList.push(req_ptr);
+
+ // Mark slot for removal from resource
+ slot_remove_list.push_back(req_ptr->getSlot());
+ }
+ }
+
+ map_it++;
+ }
+
+ // Now Delete Slot Entry from Req. Map
+ for (int i = 0; i < slot_remove_list.size(); i++) {
+ freeSlot(slot_remove_list[i]);
+ }
+
+}
+
+uint64_t
+CacheUnit::getMemData(Packet *packet)
+{
+    // Packet::getSize() reports the access size in bytes.
+    switch (packet->getSize())
+    {
+      case 1:
+        return packet->get<uint8_t>();
+
+      case 2:
+        return packet->get<uint16_t>();
+
+      case 4:
+        return packet->get<uint32_t>();
+
+      case 8:
+        return packet->get<uint64_t>();
+
+ default:
+ std::cerr << "bad store data size = " << packet->getSize() << std::endl;
+
+ assert(0);
+ return 0;
+ }
+}
+
diff --git a/src/cpu/inorder/resources/cache_unit.hh b/src/cpu/inorder/resources/cache_unit.hh
new file mode 100644
index 000000000..9d048d789
--- /dev/null
+++ b/src/cpu/inorder/resources/cache_unit.hh
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_CACHE_UNIT_HH__
+#define __CPU_INORDER_CACHE_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+//#include "cpu/inorder/params.hh"
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "mem/packet.hh"
+#include "mem/packet_access.hh"
+#include "mem/port.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "sim/sim_object.hh"
+
+#include "params/InOrderCPU.hh"
+
+class CacheRequest;
+typedef CacheRequest* CacheReqPtr;
+
+class CacheReqPacket;
+typedef CacheReqPacket* CacheReqPktPtr;
+
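+/** Resource that models both instruction fetch and data accesses to the
+ *  memory system through a single CachePort. Each access is split into an
+ *  Initiate* command that issues the request and a Complete* command that
+ *  consumes the response once it arrives. */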
+class CacheUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ CacheUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~CacheUnit() {}
+
+ enum Command {
+ InitiateFetch,
+ CompleteFetch,
+ InitiateReadData,
+ CompleteReadData,
+ InitiateWriteData,
+ CompleteWriteData,
+ Fetch,
+ ReadData,
+ WriteData
+ };
+
+ public:
+ /** CachePort class for the Cache Unit. Handles doing the
+ * communication with the cache/memory.
+ */
+ class CachePort : public Port
+ {
+ protected:
+ /** Pointer to cache port unit */
+ CacheUnit *cachePortUnit;
+
+ public:
+ /** Default constructor. */
+ CachePort(CacheUnit *_cachePortUnit)
+ : Port(_cachePortUnit->name() + "-cache-port", (MemObject*)_cachePortUnit->cpu),
+ cachePortUnit(_cachePortUnit)
+ { }
+
+ bool snoopRangeSent;
+
+ protected:
+ /** Atomic version of receive. Panics. */
+ virtual Tick recvAtomic(PacketPtr pkt);
+
+ /** Functional version of receive. Panics. */
+ virtual void recvFunctional(PacketPtr pkt);
+
+ /** Receives status change. Other than range changing, panics. */
+ virtual void recvStatusChange(Status status);
+
+ /** Returns the address ranges of this device. */
+ virtual void getDeviceAddressRanges(AddrRangeList &resp,
+ AddrRangeList &snoop)
+ { resp.clear(); snoop.clear(); }
+
+ /** Timing version of receive. Handles setting fetch to the
+ * proper status to start fetching. */
+ virtual bool recvTiming(PacketPtr pkt);
+
+ /** Handles doing a retry of a failed fetch. */
+ virtual void recvRetry();
+ };
+
+ enum CachePortStatus {
+ cacheWaitResponse,
+ cacheWaitRetry,
+ cacheAccessComplete
+ };
+
+ ///virtual void init();
+
+ virtual ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,
+ int res_idx, int slot_num,
+ unsigned cmd);
+
+ void requestAgain(DynInstPtr inst, bool &try_request);
+
+ int getSlot(DynInstPtr inst);
+
+ void freeSlot(int slot_num);
+
+    /** Execute the function of this resource. The default action
+     * is to do nothing. More specific models will derive from this
+     * class and define their own execute function.
+     */
+ void execute(int slot_num);
+
+ void squash(DynInstPtr inst, int stage_num,
+ InstSeqNum squash_seq_num, unsigned tid);
+
+ /** Processes cache completion event. */
+ void processCacheCompletion(PacketPtr pkt);
+
+ void recvRetry();
+
+ /** Align a PC to the start of an I-cache block. */
+ Addr cacheBlockAlignPC(Addr addr)
+ {
+ //addr = TheISA::realPCToFetchPC(addr);
+ return (addr & ~(cacheBlkMask));
+ }
+
+ /** Returns a specific port. */
+ Port *getPort(const std::string &if_name, int idx);
+
+ /** Fetch on behalf of an instruction. Will check to see
+ * if instruction is actually in resource before
+ * trying to fetch.
+ */
+ //Fault doFetchAccess(DynInstPtr inst);
+
+ /** Read/Write on behalf of an instruction.
+ * curResSlot needs to be a valid value in instruction.
+ */
+ Fault doDataAccess(DynInstPtr inst);
+
+ uint64_t getMemData(Packet *packet);
+
+ protected:
+ /** Cache interface. */
+ CachePort *cachePort;
+
+ CachePortStatus cacheStatus;
+
+ CacheReqPtr retryReq;
+
+ PacketPtr retryPkt;
+
+ int retrySlot;
+
+ bool cacheBlocked;
+
+ std::vector<Addr> addrList;
+
+ std::map<Addr, InstSeqNum> addrMap;
+
+ public:
+ int cacheBlkSize;
+
+ int cacheBlkMask;
+
+ /** Align a PC to the start of the Cache block. */
+ Addr cacheBlockAlign(Addr addr)
+ {
+ return (addr & ~(cacheBlkMask));
+ }
+
+ /** THINGS USED FOR FETCH */
+    // NO LONGER USED, BUT KEPT COMMENTED OUT UNTIL FULLY VERIFIED
+ /** The mem line being fetched. */
+ //uint8_t *cacheData[ThePipeline::MaxThreads];
+
+ /** The Addr of the cacheline that has been loaded. */
+ //Addr cacheBlockAddr[ThePipeline::MaxThreads];
+
+ //unsigned fetchOffset[ThePipeline::MaxThreads];
+
+ /** @todo: Add Resource Stats Here */
+};
+
+struct CacheSchedEntry : public ThePipeline::ScheduleEntry {
+ enum EntryType {
+ FetchAccess,
+ DataAccess
+ };
+
+ CacheSchedEntry(int stage_num, int _priority, int res_num, MemCmd::Command pkt_cmd,
+ EntryType _type = FetchAccess) :
+ ScheduleEntry(stage_num, _priority, res_num), pktCmd(pkt_cmd),
+ type(_type)
+ { }
+
+ MemCmd::Command pktCmd;
+ EntryType type;
+};
+
+class CacheRequest : public ResourceRequest {
+ public:
+ CacheRequest(CacheUnit *cres, DynInstPtr inst, int stage_num, int res_idx,
+ int slot_num, unsigned cmd, int req_size,
+ MemCmd::Command pkt_cmd, unsigned flags, int cpu_id)
+ : ResourceRequest(cres, inst, stage_num, res_idx, slot_num, cmd),
+ pktCmd(pkt_cmd), memAccComplete(false), memAccPending(false)
+ {
+ memReq = inst->memReq;
+
+ reqData = new uint8_t[req_size];
+ retryPkt = NULL;
+ }
+
+ virtual ~CacheRequest()
+ {
+ /*
+ delete reqData;
+
+ Can get rid of packet and packet request now
+ if (*dataPkt) {
+ if (*dataPkt->req) {
+ delete dataPkt->req;
+ }
+ delete dataPkt;
+ }
+
+ // Can get rid of packet and packet request now
+ if (retryPkt) {
+ if (retryPkt->req) {
+ delete retryPkt->req;
+ }
+ delete retryPkt;
+ }*/
+
+ if (memReq) {
+ delete memReq;
+ }
+ }
+
+ virtual PacketDataPtr getData()
+ { return reqData; }
+
+ void setMemAccCompleted(bool completed = true) { memAccComplete = completed; }
+ bool isMemAccComplete() { return memAccComplete; }
+
+ void setMemAccPending(bool pending = true) { memAccPending = pending; }
+ bool isMemAccPending() { return memAccPending; }
+
+ //Make this data private/protected!
+ MemCmd::Command pktCmd;
+ RequestPtr memReq;
+ PacketDataPtr reqData;
+ PacketPtr dataPkt;
+ PacketPtr retryPkt;
+
+ bool memAccComplete;
+ bool memAccPending;
+};
+
+class CacheReqPacket : public Packet {
+ public:
+ CacheReqPacket(CacheRequest *_req,
+ Command _cmd, short _dest)
+ : Packet(_req->memReq, _cmd, _dest), cacheReq(_req)
+ {
+
+ }
+
+ CacheRequest *cacheReq;
+};
+
+#endif //__CPU_INORDER_CACHE_UNIT_HH__
diff --git a/src/cpu/inorder/resources/decode_unit.cc b/src/cpu/inorder/resources/decode_unit.cc
new file mode 100644
index 000000000..1628c32d0
--- /dev/null
+++ b/src/cpu/inorder/resources/decode_unit.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/resources/decode_unit.hh"
+
+using namespace TheISA;
+using namespace ThePipeline;
+using namespace std;
+
+DecodeUnit::DecodeUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{
+ for (int tid = 0; tid < MaxThreads; tid++) {
+ regDepMap[tid] = &cpu->archRegDepMap[tid];
+ }
+}
+
+void
+DecodeUnit::execute(int slot_num)
+{
+ ResourceRequest* decode_req = reqMap[slot_num];
+ DynInstPtr inst = reqMap[slot_num]->inst;
+ Fault fault = reqMap[slot_num]->fault;
+ int tid, seq_num;
+
+ tid = inst->readTid();
+ seq_num = inst->seqNum;
+ decode_req->fault = NoFault;
+
+ switch (decode_req->cmd)
+ {
+ case DecodeInst:
+ {
+ bool done_sked = ThePipeline::createBackEndSchedule(inst);
+
+ if (done_sked) {
+ DPRINTF(Resource, "[tid:%i]: Setting Destination Register(s) for [sn:%i].\n",
+ tid, seq_num);
+ regDepMap[tid]->insert(inst);
+ decode_req->done();
+ } else {
+            DPRINTF(Resource, "[tid:%i] Static Inst not available to decode. Unable to create "
+                    "schedule for instruction [sn:%i].\n", tid, inst->seqNum);
+ DPRINTF(InOrderStall, "STALL: \n");
+ decode_req->done(false);
+ }
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
+
+
+void
+DecodeUnit::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num, unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i]: Updating due to squash from stage %i after [sn:%i].\n",
+ tid, stage_num, squash_seq_num);
+
+ //cpu->removeInstsUntil(squash_seq_num, tid);
+}
diff --git a/src/cpu/inorder/resources/decode_unit.hh b/src/cpu/inorder/resources/decode_unit.hh
new file mode 100644
index 000000000..3813de6c4
--- /dev/null
+++ b/src/cpu/inorder/resources/decode_unit.hh
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_DECODE_UNIT_HH__
+#define __CPU_INORDER_DECODE_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+#include "cpu/inorder/reg_dep_map.hh"
+
+class DecodeUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ DecodeUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~DecodeUnit() {}
+
+ enum Command {
+ DecodeInst
+ };
+
+ virtual void execute(int slot_num);
+
+ void squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num, unsigned tid);
+
+ RegDepMap *regDepMap[ThePipeline::MaxThreads];
+
+ protected:
+ /** @todo: Add Resource Stats Here */
+};
+
+#endif //__CPU_INORDER_DECODE_UNIT_HH__
diff --git a/src/cpu/inorder/resources/execution_unit.cc b/src/cpu/inorder/resources/execution_unit.cc
new file mode 100644
index 000000000..e41291103
--- /dev/null
+++ b/src/cpu/inorder/resources/execution_unit.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "cpu/inorder/resources/execution_unit.hh"
+#include "cpu/inorder/resource_pool.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace ThePipeline;
+
+ExecutionUnit::ExecutionUnit(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{ }
+
+void
+ExecutionUnit::regStats()
+{
+ predictedTakenIncorrect
+ .name(name() + ".predictedTakenIncorrect")
+ .desc("Number of Branches Incorrectly Predicted As Taken.");
+
+ predictedNotTakenIncorrect
+ .name(name() + ".predictedNotTakenIncorrect")
+        .desc("Number of Branches Incorrectly Predicted As Not Taken.");
+
+ Resource::regStats();
+}
+
+void
+ExecutionUnit::execute(int slot_num)
+{
+ ResourceRequest* exec_req = reqMap[slot_num];
+ DynInstPtr inst = reqMap[slot_num]->inst;
+ Fault fault = reqMap[slot_num]->fault;
+ int tid = inst->readTid();
+ int seq_num = inst->seqNum;
+
+ exec_req->fault = NoFault;
+
+    DPRINTF(Resource, "[tid:%i] Executing [sn:%i] [PC:%#x].\n",
+ tid, seq_num, inst->readPC());
+
+ switch (exec_req->cmd)
+ {
+ case ExecuteInst:
+ {
+ if (inst->isMemRef()) {
+ fatal("%s not configured to handle memory ops.\n", resName);
+ } else if (inst->isControl()) {
+ // Evaluate Branch
+ fault = inst->execute();
+
+ inst->setExecuted();
+
+ if (fault == NoFault) {
+ // If branch is mispredicted, then signal squash
+ // throughout all stages behind the pipeline stage
+ // that got squashed.
+ if (inst->mispredicted()) {
+ int stage_num = exec_req->getStageNum();
+ int tid = inst->readTid();
+
+ // If it's a branch ...
+ if (inst->isDirectCtrl()) {
+ assert(!inst->isIndirectCtrl());
+
+ if (inst->predTaken() && inst->isCondDelaySlot()) {
+ inst->bdelaySeqNum = seq_num;
+ inst->setPredTarg(inst->nextPC);
+
+                        DPRINTF(Resource, "[tid:%i]: Conditional branch inst. "
+ "[sn:%i] PC %#x mispredicted as taken.\n", tid,
+ seq_num, inst->PC);
+ } else if (!inst->predTaken() && inst->isCondDelaySlot()) {
+ inst->bdelaySeqNum = seq_num;
+ inst->setPredTarg(inst->nextPC);
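+                        // Note: this flag appears to tell the squash logic
+                        // that the delay slot instruction still needs to be
+                        // processed even though the branch was mispredicted
+                        // as not taken.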
+ inst->procDelaySlotOnMispred = true;
+
+                        DPRINTF(Resource, "[tid:%i]: Conditional branch inst. "
+ "[sn:%i] PC %#x mispredicted as not taken.\n", tid,
+ seq_num, inst->PC);
+ } else {
+ inst->bdelaySeqNum = seq_num + 1;
+
+ DPRINTF(Resource, "[tid:%i]: Misprediction detected at "
+ "[sn:%i] PC %#x,\n\t squashing after delay slot "
+ "instruction [sn:%i].\n",
+ tid, seq_num, inst->PC, inst->bdelaySeqNum);
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: Branch "
+ "misprediction at %#x\n", tid, inst->PC);
+ inst->setPredTarg(inst->nextNPC);
+ }
+
+ DPRINTF(Resource, "[tid:%i] Redirecting fetch to %#x.\n", tid,
+ inst->readPredTarg());
+
+ } else if(inst->isIndirectCtrl()){
+ inst->setPredTarg(inst->nextNPC);
+ inst->bdelaySeqNum = seq_num + 1;
+ DPRINTF(Resource, "[tid:%i] Redirecting fetch to %#x.\n", tid,
+ inst->readPredTarg());
+ } else {
+ panic("Non-control instruction (%s) mispredicting?!!",
+ inst->staticInst->getName());
+ }
+
+ DPRINTF(Resource, "[tid:%i] Squashing will start from stage %i.\n",
+ tid, stage_num);
+
+ cpu->pipelineStage[stage_num]->squashDueToBranch(inst, tid);
+
+ inst->squashingStage = stage_num;
+
+ // Squash throughout other resources
+ cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)ResourcePool::SquashAll,
+ inst, 0, 0, tid);
+
+ if (inst->predTaken()) {
+ predictedTakenIncorrect++;
+ } else {
+ predictedNotTakenIncorrect++;
+ }
+ }
+ exec_req->done();
+ } else {
+ warn("inst [sn:%i] had a %s fault", seq_num, fault->name());
+ }
+ } else {
+ // Regular ALU instruction
+ fault = inst->execute();
+
+ if (fault == NoFault) {
+ inst->setExecuted();
+ exec_req->done();
+
+ DPRINTF(Resource, "[tid:%i]: The result of execution is 0x%x.\n",
+ inst->readTid(), inst->readIntResult(0));
+ } else {
+ warn("inst [sn:%i] had a %s fault", seq_num, fault->name());
+ cpu->trap(fault, tid);
+ }
+ }
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
+
+
diff --git a/src/cpu/inorder/resources/execution_unit.hh b/src/cpu/inorder/resources/execution_unit.hh
new file mode 100644
index 000000000..e969497a2
--- /dev/null
+++ b/src/cpu/inorder/resources/execution_unit.hh
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_EXECUTION_UNIT_HH__
+#define __CPU_INORDER_EXECUTION_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/func_unit.hh"
+#include "cpu/inorder/first_stage.hh"
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+
+class ExecutionUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ enum Command {
+ ExecuteInst
+ };
+
+ public:
+ ExecutionUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~ExecutionUnit() {}
+
+ public:
+ virtual void regStats();
+
+    /** Execute the function of this resource. The default action
+     * is to do nothing. More specific models will derive from this
+     * class and define their own execute function.
+     */
+ virtual void execute(int slot_num);
+
+ protected:
+ /////////////////////////////////////////////////////////////////
+ //
+ // RESOURCE STATISTICS
+ //
+ /////////////////////////////////////////////////////////////////
+ Stats::Scalar<> predictedTakenIncorrect;
+ Stats::Scalar<> predictedNotTakenIncorrect;
+};
+
+
+#endif //__CPU_INORDER_EXECUTION_UNIT_HH__
diff --git a/src/cpu/inorder/resources/fetch_seq_unit.cc b/src/cpu/inorder/resources/fetch_seq_unit.cc
new file mode 100644
index 000000000..00f76f74b
--- /dev/null
+++ b/src/cpu/inorder/resources/fetch_seq_unit.cc
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/resources/fetch_seq_unit.hh"
+#include "cpu/inorder/resource_pool.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{
+ instSize = sizeof(MachInst);
+
+ for (int tid = 0; tid < ThePipeline::MaxThreads; tid++) {
+ delaySlotInfo[tid].numInsts = 0;
+ delaySlotInfo[tid].targetReady = false;
+
+ pcValid[tid] = false;
+ pcBlockStage[tid] = 0;
+
+ squashSeqNum[tid] = (InstSeqNum)-1;
+ lastSquashCycle[tid] = 0;
+ }
+}
+
+void
+FetchSeqUnit::init()
+{
+ resourceEvent = new FetchSeqEvent[width];
+
+ initSlots();
+}
+
+void
+FetchSeqUnit::execute(int slot_num)
+{
+ // After this is working, change this to a reinterpret cast
+ // for performance considerations
+ ResourceRequest* fs_req = reqMap[slot_num];
+ DynInstPtr inst = fs_req->inst;
+ int tid = inst->readTid();
+ int stage_num = fs_req->getStageNum();
+ int seq_num = inst->seqNum;
+
+ fs_req->fault = NoFault;
+
+ switch (fs_req->cmd)
+ {
+ case AssignNextPC:
+ {
+ if (pcValid[tid]) {
+
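+                // Once the delay slot instruction(s) of a branch have been
+                // assigned a PC, redirect the fetch stream to the branch
+                // target recorded when the branch was processed.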
+ if (delaySlotInfo[tid].targetReady &&
+ delaySlotInfo[tid].numInsts == 0) {
+ // Set PC to target
+ PC[tid] = delaySlotInfo[tid].targetAddr; //next_PC
+ nextPC[tid] = PC[tid] + instSize; //next_NPC
+ nextNPC[tid] = PC[tid] + (2 * instSize);
+
+ delaySlotInfo[tid].targetReady = false;
+
+ DPRINTF(Resource, "[tid:%i]: Setting PC to delay slot target\n",tid);
+ }
+
+ inst->setPC(PC[tid]);
+ inst->setNextPC(PC[tid] + instSize);
+ inst->setNextNPC(PC[tid] + (instSize * 2));
+
+ inst->setPredTarg(inst->readNextNPC());
+
+ inst->setMemAddr(PC[tid]);
+ inst->setSeqNum(cpu->getAndIncrementInstSeq(tid));
+
+ DPRINTF(Resource, "[tid:%i]: Assigning [sn:%i] to PC %08p\n", tid,
+ inst->seqNum, inst->readPC());
+
+ if (delaySlotInfo[tid].numInsts > 0) {
+ --delaySlotInfo[tid].numInsts;
+
+ // It's OK to set PC to target of branch
+ if (delaySlotInfo[tid].numInsts == 0) {
+ delaySlotInfo[tid].targetReady = true;
+ }
+
+ DPRINTF(Resource, "[tid:%i]: %i delay slot inst(s) left to"
+ " process.\n", tid, delaySlotInfo[tid].numInsts);
+ }
+
+ PC[tid] = nextPC[tid];
+ nextPC[tid] = nextNPC[tid];
+ nextNPC[tid] += instSize;
+
+ fs_req->done();
+ } else {
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: NPC not valid\n", tid);
+ fs_req->setCompleted(false);
+ }
+ }
+ break;
+
+ case UpdateTargetPC:
+ {
+ if (inst->isControl()) {
+ // If it's a return, then we must wait for resolved address.
+ if (inst->isReturn() && !inst->predTaken()) {
+ cpu->pipelineStage[stage_num]->toPrevStages->stageBlock[stage_num][tid] = true;
+ pcValid[tid] = false;
+ pcBlockStage[tid] = stage_num;
+ } else if (inst->isCondDelaySlot() && !inst->predTaken()) {
+ // Not-Taken AND Conditional Control
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: [PC:%08p] Predicted Not-Taken Cond. "
+ "Delay inst. Skipping delay slot and Updating PC to %08p\n",
+ tid, inst->seqNum, inst->readPC(), inst->readPredTarg());
+
+ DPRINTF(Resource, "[tid:%i] Setting up squash to start from stage %i, after [sn:%i].\n",
+ tid, stage_num, seq_num);
+
+ inst->bdelaySeqNum = seq_num;
+ inst->squashingStage = stage_num;
+
+ squashAfterInst(inst, stage_num, tid);
+ } else if (!inst->isCondDelaySlot() && !inst->predTaken()) {
+ // Not-Taken Control
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: Predicted Not-Taken Control "
+ "inst. updating PC to %08p\n", tid, inst->seqNum,
+ inst->readNextPC());
+
+ ++delaySlotInfo[tid].numInsts;
+ delaySlotInfo[tid].targetReady = false;
+ delaySlotInfo[tid].targetAddr = inst->readNextNPC();
+
+ } else if (inst->predTaken()) {
+ // Taken Control
+ ++delaySlotInfo[tid].numInsts;
+ delaySlotInfo[tid].targetReady = false;
+ delaySlotInfo[tid].targetAddr = inst->readPredTarg();
+
+ DPRINTF(Resource, "[tid:%i]: [sn:%i] Updating delay slot target "
+ "to PC %08p\n", tid, inst->seqNum, inst->readPredTarg());
+
+ // Set-Up Squash Through-Out Pipeline
+ DPRINTF(Resource, "[tid:%i] Setting up squash to start from stage %i, after [sn:%i].\n",
+ tid, stage_num, seq_num + 1);
+ inst->bdelaySeqNum = seq_num + 1;
+ inst->squashingStage = stage_num;
+
+ // Do Squashing
+ squashAfterInst(inst, stage_num, tid);
+ }
+ } else {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: Ignoring branch target update "
+                    "since this is not a control instruction.\n", tid, inst->seqNum);
+ }
+
+ fs_req->done();
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
+
+inline void
+FetchSeqUnit::squashAfterInst(DynInstPtr inst, int stage_num, unsigned tid)
+{
+ // Squash In Pipeline Stage
+ cpu->pipelineStage[stage_num]->squashDueToBranch(inst, tid);
+
+    // Squash inside the current resource so that, if fetching is needed on the
+    // same cycle, the fetch information will be correct.
+ // squash(inst, stage_num, inst->bdelaySeqNum, tid);
+
+ // Schedule Squash Through-out Resource Pool
+ cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0);
+}
+
+void
+FetchSeqUnit::squash(DynInstPtr inst, int squash_stage,
+ InstSeqNum squash_seq_num, unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i]: Updating due to squash from stage %i.\n",
+ tid, squash_stage);
+
+ InstSeqNum done_seq_num = inst->bdelaySeqNum;
+ Addr new_PC = inst->readPredTarg();
+
+ if (squashSeqNum[tid] <= done_seq_num &&
+ lastSquashCycle[tid] == curTick) {
+        DPRINTF(Resource, "[tid:%i]: Ignoring squash from stage %i, since "
+ "there is an outstanding squash that is older.\n",
+ tid, squash_stage);
+ } else {
+ squashSeqNum[tid] = done_seq_num;
+ lastSquashCycle[tid] = curTick;
+
+        // If the very next instruction number is the done seq. num,
+        // then we haven't seen the delay slot yet; otherwise, the
+        // next instruction to be fetched is the delay slot inst.
+ if (cpu->nextInstSeqNum(tid) != done_seq_num &&
+ !inst->procDelaySlotOnMispred) {
+ delaySlotInfo[tid].numInsts = 0;
+ delaySlotInfo[tid].targetReady = false;
+
+ // Reset PC
+ PC[tid] = new_PC;
+ nextPC[tid] = new_PC + instSize;
+ nextNPC[tid] = new_PC + (2 * instSize);
+
+ DPRINTF(Resource, "[tid:%i]: Setting PC to %08p.\n",
+ tid, PC[tid]);
+ } else {
+ delaySlotInfo[tid].numInsts = 1;
+ delaySlotInfo[tid].targetReady = false;
+ delaySlotInfo[tid].targetAddr = (inst->procDelaySlotOnMispred) ? inst->branchTarget() : new_PC;
+
+ // Reset PC to Delay Slot Instruction
+ if (inst->procDelaySlotOnMispred) {
+ PC[tid] = new_PC;
+ nextPC[tid] = new_PC + instSize;
+ nextNPC[tid] = new_PC + (2 * instSize);
+ }
+
+ }
+
+ // Unblock Any Stages Waiting for this information to be updated ...
+ if (!pcValid[tid]) {
+ cpu->pipelineStage[pcBlockStage[tid]]->toPrevStages->stageUnblock[pcBlockStage[tid]][tid] = true;
+ }
+
+ pcValid[tid] = true;
+ }
+
+ Resource::squash(inst, squash_stage, squash_seq_num, tid);
+}
+
+FetchSeqUnit::FetchSeqEvent::FetchSeqEvent()
+ : ResourceEvent()
+{ }
+
+void
+FetchSeqUnit::FetchSeqEvent::process()
+{
+ FetchSeqUnit* fs_res = dynamic_cast<FetchSeqUnit*>(resource);
+ assert(fs_res);
+
+ for (int i=0; i < MaxThreads; i++) {
+ fs_res->PC[i] = fs_res->cpu->readPC(i);
+ fs_res->nextPC[i] = fs_res->cpu->readNextPC(i);
+ fs_res->nextNPC[i] = fs_res->cpu->readNextNPC(i);
+ DPRINTF(Resource, "[tid:%i]: Setting PC:%08p NPC:%08p NNPC:%08p.\n",
+                i, fs_res->PC[i], fs_res->nextPC[i], fs_res->nextNPC[i]);
+
+ fs_res->pcValid[i] = true;
+ }
+
+ //cpu->fetchPriorityList.push_back(tid);
+}
+
+
+void
+FetchSeqUnit::activateThread(unsigned tid)
+{
+ pcValid[tid] = true;
+
+ PC[tid] = cpu->readPC(tid);
+ nextPC[tid] = cpu->readNextPC(tid);
+ nextNPC[tid] = cpu->readNextNPC(tid);
+
+ cpu->fetchPriorityList.push_back(tid);
+
+ DPRINTF(Resource, "[tid:%i]: Reading PC:%08p NPC:%08p NNPC:%08p.\n",
+ tid, PC[tid], nextPC[tid], nextNPC[tid]);
+}
+
+void
+FetchSeqUnit::deactivateThread(unsigned tid)
+{
+ delaySlotInfo[tid].numInsts = 0;
+ delaySlotInfo[tid].targetReady = false;
+
+ pcValid[tid] = false;
+ pcBlockStage[tid] = 0;
+
+ squashSeqNum[tid] = (InstSeqNum)-1;
+ lastSquashCycle[tid] = 0;
+
+ std::list<unsigned>::iterator thread_it = find(cpu->fetchPriorityList.begin(),
+ cpu->fetchPriorityList.end(),
+ tid);
+
+ if (thread_it != cpu->fetchPriorityList.end())
+ cpu->fetchPriorityList.erase(thread_it);
+}
diff --git a/src/cpu/inorder/resources/fetch_seq_unit.hh b/src/cpu/inorder/resources/fetch_seq_unit.hh
new file mode 100644
index 000000000..2b074bbaa
--- /dev/null
+++ b/src/cpu/inorder/resources/fetch_seq_unit.hh
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_FETCH_SEQ_UNIT_HH__
+#define __CPU_INORDER_FETCH_SEQ_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace ThePipeline;
+
+class FetchSeqUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
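+    /** Commands handled by this unit: AssignNextPC hands the next fetch
+     *  PC (and any delay slot bookkeeping) to an instruction, while
+     *  UpdateTargetPC redirects the PC stream based on a control
+     *  instruction's predicted or resolved target.
+     */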
+ enum Command {
+ AssignNextPC,
+ UpdateTargetPC
+ };
+
+ public:
+ FetchSeqUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~FetchSeqUnit() {}
+
+ virtual void init();
+ virtual void activateThread(unsigned tid);
+ virtual void deactivateThread(unsigned tid);
+ virtual void execute(int slot_num);
+
+    /** Override the default Resource squash sequence. This actually
+     * looks in the global communication buffer to get the squash
+     * info.
+     */
+ virtual void squash(DynInstPtr inst, int squash_stage,
+ InstSeqNum squash_seq_num, unsigned tid);
+
+
+ inline void squashAfterInst(DynInstPtr inst, int stage_num, unsigned tid);
+
+ protected:
+ unsigned instSize;
+
+ bool pcValid[ThePipeline::MaxThreads];
+ int pcBlockStage[ThePipeline::MaxThreads];
+
+ TheISA::IntReg PC[ThePipeline::MaxThreads];
+ TheISA::IntReg nextPC[ThePipeline::MaxThreads];
+ TheISA::IntReg nextNPC[ThePipeline::MaxThreads];
+
+ /** Tracks delay slot information for threads in ISAs which use
+     * delay slots.
+ */
+ struct DelaySlotInfo {
+ InstSeqNum delaySlotSeqNum;
+ InstSeqNum branchSeqNum;
+ int numInsts;
+ Addr targetAddr;
+ bool targetReady;
+ };
+
+ DelaySlotInfo delaySlotInfo[ThePipeline::MaxThreads];
+
+ /** Squash Seq. Nums*/
+ InstSeqNum squashSeqNum[ThePipeline::MaxThreads];
+
+    /** Cycle of the last squash, per thread. */
+ Tick lastSquashCycle[ThePipeline::MaxThreads];
+
+ /** @todo: Add Resource Stats Here */
+
+ public:
+ class FetchSeqEvent : public ResourceEvent {
+ public:
+ /** Constructs a resource event. */
+ FetchSeqEvent();
+ virtual ~FetchSeqEvent() {}
+
+ /** Processes a resource event. */
+ virtual void process();
+ };
+
+};
+
+#endif //__CPU_INORDER_FETCH_SEQ_UNIT_HH__
diff --git a/src/cpu/inorder/resources/graduation_unit.cc b/src/cpu/inorder/resources/graduation_unit.cc
new file mode 100644
index 000000000..f4bbf3f5d
--- /dev/null
+++ b/src/cpu/inorder/resources/graduation_unit.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "cpu/inorder/resources/graduation_unit.hh"
+
+using namespace ThePipeline;
+
+GraduationUnit::GraduationUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{
+ lastCycleGrad = 0;
+ numCycleGrad = 0;
+
+ for (int tid = 0; tid < ThePipeline::MaxThreads; tid++) {
+ nonSpecInstActive[tid] = &cpu->nonSpecInstActive[tid];
+ nonSpecSeqNum[tid] = &cpu->nonSpecSeqNum[tid];
+ }
+}
+
+void
+GraduationUnit::execute(int slot_num)
+{
+ ResourceRequest* grad_req = reqMap[slot_num];
+ DynInstPtr inst = reqMap[slot_num]->inst;
+ Fault fault = reqMap[slot_num]->fault;
+ int tid, seq_num;
+
+ tid = inst->readTid();
+ seq_num = inst->seqNum;
+ int stage_num = inst->resSched.top()->stageNum;
+
+ grad_req->fault = NoFault;
+
+ switch (grad_req->cmd)
+ {
+ case GraduateInst:
+ {
+ // @TODO: Instructions should never really get to this point since this should be handled
+ // through the request interface. Check to make sure this happens and delete this
+ // code.
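+            // The unit's width appears to act as a per-cycle graduation
+            // bandwidth limit: the count resets on each new cycle, and
+            // further graduations are refused once the limit is exceeded.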
+ if (lastCycleGrad != curTick) {
+ lastCycleGrad = curTick;
+ numCycleGrad = 0;
+ } else if (numCycleGrad > width) {
+ DPRINTF(Resource, "Graduation bandwidth reached for this cycle.\n");
+ return;
+ }
+
+ // Make sure this is the last thing on the resource schedule
+ assert(inst->resSched.size() == 1);
+
+ DPRINTF(Resource, "[tid:%i] Graduating instruction [sn:%i].\n",
+ tid, seq_num);
+
+ DPRINTF(RefCount, "Refcount = %i.\n", 0/*inst->curCount()*/);
+
+ // Release Non-Speculative "Block" on instructions that could not execute
+ // because there was a non-speculative inst. active.
+ // @TODO: Fix this functionality. Probably too conservative.
+ if (inst->isNonSpeculative()) {
+ *nonSpecInstActive[tid] = false;
+ DPRINTF(Resource, "[tid:%i] Non-speculative instruction [sn:%i] has graduated.\n",
+ tid, seq_num);
+ }
+
+ if (inst->traceData) {
+ inst->traceData->setStageCycle(stage_num, curTick);
+ }
+
+ // Tell CPU that instruction is finished processing
+ cpu->instDone(inst, tid);
+
+ //cpu->pipelineStage[stage_num]->toPrevStages->
+ //stageInfo[stage_num][tid].doneSeqNum = inst->seqNum;
+
+ grad_req->done();
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+
+}
diff --git a/src/cpu/inorder/resources/graduation_unit.hh b/src/cpu/inorder/resources/graduation_unit.hh
new file mode 100644
index 000000000..ad222b119
--- /dev/null
+++ b/src/cpu/inorder/resources/graduation_unit.hh
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_GRAD_UNIT_HH__
+#define __CPU_INORDER_GRAD_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+
+class GraduationUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ enum Command {
+ GraduateInst
+ };
+
+ public:
+ GraduationUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~GraduationUnit() {}
+
+ virtual void execute(int slot_num);
+
+ protected:
+ Tick lastCycleGrad;
+ int numCycleGrad;
+
+ bool *nonSpecInstActive[ThePipeline::MaxThreads];
+
+ InstSeqNum *nonSpecSeqNum[ThePipeline::MaxThreads];
+
+ /** @todo: Add Resource Stats Here */
+};
+
+#endif //__CPU_INORDER_GRAD_UNIT_HH__
diff --git a/src/cpu/inorder/resources/inst_buffer.cc b/src/cpu/inorder/resources/inst_buffer.cc
new file mode 100644
index 000000000..97924d2d3
--- /dev/null
+++ b/src/cpu/inorder/resources/inst_buffer.cc
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/resources/inst_buffer.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+InstBuffer::InstBuffer(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{ }
+
+void
+InstBuffer::regStats()
+{
+ instsBypassed
+ .name(name() + ".instsBypassed")
+ .desc("Number of Instructions Bypassed.");
+
+ Resource::regStats();
+}
+
+void
+InstBuffer::execute(int slot_idx)
+{
+ ResReqPtr ib_req = reqMap[slot_idx];
+ DynInstPtr inst = ib_req->inst;
+ int tid, seq_num, stage_num;
+
+ tid = inst->readTid();
+ seq_num = inst->seqNum;
+ stage_num = ib_req->getStageNum();
+ ib_req->fault = NoFault;
+
+ switch (ib_req->cmd)
+ {
+ case ScheduleOrBypass:
+ {
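+            // Decide whether this instruction can skip the buffer entirely
+            // (bypassing straight to the stage after next) or whether buffer
+            // insert/remove entries must be added to its resource schedule.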
+ int next_stage = stage_num + 1;
+ int bypass_stage = stage_num + 2;
+ bool do_bypass = true;
+
+ if (!instList.empty()) {
+ DPRINTF(Resource, "[sn:%i] cannot bypass stage %i because buffer isn't empty.\n",
+ inst->seqNum, next_stage);
+ do_bypass = false;
+ } else if(cpu->pipelineStage[bypass_stage]->isBlocked(tid)) {
+ DPRINTF(Resource, "[sn:%i] cannot bypass stage %i because stage %i is blocking.\n",
+                        inst->seqNum, next_stage, bypass_stage);
+ do_bypass = false;
+ } else if(cpu->pipelineStage[bypass_stage]->stageBufferAvail() <= 0) {
+ DPRINTF(Resource, "[sn:%i] cannot bypass stage %i because there is no room in "
+                        "stage %i's incoming stage buffer.\n", inst->seqNum, next_stage, bypass_stage);
+ do_bypass = false;
+ }
+
+ if (!do_bypass) { // SCHEDULE USAGE OF BUFFER
+ DPRINTF(Resource, "Scheduling [sn:%i] for buffer insertion in stage %i\n",
+ inst->seqNum, next_stage);
+
+ // Add to schedule: Insert into buffer in next stage
+ int stage_pri = ThePipeline::getNextPriority(inst, next_stage);
+
+ inst->resSched.push(new ScheduleEntry(next_stage, stage_pri, id,
+ InstBuffer::InsertInst));
+
+ // Add to schedule: Remove from buffer in next next (bypass) stage
+ stage_pri = ThePipeline::getNextPriority(inst, bypass_stage);
+
+ inst->resSched.push(new ScheduleEntry(bypass_stage, stage_pri, id,
+ InstBuffer::RemoveInst));
+ } else { // BYPASS BUFFER & NEXT STAGE
+ DPRINTF(Resource, "Setting [sn:%i] to bypass stage %i and enter stage %i.\n",
+ inst->seqNum, next_stage, bypass_stage);
+ inst->setNextStage(bypass_stage);
+ instsBypassed++;
+ }
+
+ ib_req->done();
+ }
+ break;
+
+ case InsertInst:
+ {
+ bool inserted = false;
+
+ if (instList.size() < width) {
+ DPRINTF(Resource, "[tid:%i]: Inserting [sn:%i] into buffer.\n",
+ tid, seq_num);
+ insert(inst);
+ inserted = true;
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Denying [sn:%i] request because "
+ "buffer is full.\n", tid, seq_num);
+
+
+ std::list<DynInstPtr>::iterator list_it = instList.begin();
+ std::list<DynInstPtr>::iterator list_end = instList.end();
+
+ while (list_it != list_end) {
+ DPRINTF(Resource,"Serving [tid:%i] [sn:%i].\n", (*list_it)->readTid(), (*list_it)->seqNum);
+ list_it++;
+ }
+ }
+
+ ib_req->done(inserted);
+ }
+ break;
+
+ case RemoveInst:
+ {
+ DPRINTF(Resource, "[tid:%i]: Removing [sn:%i] from buffer.\n",
+ tid, seq_num);
+ remove(inst);
+ ib_req->done();
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+
+ DPRINTF(Resource, "Buffer now contains %i insts.\n", instList.size());
+}
+
+void
+InstBuffer::insert(DynInstPtr inst)
+{
+ instList.push_back(inst);
+}
+
+void
+InstBuffer::remove(DynInstPtr inst)
+{
+ std::list<DynInstPtr>::iterator list_it = instList.begin();
+ std::list<DynInstPtr>::iterator list_end = instList.end();
+
+ while (list_it != list_end) {
+ if((*list_it) == inst) {
+ instList.erase(list_it);
+ break;
+ }
+ list_it++;
+ }
+}
+
+void
+InstBuffer::pop(unsigned tid)
+{
+ instList.pop_front();
+}
+
+ThePipeline::DynInstPtr
+InstBuffer::top(unsigned tid)
+{
+ return instList.front();
+}
+
+void
+InstBuffer::squash(DynInstPtr inst, int stage_num,
+ InstSeqNum squash_seq_num, unsigned tid)
+{
+ queue<list<DynInstPtr>::iterator> remove_list;
+ list<DynInstPtr>::iterator list_it = instList.begin();
+ list<DynInstPtr>::iterator list_end = instList.end();
+
+ // Collect All Instructions to be Removed in Remove List
+ while (list_it != list_end) {
+ if((*list_it)->readTid() == tid &&
+ (*list_it)->seqNum > squash_seq_num) {
+ (*list_it)->setSquashed();
+ remove_list.push(list_it);
+ }
+
+ list_it++;
+ }
+
+    // Remove Instructions from InstList & Clear Remove List
+ while (!remove_list.empty()) {
+ DPRINTF(Resource, "[tid:%i]: Removing squashed [sn:%i] from buffer.\n",
+ tid, (*remove_list.front())->seqNum);
+ instList.erase(remove_list.front());
+ remove_list.pop();
+ }
+
+ Resource::squash(inst, stage_num, squash_seq_num, tid);
+}
diff --git a/src/cpu/inorder/resources/inst_buffer.hh b/src/cpu/inorder/resources/inst_buffer.hh
new file mode 100644
index 000000000..f4851e868
--- /dev/null
+++ b/src/cpu/inorder/resources/inst_buffer.hh
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_INST_BUFF_UNIT_HH__
+#define __CPU_INORDER_INST_BUFF_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+
+class InstBuffer : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ enum Command {
+ InsertInst,
+ InsertAddr,
+ RemoveInst,
+ RemoveAddr,
+ ScheduleOrBypass
+ };
+
+ public:
+ InstBuffer(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~InstBuffer() {}
+
+ virtual void regStats();
+
+ virtual void execute(int slot_num);
+
+ virtual void insert(DynInstPtr inst);
+
+ virtual void remove(DynInstPtr inst);
+
+ virtual void pop(unsigned tid);
+
+ virtual DynInstPtr top(unsigned tid);
+
+ virtual void squash(DynInstPtr inst, int stage_num,
+ InstSeqNum squash_seq_num, unsigned tid);
+ protected:
+ /** List of instructions this resource is currently
+ * processing.
+ */
+ std::list<DynInstPtr> instList;
+
+ public:
+ /////////////////////////////////////////////////////////////////
+ //
+ // RESOURCE STATISTICS
+ //
+ /////////////////////////////////////////////////////////////////
+    /** Number of Instructions Bypassed. */
+ Stats::Scalar<> instsBypassed;
+
+};
+
+#endif //__CPU_INORDER_INST_BUFF_UNIT_HH__
diff --git a/src/cpu/inorder/resources/inst_buffer_new.cc b/src/cpu/inorder/resources/inst_buffer_new.cc
new file mode 100644
index 000000000..7e2c98837
--- /dev/null
+++ b/src/cpu/inorder/resources/inst_buffer_new.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/resources/inst_buffer.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+InstBuffer::InstBuffer(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{ }
+
+ResReqPtr
+InstBuffer::getRequest(DynInstPtr inst, int stage_num, int res_idx,
+ int slot_num)
+{
+ // After this is working, change this to a reinterpret cast
+ // for performance considerations
+ InstBufferEntry* ib_entry = dynamic_cast<InstBufferEntry*>(inst->resSched.top());
+ assert(ib_entry);
+
+ return new InstBufferRequest(this, inst, stage_num, id, slot_num,
+ ib_entry->cmd);
+}
+
+void
+InstBuffer::execute(int slot_idx)
+{
+ // After this is working, change this to a reinterpret cast
+ // for performance considerations
+ InstBufferRequest* ib_req = dynamic_cast<InstBufferRequest*>(reqMap[slot_idx]);
+ assert(ib_req);
+
+ DynInstPtr inst = ib_req->inst;
+ int tid = inst->readTid();
+ int seq_num = inst->seqNum;
+ ib_req->fault = NoFault;
+
+ switch (ib_req->cmd)
+ {
+ case InsertInst:
+ {
+ DPRINTF(Resource, "[tid:%i]: Inserting [sn:%i] into buffer.\n",
+ tid, seq_num);
+ insert(inst);
+ ib_req->done();
+ }
+ break;
+
+ case RemoveInst:
+ {
+ DPRINTF(Resource, "[tid:%i]: Removing [sn:%i] from buffer.\n",
+ tid, seq_num);
+ remove(inst);
+ ib_req->done();
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+
+ DPRINTF(Resource, "Buffer now contains %i insts.\n", instList.size());
+}
+
+void
+InstBuffer::insert(DynInstPtr inst)
+{
+ instList.push_back(inst);
+}
+
+void
+InstBuffer::remove(DynInstPtr inst)
+{
+ std::list<DynInstPtr>::iterator list_it = instList.begin();
+ std::list<DynInstPtr>::iterator list_end = instList.end();
+
+ while (list_it != list_end) {
+ if((*list_it) == inst) {
+ instList.erase(list_it);
+ break;
+ }
+ list_it++;
+ }
+}
+
+void
+InstBuffer::pop()
+{ instList.pop_front(); }
+
+ThePipeline::DynInstPtr
+InstBuffer::top()
+{ return instList.front(); }
+
+void
+InstBuffer::squash(InstSeqNum squash_seq_num, unsigned tid)
+{
+ list<DynInstPtr>::iterator list_it = instList.begin();
+ list<DynInstPtr>::iterator list_end = instList.end();
+ queue<list<DynInstPtr>::iterator> remove_list;
+
+ // Collect All Instructions to be Removed in Remove List
+ while (list_it != list_end) {
+ if((*list_it)->seqNum > squash_seq_num) {
+ DPRINTF(Resource, "[tid:%i]: Squashing [sn:%i] in resource.\n",
+ tid, (*list_it)->seqNum);
+ (*list_it)->setSquashed();
+ remove_list.push(list_it);
+ }
+
+ list_it++;
+ }
+
+    // Remove Instructions from InstList & Clear Remove List
+ while (!remove_list.empty()) {
+ instList.erase(remove_list.front());
+ remove_list.pop();
+ }
+
+ Resource::squash(squash_seq_num, tid);
+}
diff --git a/src/cpu/inorder/resources/inst_buffer_new.hh b/src/cpu/inorder/resources/inst_buffer_new.hh
new file mode 100644
index 000000000..e374fa109
--- /dev/null
+++ b/src/cpu/inorder/resources/inst_buffer_new.hh
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_INST_BUFF_UNIT_HH__
+#define __CPU_INORDER_INST_BUFF_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+
+class InstBuffer : public Resource {
+ public:
+ typedef InOrderDynInst::DynInstPtr DynInstPtr;
+
+ public:
+ enum Command {
+ InsertInst,
+ InsertAddr,
+ RemoveInst,
+ RemoveAddr
+ };
+
+ public:
+ InstBuffer(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu);
+ virtual ~InstBuffer() {}
+
+ virtual ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,
+ int res_idx, int slot_num);
+
+ virtual void execute(int slot_num);
+
+ virtual void insert(DynInstPtr inst);
+
+ virtual void remove(DynInstPtr inst);
+
+ virtual void pop();
+
+ virtual DynInstPtr top();
+
+ virtual void squash(InstSeqNum squash_seq_num, unsigned tid);
+
+ protected:
+ /** List of instructions this resource is currently
+ * processing.
+ */
+ std::list<DynInstPtr> instList;
+
+ /** @todo: Add Resource Stats Here */
+
+};
+
+struct InstBufferEntry : public ThePipeline::ScheduleEntry {
+ InstBufferEntry(int stage_num, int res_num, InstBuffer::Command _cmd) :
+ ScheduleEntry(stage_num, res_num), cmd(_cmd)
+ { }
+
+ InstBuffer::Command cmd;
+};
+
+class InstBufferRequest : public ResourceRequest {
+ public:
+ typedef InOrderDynInst::DynInstPtr DynInstPtr;
+
+ public:
+ InstBufferRequest(InstBuffer *res, DynInstPtr inst, int stage_num, int res_idx, int slot_num,
+ InstBuffer::Command _cmd)
+ : ResourceRequest(res, inst, stage_num, res_idx, slot_num),
+ cmd(_cmd)
+ { }
+
+ InstBuffer::Command cmd;
+};
+
+
+#endif //__CPU_INORDER_INST_BUFF_UNIT_HH__
diff --git a/src/cpu/inorder/resources/mem_dep_unit.hh b/src/cpu/inorder/resources/mem_dep_unit.hh
new file mode 100644
index 000000000..0bd850c5c
--- /dev/null
+++ b/src/cpu/inorder/resources/mem_dep_unit.hh
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_MEM_DEP_UNIT_HH__
+#define __CPU_INORDER_MEM_DEP_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+
+class MemDepUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ MemDepUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu);
+ virtual ~MemDepUnit() {}
+
+ virtual void execute(int slot_num);
+
+ protected:
+ Tick lastCycleGrad;
+ int numCycleGrad;
+
+ bool *nonSpecInstActive[ThePipeline::MaxThreads];
+
+ InstSeqNum *nonSpecSeqNum[ThePipeline::MaxThreads];
+
+ /** @todo: Add Resource Stats Here */
+};
+
+#endif //__CPU_INORDER_MEM_DEP_UNIT_HH__
diff --git a/src/cpu/inorder/resources/mult_div_unit.cc b/src/cpu/inorder/resources/mult_div_unit.cc
new file mode 100644
index 000000000..abef11247
--- /dev/null
+++ b/src/cpu/inorder/resources/mult_div_unit.cc
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "cpu/inorder/resources/mult_div_unit.hh"
+#include "cpu/inorder/resource_pool.hh"
+#include "cpu/inorder/cpu.hh"
+#include "cpu/op_class.hh"
+
+using namespace std;
+using namespace ThePipeline;
+
+MultDivUnit::MultDivUnit(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu)
+{
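+    // Multiply and divide timing comes from the pipeline parameters:
+    // a single latency/repeat rate for multiplies, and operand-size
+    // dependent values for divides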
+ multRepeatRate = params->multRepeatRate;
+ multLatency = params->multLatency;
+
+ div8RepeatRate = params->div8RepeatRate;
+ div8Latency = params->div8Latency;
+
+ div16RepeatRate = params->div16RepeatRate;
+ div16Latency = params->div16Latency;
+
+ div24RepeatRate = params->div24RepeatRate;
+ div24Latency = params->div24Latency;
+
+    div32RepeatRate = params->div32RepeatRate;
+    div32Latency = params->div32Latency;
+
+    // Nothing has used the MDU yet
+    lastMDUCycle = 0;
+    lastOpType = No_OpClass;
+    lastDivSize = 0;
+}
+
+void
+MultDivUnit::regStats()
+{
+ multInstReqsProcessed
+ .name(name() + ".multInstReqsProcessed")
+ .desc("Number of Multiply Requests Processed.");
+
+ divInstReqsProcessed
+ .name(name() + ".divInstReqsProcessed")
+ .desc("Number of Divide Requests Processed.");
+
+ Resource::regStats();
+}
+
+void
+MultDivUnit::init()
+{
+ // Set Up Resource Events to Appropriate Resource BandWidth
+ resourceEvent = new MDUEvent[width];
+
+ initSlots();
+}
+
+int
+MultDivUnit::findSlot(DynInstPtr inst)
+{
+ DPRINTF(InOrderMDU, "Finding slot for inst:%i\n | slots-free:%i | slots-used:%i\n",
+ inst->seqNum, slotsAvail(), slotsInUse());
+
+ return Resource::findSlot(inst);
+}
+
+void
+MultDivUnit::freeSlot(int slot_idx)
+{
+ DPRINTF(InOrderMDU, "Freeing slot for inst:%i\n | slots-free:%i | slots-used:%i\n",
+ reqMap[slot_idx]->getInst()->seqNum, slotsAvail(), slotsInUse());
+
+ Resource::freeSlot(slot_idx);
+}
+
+
+int
+MultDivUnit::getSlot(DynInstPtr inst)
+{
+ // If MDU already has instruction, return current slot.
+ int slot_num = findSlot(inst);
+
+ // If we have this instruction's request already then return
+ if (slot_num != -1 &&
+ inst->resSched.top()->cmd == reqMap[slot_num]->cmd)
+ return slot_num;
+
+ unsigned repeat_rate = 0;
+
+ /** Enforce MDU dependencies after a multiply is seen last */
+ if (lastOpType == IntMultOp) {
+ repeat_rate = multRepeatRate;
+ }
+
+ /** Enforce dependencies after a divide is seen last */
+ if (lastOpType == IntDivOp) {
+ switch (lastDivSize) {
+ case 8:
+ repeat_rate = div8RepeatRate;
+ break;
+
+ case 16:
+ repeat_rate = div16RepeatRate;
+ break;
+
+ case 24:
+ repeat_rate = div24RepeatRate;
+ break;
+
+ case 32:
+ repeat_rate = div32RepeatRate;
+ break;
+ }
+ }
+
+ if (lastMDUCycle + repeat_rate > curTick) {
+ DPRINTF(InOrderMDU, "MDU not ready to process another inst. until %i, denying request.\n",
+ lastMDUCycle + repeat_rate);
+ return -1;
+    } else {
+        int rval = Resource::getSlot(inst);
+        DPRINTF(InOrderMDU, "MDU request should pass: %i.\n", rval);
+
+        if (rval != -1) {
+            // Record when and what type of op last acquired the MDU so
+            // the repeat rate can be enforced on the next request
+            lastMDUCycle = curTick;
+            lastOpType = inst->opClass();
+        }
+
+        return rval;
+    }
+}
+
+int
+MultDivUnit::getDivOpSize(DynInstPtr inst)
+{
+ // Get RT Register from instruction (index #1)
+ uint32_t div_op = inst->readIntSrc(1);
+
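+    // Bucket the divisor's magnitude into 8/16/24/32 bits so the
+    // matching divide latency and repeat rate can be selected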
+ if (div_op <= 0xFF) {
+ return 8;
+ } else if (div_op <= 0xFFFF) {
+ return 16;
+ } else if (div_op <= 0xFFFFFF) {
+ return 24;
+ } else {
+ return 32;
+ }
+}
+
+void
+MultDivUnit::execute(int slot_num)
+{
+ ResourceRequest* mult_div_req = reqMap[slot_num];
+ DynInstPtr inst = reqMap[slot_num]->inst;
+ Fault fault = reqMap[slot_num]->fault;
+
+ //int tid = inst->readTid();
+ //int seq_num = inst->seqNum;
+
+ switch (mult_div_req->cmd)
+ {
+ case StartMultDiv:
+ DPRINTF(InOrderMDU, "Start MDU called ...\n");
+
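+        // Schedule an MDU event to perform the operation after the
+        // op-specific latency; the request is marked completed below
+        // so the instruction isn't held in this stage meanwhile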
+ if (inst->opClass() == IntMultOp) {
+ scheduleEvent(slot_num, multLatency);
+ multInstReqsProcessed++;
+ } else if (inst->opClass() == IntDivOp) {
+ int op_size = getDivOpSize(inst);
+
+ switch (op_size)
+ {
+ case 8:
+ scheduleEvent(slot_num, div8Latency);
+ break;
+
+ case 16:
+ scheduleEvent(slot_num, div16Latency);
+ break;
+
+ case 24:
+ scheduleEvent(slot_num, div24Latency);
+ break;
+
+ case 32:
+ scheduleEvent(slot_num, div32Latency);
+ break;
+ }
+
+ lastDivSize = op_size;
+
+ divInstReqsProcessed++;
+ }
+
+        // Allow the instruction to pass through to the next
+        // stage while the event counts down the latency
+ mult_div_req->setCompleted();
+ break;
+
+ case MultDiv:
+ DPRINTF(InOrderMDU, "Execute MDU called ...\n");
+ exeMulDiv(slot_num);
+ mult_div_req->done();
+ break;
+
+
+ case EndMultDiv:
+      //@TODO: Why not allow high-latency requests to sleep
+      //      within the stage until the event wakes them up?
+      //      It seems wasteful to continually check whether
+      //      this is done when we have an event counting down
+      //      the time in parallel.
+ {
+ DPRINTF(InOrderMDU, "End MDU called ...\n");
+ if (mult_div_req->getInst()->isExecuted())
+ mult_div_req->done();
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
+
+void
+MultDivUnit::exeMulDiv(int slot_num)
+{
+ ResourceRequest* mult_div_req = reqMap[slot_num];
+ DynInstPtr inst = reqMap[slot_num]->inst;
+ Fault fault = reqMap[slot_num]->fault;
+ int tid = inst->readTid();
+ int seq_num = inst->seqNum;
+
+ fault = inst->execute();
+
+ if (fault == NoFault) {
+ inst->setExecuted();
+ mult_div_req->setCompleted();
+
+ DPRINTF(Resource, "[tid:%i]: The result of execution is 0x%x.\n",
+ inst->readTid(), inst->readIntResult(0));
+ } else {
+ warn("inst [sn:%i] had a %s fault", seq_num, fault->name());
+ cpu->trap(fault, tid);
+ }
+}
+
+
+MDUEvent::MDUEvent()
+ : ResourceEvent()
+{ }
+
+void
+MDUEvent::process()
+{
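+    // The op's latency has elapsed: execute the multiply/divide now
+    // and retire the request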
+ MultDivUnit* mdu_res = reinterpret_cast<MultDivUnit*>(resource);
+
+ mdu_res->exeMulDiv(slotIdx);
+
+ ResourceRequest* mult_div_req = resource->reqMap[slotIdx];
+
+ mult_div_req->done();
+}
+
+
diff --git a/src/cpu/inorder/resources/mult_div_unit.hh b/src/cpu/inorder/resources/mult_div_unit.hh
new file mode 100644
index 000000000..762442c4b
--- /dev/null
+++ b/src/cpu/inorder/resources/mult_div_unit.hh
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_MULT_DIV_UNIT_HH__
+#define __CPU_INORDER_MULT_DIV_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/func_unit.hh"
+#include "cpu/op_class.hh"
+#include "cpu/inorder/first_stage.hh"
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+
+class MDUEvent;
+
+class MultDivUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ enum Command {
+ StartMultDiv,
+ EndMultDiv,
+ MultDiv
+ };
+
+ public:
+ MultDivUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~MultDivUnit() {}
+
+ public:
+ /** Override default Resource getSlot(). Will only getSlot if
+ * valid mult/div sequence is being maintained
+ */
+ virtual int getSlot(DynInstPtr inst);
+
+ virtual int findSlot(DynInstPtr inst);
+
+ virtual void freeSlot(int slot_idx);
+
+ virtual void init();
+
+ /** Get Operand Size For A Division Operation */
+ int getDivOpSize(DynInstPtr inst);
+
+ /** Override default Resource execute */
+ virtual void execute(int slot_num);
+
+ void exeMulDiv(int slot_num);
+
+ /** Register extra resource stats */
+ virtual void regStats();
+
+ protected:
+ /** Latency & Repeat Rate for Multiply Insts */
+ unsigned multLatency;
+ unsigned multRepeatRate;
+
+ /** Latency & Repeat Rate for 8-bit Divide Insts */
+ unsigned div8Latency;
+ unsigned div8RepeatRate;
+
+ /** Latency & Repeat Rate for 16-bit Divide Insts */
+ unsigned div16Latency;
+ unsigned div16RepeatRate;
+
+ /** Latency & Repeat Rate for 24-bit Divide Insts */
+ unsigned div24Latency;
+ unsigned div24RepeatRate;
+
+ /** Latency & Repeat Rate for 32-bit Divide Insts */
+ unsigned div32Latency;
+ unsigned div32RepeatRate;
+
+ /** Last cycle that MDU was used */
+ Tick lastMDUCycle;
+
+ /** Last type of instruction MDU started processing */
+ OpClass lastOpType;
+
+ /** Last Division Operand of instruction MDU was processing */
+ uint32_t lastDivSize;
+
+ /** Last instruction name the MDU used */
+ std::string lastInstName;
+
+ /** Number of Instruction Requests the Resource Processes */
+ Stats::Scalar<> multInstReqsProcessed;
+
+ /** Number of Instruction Requests the Resource Processes */
+ Stats::Scalar<> divInstReqsProcessed;
+
+ MDUEvent *mduEvent;
+};
+
+class MDUEvent : public ResourceEvent
+{
+ public:
+ MDUEvent();
+ virtual ~MDUEvent() { }
+
+
+ virtual void process();
+};
+
+
+#endif //__CPU_INORDER_MULT_DIV_UNIT_HH__
diff --git a/src/cpu/inorder/resources/resource_list.hh b/src/cpu/inorder/resources/resource_list.hh
new file mode 100644
index 000000000..cbe2ad8c3
--- /dev/null
+++ b/src/cpu/inorder/resources/resource_list.hh
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_RESOURCE_LIST_HH__
+#define __CPU_INORDER_RESOURCE_LIST_HH__
+
+#include "cpu/inorder/resources/cache_unit.hh"
+#include "cpu/inorder/resources/execution_unit.hh"
+#include "cpu/inorder/resources/use_def.hh"
+#include "cpu/inorder/resources/inst_buffer.hh"
+#include "cpu/inorder/resources/decode_unit.hh"
+#include "cpu/inorder/resources/graduation_unit.hh"
+#include "cpu/inorder/resources/tlb_unit.hh"
+#include "cpu/inorder/resources/fetch_seq_unit.hh"
+#include "cpu/inorder/resources/branch_predictor.hh"
+#include "cpu/inorder/resources/agen_unit.hh"
+#include "cpu/inorder/resources/mult_div_unit.hh"
+
+#endif //__CPU_INORDER_RESOURCE_LIST_HH__
diff --git a/src/cpu/inorder/resources/tlb_unit.cc b/src/cpu/inorder/resources/tlb_unit.cc
new file mode 100644
index 000000000..9b0decedb
--- /dev/null
+++ b/src/cpu/inorder/resources/tlb_unit.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/resources/tlb_unit.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+TLBUnit::TLBUnit(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : InstBuffer(res_name, res_id, res_width, res_latency, _cpu, params)
+{
+ for (int i=0; i < MaxThreads; i++) {
+ tlbBlocked[i] = false;
+ }
+}
+
+void
+TLBUnit::init()
+{
+ resourceEvent = new TLBUnitEvent[width];
+
+ initSlots();
+}
+
+int
+TLBUnit::getSlot(DynInstPtr inst)
+{
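+    // A thread blocked on an outstanding TLB miss cannot start
+    // another lookup until the miss event unblocks it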
+ if (tlbBlocked[inst->threadNumber]) {
+ return -1;
+ } else {
+ return Resource::getSlot(inst);
+ }
+}
+
+ResourceRequest*
+TLBUnit::getRequest(DynInstPtr _inst, int stage_num,
+ int res_idx, int slot_num,
+ unsigned cmd)
+{
+ return new TLBUnitRequest(this, _inst, stage_num, res_idx, slot_num,
+ cmd);
+}
+
+void
+TLBUnit::execute(int slot_idx)
+{
+ // After this is working, change this to a reinterpret cast
+ // for performance considerations
+ TLBUnitRequest* tlb_req = dynamic_cast<TLBUnitRequest*>(reqMap[slot_idx]);
+ assert(tlb_req);
+
+ DynInstPtr inst = tlb_req->inst;
+ int tid, seq_num, stage_num;
+
+ tid = inst->readTid();
+ seq_num = inst->seqNum;
+ stage_num = tlb_req->getStageNum();
+
+ tlb_req->fault = NoFault;
+
+ switch (tlb_req->cmd)
+ {
+ case FetchLookup:
+ {
+ tlb_req->fault =
+ this->cpu->translateInstReq(tlb_req->memReq, cpu->thread[tid]);
+
+ if (tlb_req->fault != NoFault) {
+ DPRINTF(Resource, "[tid:%i]: %s encountered while translating "
+ "addr:%08p for [sn:%i].\n", tid, tlb_req->fault->name(),
+ tlb_req->memReq->getVaddr(), seq_num);
+ //insert(inst);
+ cpu->pipelineStage[stage_num]->setResStall(tlb_req, tid);
+ tlbBlocked[tid] = true;
+ scheduleEvent(slot_idx, 1);
+
+ // @TODO: SHOULDNT BREAK EXECUTION at misspeculated PC Fault
+ // Let CPU handle the fault
+ cpu->trap(tlb_req->fault, tid);
+ } else {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i] virt. addr %08p translated "
+ "to phys. addr:%08p.\n", tid, seq_num,
+ tlb_req->memReq->getVaddr(),
+ tlb_req->memReq->getPaddr());
+ tlb_req->done();
+ }
+ }
+ break;
+
+ case DataLookup:
+ {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i]: Attempting to translate %08p.\n",
+ tid, seq_num, tlb_req->memReq->getVaddr());
+
+ tlb_req->fault =
+ this->cpu->translateInstReq(tlb_req->memReq, cpu->thread[tid]);
+
+ if (tlb_req->fault != NoFault) {
+ DPRINTF(Resource, "[tid:%i]: %s encountered while translating "
+ "addr:%08p for [sn:%i].\n", tid, tlb_req->fault->name(),
+ tlb_req->memReq->getVaddr(), seq_num);
+ //insert(inst);
+ cpu->pipelineStage[stage_num]->setResStall(tlb_req, tid);
+ tlbBlocked[tid] = true;
+ scheduleEvent(slot_idx, 1);
+
+ // Let CPU handle the fault
+ cpu->trap(tlb_req->fault, tid);
+ } else {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i] virt. addr %08p translated "
+ "to phys. addr:%08p.\n", tid, seq_num,
+ tlb_req->memReq->getVaddr(),
+ tlb_req->memReq->getPaddr());
+ tlb_req->done();
+ }
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+}
+
+TLBUnitEvent::TLBUnitEvent()
+ : ResourceEvent()
+{ }
+
+void
+TLBUnitEvent::process()
+{
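+    // The TLB-miss stall period is over: unblock the thread and
+    // clear the stall that was set on the requesting pipeline stage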
+ DynInstPtr inst = resource->reqMap[slotIdx]->inst;
+ int stage_num = resource->reqMap[slotIdx]->getStageNum();
+ int tid = inst->threadNumber;
+
+ DPRINTF(Resource, "Waking up from TLB Miss caused by [sn:%i].\n",
+ inst->seqNum);
+
+ TLBUnit* tlb_res = dynamic_cast<TLBUnit*>(resource);
+ assert(tlb_res);
+
+ tlb_res->tlbBlocked[tid] = false;
+
+ tlb_res->cpu->pipelineStage[stage_num]->unsetResStall(resource->reqMap[slotIdx], tid);
+
+ // Effectively NOP the instruction but still allow it
+ // to commit
+ //while (!inst->resSched.empty() &&
+ // inst->resSched.top()->stageNum != ThePipeline::NumStages - 1) {
+ //inst->resSched.pop();
+ //}
+}
diff --git a/src/cpu/inorder/resources/tlb_unit.hh b/src/cpu/inorder/resources/tlb_unit.hh
new file mode 100644
index 000000000..c7fee6030
--- /dev/null
+++ b/src/cpu/inorder/resources/tlb_unit.hh
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_TLB_UNIT_HH__
+#define __CPU_INORDER_TLB_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/inorder/resources/inst_buffer.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/cpu.hh"
+
+class TLBUnit : public InstBuffer {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ enum TLBCommand {
+ FetchLookup,
+ DataLookup
+ };
+
+ public:
+ TLBUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~TLBUnit() {}
+
+ void init();
+
+ int getSlot(DynInstPtr inst);
+
+ virtual ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,
+ int res_idx, int slot_num,
+ unsigned cmd);
+
+ virtual void execute(int slot_num);
+
+ bool tlbBlocked[ThePipeline::MaxThreads];
+
+ protected:
+ /** List of instructions this resource is currently
+ * processing.
+ */
+ std::list<DynInstPtr> instList;
+
+ /** @todo: Add Resource Stats Here */
+
+};
+
+class TLBUnitEvent : public ResourceEvent {
+ public:
+ /** Constructs a resource event. */
+ TLBUnitEvent();
+ virtual ~TLBUnitEvent() {}
+
+ /** Processes a resource event. */
+ virtual void process();
+};
+
+class TLBUnitRequest : public ResourceRequest {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ TLBUnitRequest(TLBUnit *res, DynInstPtr inst, int stage_num, int res_idx, int slot_num,
+ unsigned _cmd)
+ : ResourceRequest(res, inst, stage_num, res_idx, slot_num, _cmd)
+ {
+ Addr aligned_addr;
+ int req_size;
+ unsigned flags;
+
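+        // Build the memory request: instruction fetches use a fixed
+        // MachInst-sized access, data accesses take size and flags
+        // from the instruction itself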
+ if (_cmd == TLBUnit::FetchLookup) {
+ aligned_addr = inst->getMemAddr();
+ req_size = sizeof(MachInst);
+ flags = 0;
+ } else {
+            aligned_addr = inst->getMemAddr();
+ req_size = inst->getMemAccSize();
+ flags = inst->getMemFlags();
+ }
+
+ // @TODO: Add Vaddr & Paddr functions
+ inst->memReq = new Request(inst->readTid(), aligned_addr, req_size,
+ flags, inst->readPC(), res->cpu->readCpuId(), inst->readTid());
+
+ memReq = inst->memReq;
+ }
+
+ RequestPtr memReq;
+};
+
+
+#endif //__CPU_INORDER_TLB_UNIT_HH__
diff --git a/src/cpu/inorder/resources/use_def.cc b/src/cpu/inorder/resources/use_def.cc
new file mode 100644
index 000000000..ed697c4ca
--- /dev/null
+++ b/src/cpu/inorder/resources/use_def.cc
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include <vector>
+#include <list>
+#include "arch/isa_traits.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/resources/use_def.hh"
+#include "cpu/inorder/cpu.hh"
+
+using namespace std;
+using namespace TheISA;
+using namespace ThePipeline;
+
+UseDefUnit::UseDefUnit(string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
+ : Resource(res_name, res_id, res_width, res_latency, _cpu),
+ maxSeqNum((InstSeqNum)-1)
+{
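+    // Wire up per-thread pointers to the CPU-wide non-speculative
+    // tracking state and the architectural register dependency map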
+ for (int tid = 0; tid < ThePipeline::MaxThreads; tid++) {
+ nonSpecInstActive[tid] = &cpu->nonSpecInstActive[tid];
+ nonSpecSeqNum[tid] = &cpu->nonSpecSeqNum[tid];
+
+ outReadSeqNum[tid] = maxSeqNum;
+ outWriteSeqNum[tid] = maxSeqNum;
+
+ regDepMap[tid] = &cpu->archRegDepMap[tid];
+ }
+}
+
+ResReqPtr
+UseDefUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
+ int slot_num, unsigned cmd)
+{
+ return new UseDefRequest(this, inst, stage_num, id, slot_num, cmd,
+ inst->resSched.top()->idx);
+}
+
+
+ResReqPtr
+UseDefUnit::findRequest(DynInstPtr inst)
+{
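+    // An instruction may have several use/def requests outstanding
+    // (one per source/dest register), so match on the scheduled
+    // command and register index as well as the instruction itself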
+ map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+ map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+ while (map_it != map_end) {
+ UseDefRequest* ud_req = dynamic_cast<UseDefRequest*>((*map_it).second);
+ assert(ud_req);
+
+ if (ud_req &&
+ ud_req->getInst() == inst &&
+ ud_req->cmd == inst->resSched.top()->cmd &&
+ ud_req->useDefIdx == inst->resSched.top()->idx) {
+ return ud_req;
+ }
+ map_it++;
+ }
+
+ return NULL;
+}
+
+void
+UseDefUnit::execute(int slot_idx)
+{
+ // After this is working, change this to a reinterpret cast
+ // for performance considerations
+ UseDefRequest* ud_req = dynamic_cast<UseDefRequest*>(reqMap[slot_idx]);
+ assert(ud_req);
+
+ DynInstPtr inst = ud_req->inst;
+ int tid = inst->readTid();
+ int seq_num = inst->seqNum;
+ int ud_idx = ud_req->useDefIdx;
+
+ // If there is a non-speculative instruction
+ // in the pipeline then stall instructions here
+ if (*nonSpecInstActive[tid] == true &&
+ seq_num > *nonSpecSeqNum[tid]) {
+ DPRINTF(Resource, "[tid:%i]: [sn:%i] cannot execute because there is "
+ "non-speculative instruction [sn:%i] has not graduated.\n",
+ tid, seq_num, *nonSpecSeqNum[tid]);
+ return;
+ } else if (inst->isNonSpeculative()) {
+ *nonSpecInstActive[tid] = true;
+ *nonSpecSeqNum[tid] = seq_num;
+ }
+
+ switch (ud_req->cmd)
+ {
+ case ReadSrcReg:
+ {
+ int reg_idx = inst->_srcRegIdx[ud_idx];
+
+ DPRINTF(Resource, "[tid:%i]: Attempting to read source register idx %i.\n",
+ tid, ud_idx);
+
+ // Ask register dependency map if it is OK to read from Arch. Reg. File
+ if (regDepMap[tid]->canRead(reg_idx, inst)) {
+ // Read From Register File
+ if (inst->seqNum <= outReadSeqNum[tid]) {
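+                // Indices below FP_Base_DepTag are integer registers,
+                // those up to Ctrl_Base_DepTag are FP registers, and
+                // anything above that is a misc register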
+ if (reg_idx <= FP_Base_DepTag) {
+ DPRINTF(Resource, "[tid:%i]: Reading Int Reg %i from Register File.\n",
+ tid, reg_idx);
+ inst->setIntSrc(ud_idx,
+ cpu->readIntReg(reg_idx,inst->readTid()));
+ } else if (reg_idx <= Ctrl_Base_DepTag) {
+ reg_idx -= FP_Base_DepTag;
+ DPRINTF(Resource, "[tid:%i]: Reading Float Reg %i from Register File.\n",
+ tid, reg_idx);
+ inst->setIntSrc(ud_idx, // Always Read FloatRegBits For Now
+ cpu->readFloatRegBits(reg_idx, inst->readTid()));
+ } else {
+ reg_idx -= Ctrl_Base_DepTag;
+ DPRINTF(Resource, "[tid:%i]: Reading Misc Reg %i from Register File.\n",
+ tid, reg_idx);
+ inst->setIntSrc(ud_idx,
+ cpu->readMiscReg(reg_idx, inst->readTid()));
+ }
+
+ outReadSeqNum[tid] = maxSeqNum;
+
+ ud_req->done();
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Unable to read because of [sn:%i] hasnt read it's"
+ " registers yet.\n", tid, outReadSeqNum[tid]);
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: waiting for [sn:%i] to write\n",
+ tid, outReadSeqNum[tid]);
+ }
+
+ } else {
+ DynInstPtr forward_inst = regDepMap[tid]->canForward(reg_idx, ud_idx, inst);
+
+ if (forward_inst) {
+
+ if (inst->seqNum <= outReadSeqNum[tid]) {
+ int dest_reg_idx = forward_inst->getDestIdxNum(reg_idx);
+
+ if (reg_idx <= FP_Base_DepTag) {
+ DPRINTF(Resource, "[tid:%i]: Forwarding dest. reg value 0x%x from "
+ "[sn:%i] to [sn:%i] source #%i.\n",
+ tid, forward_inst->readIntResult(dest_reg_idx) ,
+ forward_inst->seqNum, inst->seqNum, ud_idx);
+ inst->setIntSrc(ud_idx, forward_inst->readIntResult(dest_reg_idx));
+ } else if (reg_idx <= Ctrl_Base_DepTag) {
+ DPRINTF(Resource, "[tid:%i]: Forwarding dest. reg value 0x%x from "
+ "[sn:%i] to [sn:%i] source #%i.\n",
+ tid, forward_inst->readFloatResult(dest_reg_idx) ,
+ forward_inst->seqNum, inst->seqNum, ud_idx);
+ inst->setFloatSrc(ud_idx, forward_inst->readFloatResult(dest_reg_idx));
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Forwarding dest. reg value 0x%x from "
+ "[sn:%i] to [sn:%i] source #%i.\n",
+ tid, forward_inst->readIntResult(dest_reg_idx) ,
+ forward_inst->seqNum, inst->seqNum, ud_idx);
+ inst->setIntSrc(ud_idx, forward_inst->readIntResult(dest_reg_idx));
+ }
+
+ outReadSeqNum[tid] = maxSeqNum;
+
+ ud_req->done();
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Unable to read because of [sn:%i] hasnt read it's"
+ " registers yet.\n", tid, outReadSeqNum[tid]);
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: waiting for [sn:%i] to forward\n",
+ tid, outReadSeqNum[tid]);
+ }
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Source register idx: %i is not ready to read.\n",
+ tid, reg_idx);
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: waiting to read register (idx=%i)\n",
+ tid, reg_idx);
+ outReadSeqNum[tid] = inst->seqNum;
+ }
+ }
+ }
+ break;
+
+ case WriteDestReg:
+ {
+ int reg_idx = inst->_destRegIdx[ud_idx];
+
+ if (regDepMap[tid]->canWrite(reg_idx, inst)) {
+ DPRINTF(Resource, "[tid:%i]: Attempting to write to Register File.\n",
+ tid);
+
+ if (inst->seqNum <= outReadSeqNum[tid]) {
+ if (reg_idx <= FP_Base_DepTag) {
+ DPRINTF(Resource, "[tid:%i]: Writing 0x%x to register idx %i.\n",
+ tid, inst->readIntResult(ud_idx), reg_idx);
+
+ // Remove Dependencies
+ regDepMap[tid]->removeFront(reg_idx, inst);
+
+ cpu->setIntReg(reg_idx,
+ inst->readIntResult(ud_idx),
+ inst->readTid());
+ } else if(reg_idx <= Ctrl_Base_DepTag) {
+
+ // Remove Dependencies
+ regDepMap[tid]->removeFront(reg_idx, inst);
+
+ reg_idx -= FP_Base_DepTag;
+
+ cpu->setFloatReg(reg_idx, // Check for FloatRegBits Here
+ inst->readFloatResult(ud_idx),
+ inst->readTid());
+ } else {
+ // Remove Dependencies
+ regDepMap[tid]->removeFront(reg_idx, inst);
+
+ reg_idx -= Ctrl_Base_DepTag;
+ cpu->setMiscReg(reg_idx,
+ inst->readIntResult(ud_idx),
+ inst->readTid());
+ }
+
+ outWriteSeqNum[tid] = maxSeqNum;
+
+ ud_req->done();
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Unable to write because of [sn:%i] hasnt read it's"
+ " registers yet.\n", tid, outReadSeqNum);
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: waiting for [sn:%i] to read\n",
+ tid, outReadSeqNum);
+ }
+ } else {
+ DPRINTF(Resource, "[tid:%i]: Dest. register idx: %i is not ready to write.\n",
+ tid, reg_idx);
+ DPRINTF(InOrderStall, "STALL: [tid:%i]: waiting to write register (idx=%i)\n",
+ tid, reg_idx);
+ outWriteSeqNum[tid] = inst->seqNum;
+ }
+ }
+ break;
+
+ default:
+ fatal("Unrecognized command to %s", resName);
+ }
+
+}
+
+void
+UseDefUnit::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num, unsigned tid)
+{
+ DPRINTF(Resource, "[tid:%i]: Updating Due To Squash After [sn:%i].\n",
+ tid, squash_seq_num);
+
+ std::vector<int> slot_remove_list;
+
+ map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+ map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+ while (map_it != map_end) {
+ ResReqPtr req_ptr = (*map_it).second;
+
+ if (req_ptr &&
+ req_ptr->getInst()->readTid() == tid &&
+ req_ptr->getInst()->seqNum > squash_seq_num) {
+
+ DPRINTF(Resource, "[tid:%i]: Squashing [sn:%i].\n",
+ req_ptr->getInst()->readTid(),
+ req_ptr->getInst()->seqNum);
+
+ regDepMap[tid]->remove(req_ptr->getInst());
+
+ int req_slot_num = req_ptr->getSlot();
+
+ if (latency > 0)
+ unscheduleEvent(req_slot_num);
+
+ // Mark slot for removal from resource
+ slot_remove_list.push_back(req_ptr->getSlot());
+ }
+
+ map_it++;
+ }
+
+ // Now Delete Slot Entry from Req. Map
+ for (int i = 0; i < slot_remove_list.size(); i++) {
+ freeSlot(slot_remove_list[i]);
+ }
+
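+    // Outstanding reads/writes younger than the squash point are no
+    // longer valid, so reset that bookkeeping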
+ if (outReadSeqNum[tid] >= squash_seq_num) {
+ DPRINTF(Resource, "[tid:%i]: Outstanding Read Seq Num Reset.\n", tid);
+ outReadSeqNum[tid] = maxSeqNum;
+ } else if (outReadSeqNum[tid] != maxSeqNum) {
+ DPRINTF(Resource, "[tid:%i]: No need to reset Outstanding Read Seq Num %i\n",
+ tid, outReadSeqNum[tid]);
+ }
+
+ if (outWriteSeqNum[tid] >= squash_seq_num) {
+ DPRINTF(Resource, "[tid:%i]: Outstanding Write Seq Num Reset.\n", tid);
+ outWriteSeqNum[tid] = maxSeqNum;
+ } else if (outWriteSeqNum[tid] != maxSeqNum) {
+ DPRINTF(Resource, "[tid:%i]: No need to reset Outstanding Write Seq Num %i\n",
+ tid, outWriteSeqNum[tid]);
+ }
+}
diff --git a/src/cpu/inorder/resources/use_def.hh b/src/cpu/inorder/resources/use_def.hh
new file mode 100644
index 000000000..238591117
--- /dev/null
+++ b/src/cpu/inorder/resources/use_def.hh
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_USE_DEF_UNIT_HH__
+#define __CPU_INORDER_USE_DEF_UNIT_HH__
+
+#include <vector>
+#include <list>
+#include <string>
+
+#include "cpu/func_unit.hh"
+#include "cpu/inorder/first_stage.hh"
+#include "cpu/inorder/resource.hh"
+#include "cpu/inorder/inorder_dyn_inst.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/reg_dep_map.hh"
+
+class UseDefUnit : public Resource {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ enum Command {
+ ReadSrcReg,
+ WriteDestReg
+ };
+
+ public:
+ UseDefUnit(std::string res_name, int res_id, int res_width,
+ int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+ virtual ~UseDefUnit() {}
+
+ virtual ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,
+ int res_idx, int slot_num,
+ unsigned cmd);
+
+ virtual ResReqPtr findRequest(DynInstPtr inst);
+
+ virtual void execute(int slot_num);
+
+ virtual void squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num, unsigned tid);
+
+ const InstSeqNum maxSeqNum;
+
+ protected:
+ RegDepMap *regDepMap[ThePipeline::MaxThreads];
+
+ /** Outstanding Seq. Num. Trying to Read from Register File */
+ InstSeqNum outReadSeqNum[ThePipeline::MaxThreads];
+
+ InstSeqNum outWriteSeqNum[ThePipeline::MaxThreads];
+
+ bool *nonSpecInstActive[ThePipeline::MaxThreads];
+
+ InstSeqNum *nonSpecSeqNum[ThePipeline::MaxThreads];
+
+ /** @todo: Add Resource Stats Here */
+
+ public:
+ class UseDefRequest : public ResourceRequest {
+ public:
+ typedef ThePipeline::DynInstPtr DynInstPtr;
+
+ public:
+ UseDefRequest(UseDefUnit *res, DynInstPtr inst, int stage_num, int res_idx,
+ int slot_num, unsigned cmd, int use_def_idx)
+ : ResourceRequest(res, inst, stage_num, res_idx, slot_num, cmd),
+ useDefIdx(use_def_idx)
+ { }
+
+ int useDefIdx;
+ };
+};
+
+#endif //__CPU_INORDER_USE_DEF_UNIT_HH__
diff --git a/src/cpu/inorder/thread_context.cc b/src/cpu/inorder/thread_context.cc
new file mode 100644
index 000000000..5e2c789cb
--- /dev/null
+++ b/src/cpu/inorder/thread_context.cc
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#include "arch/isa_traits.hh"
+#include "cpu/exetrace.hh"
+#include "cpu/inorder/thread_context.hh"
+
+using namespace TheISA;
+
+void
+InOrderThreadContext::takeOverFrom(ThreadContext *old_context)
+{
+ // some things should already be set up
+ assert(getProcessPtr() == old_context->getProcessPtr());
+
+ // copy over functional state
+ setStatus(old_context->status());
+ copyArchRegs(old_context);
+ //setCpuId(0/*old_context->readCpuId()*/);
+
+ thread->funcExeInst = old_context->readFuncExeInst();
+ old_context->setStatus(ThreadContext::Unallocated);
+ thread->inSyscall = false;
+ thread->trapPending = false;
+}
+
+void
+InOrderThreadContext::activate(int delay)
+{
+ DPRINTF(InOrderCPU, "Calling activate on Thread Context %d\n",
+ getThreadNum());
+
+ if (thread->status() == ThreadContext::Active)
+ return;
+
+ // @TODO: Make this process useful again...
+ //if (thread->status() == ThreadContext::Unallocated) {
+ // Allows the CPU to drain partitioned resources
+ // before inserting thread into the CPU
+ // (e.g. bind physical registers)
+ //cpu->activateWhenReady(thread->readTid());
+ //return;
+ //}
+
+ thread->setStatus(ThreadContext::Active);
+
+ // status() == Suspended
+ cpu->activateContext(thread->readTid(), delay);
+}
+
+
+void
+InOrderThreadContext::suspend(int delay)
+{
+ DPRINTF(InOrderCPU, "Calling suspend on Thread Context %d\n",
+ getThreadNum());
+
+ if (thread->status() == ThreadContext::Suspended)
+ return;
+
+ thread->setStatus(ThreadContext::Suspended);
+ cpu->suspendContext(thread->readTid(), delay);
+}
+
+void
+InOrderThreadContext::deallocate(int delay)
+{
+ DPRINTF(InOrderCPU, "Calling deallocate on Thread Context %d\n",
+ getThreadNum());
+
+ if (thread->status() == ThreadContext::Unallocated)
+ return;
+
+ thread->setStatus(ThreadContext::Unallocated);
+ cpu->deallocateContext(thread->readTid(), delay);
+}
+
+void
+InOrderThreadContext::halt(int delay)
+{
+ DPRINTF(InOrderCPU, "Calling halt on Thread Context %d\n",
+ getThreadNum());
+
+ if (thread->status() == ThreadContext::Halted)
+ return;
+
+ thread->setStatus(ThreadContext::Halted);
+ cpu->haltContext(thread->readTid(), delay);
+}
+
+
+void
+InOrderThreadContext::regStats(const std::string &name)
+{
+#if FULL_SYSTEM
+ thread->kernelStats = new Kernel::Statistics(cpu->system);
+ thread->kernelStats->regStats(name + ".kern");
+#endif
+ ;
+}
+
+
+void
+InOrderThreadContext::serialize(std::ostream &os)
+{
+#if FULL_SYSTEM
+ if (thread->kernelStats)
+ thread->kernelStats->serialize(os);
+#endif
+ ;
+}
+
+
+void
+InOrderThreadContext::unserialize(Checkpoint *cp, const std::string &section)
+{
+#if FULL_SYSTEM
+ if (thread->kernelStats)
+ thread->kernelStats->unserialize(cp, section);
+#endif
+ ;
+}
+
+TheISA::MachInst
+InOrderThreadContext::getInst()
+{
+ return thread->getInst();
+}
+
+
+void
+InOrderThreadContext::copyArchRegs(ThreadContext *tc)
+{
+ unsigned tid = thread->readTid();
+ PhysRegIndex renamed_reg;
+
+ // First loop through the integer registers.
+ for (int i = 0; i < TheISA::NumIntRegs; ++i) {
+ renamed_reg = cpu->renameMap[tid].lookup(i);
+
+ DPRINTF(InOrderCPU, "Copying over register %i, had data %lli, "
+ "now has data %lli.\n",
+ renamed_reg, cpu->readIntReg(renamed_reg, tid),
+ tc->readIntReg(i));
+
+ cpu->setIntReg(renamed_reg, tc->readIntReg(i), tid);
+ }
+
+ // Then loop through the floating point registers.
+ for (int i = 0; i < TheISA::NumFloatRegs; ++i) {
+ renamed_reg = cpu->renameMap[tid].lookup(i + TheISA::FP_Base_DepTag);
+ cpu->setFloatRegBits(renamed_reg, tc->readFloatRegBits(i), tid);
+ }
+
+ // Copy the misc regs.
+ TheISA::copyMiscRegs(tc, this);
+
+ // Then finally set the PC and the next PC.
+ cpu->setPC(tc->readPC(), tid);
+ cpu->setNextPC(tc->readNextPC(), tid);
+ cpu->setNextNPC(tc->readNextNPC(), tid);
+ this->thread->funcExeInst = tc->readFuncExeInst();
+}
+
+
+void
+InOrderThreadContext::clearArchRegs()
+{}
+
+
+uint64_t
+InOrderThreadContext::readIntReg(int reg_idx)
+{
+ return cpu->readIntReg(reg_idx, thread->readTid());
+}
+
+FloatReg
+InOrderThreadContext::readFloatReg(int reg_idx, int width)
+{
+ return cpu->readFloatReg(reg_idx, thread->readTid(), width);
+}
+
+FloatReg
+InOrderThreadContext::readFloatReg(int reg_idx)
+{
+ return cpu->readFloatReg(reg_idx, thread->readTid());
+}
+
+FloatRegBits
+InOrderThreadContext::readFloatRegBits(int reg_idx, int width)
+{
+ return cpu->readFloatRegBits(reg_idx, thread->readTid(), width);
+}
+
+FloatRegBits
+InOrderThreadContext::readFloatRegBits(int reg_idx)
+{
+ return cpu->readFloatRegBits(reg_idx, thread->readTid());
+}
+
+uint64_t
+InOrderThreadContext::readRegOtherThread(int reg_idx, unsigned tid)
+{
+ return cpu->readRegOtherThread(reg_idx, tid);
+}
+
+void
+InOrderThreadContext::setIntReg(int reg_idx, uint64_t val)
+{
+ cpu->setIntReg(reg_idx, val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ // cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setFloatReg(int reg_idx, FloatReg val, int width)
+{
+ cpu->setFloatReg(reg_idx, val, thread->readTid(), width);
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setFloatReg(int reg_idx, FloatReg val)
+{
+ cpu->setFloatReg(reg_idx, val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setFloatRegBits(int reg_idx, FloatRegBits val,
+ int width)
+{
+ cpu->setFloatRegBits(reg_idx, val, thread->readTid(), width);
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setFloatRegBits(int reg_idx, FloatRegBits val)
+{
+ cpu->setFloatRegBits(reg_idx, val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setRegOtherThread(int misc_reg, const MiscReg &val, unsigned tid)
+{
+ cpu->setRegOtherThread(misc_reg, val, tid);
+}
+
+void
+InOrderThreadContext::setPC(uint64_t val)
+{
+ DPRINTF(InOrderCPU, "Setting PC to %08p\n", val);
+ cpu->setPC(val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setNextPC(uint64_t val)
+{
+ DPRINTF(InOrderCPU, "Setting NPC to %08p\n", val);
+ cpu->setNextPC(val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setNextNPC(uint64_t val)
+{
+ DPRINTF(InOrderCPU, "Setting NNPC to %08p\n", val);
+ cpu->setNextNPC(val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setMiscRegNoEffect(int misc_reg, const MiscReg &val)
+{
+ cpu->setMiscRegNoEffect(misc_reg, val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+void
+InOrderThreadContext::setMiscReg(int misc_reg, const MiscReg &val)
+{
+ cpu->setMiscReg(misc_reg, val, thread->readTid());
+
+ // Squash if we're not already in a state update mode.
+ //if (!thread->trapPending && !thread->inSyscall) {
+ //cpu->squashFromTC(thread->readTid());
+ //}
+}
+
+TheISA::IntReg
+InOrderThreadContext::getSyscallArg(int i)
+{
+ return cpu->getSyscallArg(i, thread->readTid());
+}
+
+void
+InOrderThreadContext::setSyscallArg(int i, IntReg val)
+{
+ cpu->setSyscallArg(i, val, thread->readTid());
+}
+
+void
+InOrderThreadContext::setSyscallReturn(SyscallReturn return_value)
+{
+ cpu->setSyscallReturn(return_value, thread->readTid());
+}
diff --git a/src/cpu/inorder/thread_context.hh b/src/cpu/inorder/thread_context.hh
new file mode 100644
index 000000000..919d5d2b6
--- /dev/null
+++ b/src/cpu/inorder/thread_context.hh
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2007 MIPS Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Korey Sewell
+ *
+ */
+
+#ifndef __CPU_INORDER_THREAD_CONTEXT_HH__
+#define __CPU_INORDER_THREAD_CONTEXT_HH__
+
+#include "cpu/exetrace.hh"
+#include "cpu/thread_context.hh"
+#include "cpu/inorder/thread_state.hh"
+#include "cpu/inorder/cpu.hh"
+
+class TranslatingPort;
+
+/**
+ * Derived ThreadContext class for use with the InOrderCPU. It
+ * provides the interface for any external objects to access a
+ * single thread's state and some general CPU state. Any time
+ * external objects try to update state through this interface,
+ * the CPU will create an event to squash all in-flight
+ * instructions in order to ensure state is maintained correctly.
+ * It must be defined specifically for the InOrderCPU because
+ * not all architectural state is located within the O3ThreadState
+ * (such as the commit PC, and registers), and specific actions
+ * must be taken when using this interface (such as squashing all
+ * in-flight instructions when doing a write to this interface).
+ */
+class InOrderThreadContext : public ThreadContext
+{
+ public:
+ InOrderThreadContext() { }
+
+ /** Pointer to the CPU. */
+ InOrderCPU *cpu;
+
+    /** Pointer to the thread state that this TC corresponds to. */
+ InOrderThreadState *thread;
+
+
+ /** Returns a pointer to the ITB. */
+ TheISA::ITB *getITBPtr() { return cpu->itb; }
+
+ /** Returns a pointer to the DTB. */
+ TheISA::DTB *getDTBPtr() { return cpu->dtb; }
+
+ System *getSystemPtr() { return cpu->system; }
+
+ /** Returns a pointer to this CPU. */
+ virtual BaseCPU *getCpuPtr() { return cpu; }
+
+    /** Returns the name of this CPU. */
+ virtual std::string getCpuName() { return cpu->name(); }
+
+ /** Reads this CPU's ID. */
+ virtual int cpuId() { return cpu->cpuId(); }
+
+ virtual int contextId() { return thread->contextId(); }
+
+ virtual void setContextId(int id) { thread->setContextId(id); }
+
+ /** Returns this thread's ID number. */
+ virtual int threadId() { return thread->threadId(); }
+ virtual void setThreadId(int id) { return thread->setThreadId(id); }
+
+    /** Micro-PC state is not modeled by this CPU; reads return 0 and
+     *  writes are ignored. */
+    virtual uint64_t readMicroPC() { return 0; }
+
+    virtual void setMicroPC(uint64_t val) { }
+
+    virtual uint64_t readNextMicroPC() { return 0; }
+
+    virtual void setNextMicroPC(uint64_t val) { }
+
+ virtual TranslatingPort *getMemPort() { return thread->getMemPort(); }
+
+ /** Returns a pointer to this thread's process. */
+ virtual Process *getProcessPtr() { return thread->getProcessPtr(); }
+
+ /** Returns this thread's status. */
+ virtual Status status() const { return thread->status(); }
+
+ /** Sets this thread's status. */
+ virtual void setStatus(Status new_status)
+ { thread->setStatus(new_status); }
+
+    /** Returns a pointer to the last graduated/committed
+     *  instruction in the thread. */
+    //DynInstPtr getLastGradInst() { return thread->getLastGradInst(); }
+
+ /** Set the status to Active. Optional delay indicates number of
+ * cycles to wait before beginning execution. */
+ virtual void activate(int delay = 1);
+
+ /** Set the status to Suspended. */
+ virtual void suspend(int delay = 0);
+
+ /** Set the status to Unallocated. */
+ virtual void deallocate(int delay = 1);
+
+ /** Set the status to Halted. */
+ virtual void halt(int delay = 0);
+
+ /** Takes over execution of a thread from another CPU. */
+ virtual void takeOverFrom(ThreadContext *old_context);
+
+ /** Registers statistics associated with this TC. */
+ virtual void regStats(const std::string &name);
+
+ /** Serializes state. */
+ virtual void serialize(std::ostream &os);
+ /** Unserializes state. */
+ virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+ /** Returns this thread's ID number. */
+ virtual int getThreadNum() { return thread->readTid(); }
+
+ /** Returns the instruction this thread is currently committing.
+ * Only used when an instruction faults.
+ */
+ virtual TheISA::MachInst getInst();
+
+ /** Copies the architectural registers from another TC into this TC. */
+ virtual void copyArchRegs(ThreadContext *tc);
+
+ /** Resets all architectural registers to 0. */
+ virtual void clearArchRegs();
+
+ /** Reads an integer register. */
+ virtual uint64_t readIntReg(int reg_idx);
+
+ virtual FloatReg readFloatReg(int reg_idx, int width);
+
+ virtual FloatReg readFloatReg(int reg_idx);
+
+ virtual FloatRegBits readFloatRegBits(int reg_idx, int width);
+
+ virtual FloatRegBits readFloatRegBits(int reg_idx);
+
+ virtual uint64_t readRegOtherThread(int misc_reg, unsigned tid);
+
+ /** Sets an integer register to a value. */
+ virtual void setIntReg(int reg_idx, uint64_t val);
+
+ virtual void setFloatReg(int reg_idx, FloatReg val, int width);
+
+ virtual void setFloatReg(int reg_idx, FloatReg val);
+
+ virtual void setFloatRegBits(int reg_idx, FloatRegBits val, int width);
+
+ virtual void setFloatRegBits(int reg_idx, FloatRegBits val);
+
+ virtual void setRegOtherThread(int misc_reg, const MiscReg &val, unsigned tid);
+
+ /** Reads this thread's PC. */
+ virtual uint64_t readPC()
+ { return cpu->readPC(thread->readTid()); }
+
+ /** Sets this thread's PC. */
+ virtual void setPC(uint64_t val);
+
+ /** Reads this thread's next PC. */
+ virtual uint64_t readNextPC()
+ { return cpu->readNextPC(thread->readTid()); }
+
+ /** Sets this thread's next PC. */
+ virtual void setNextPC(uint64_t val);
+
+ virtual uint64_t readNextNPC()
+ { return cpu->readNextNPC(thread->readTid()); }
+
+ virtual void setNextNPC(uint64_t val);
+
+ /** Reads a miscellaneous register. */
+ virtual MiscReg readMiscRegNoEffect(int misc_reg)
+ { return cpu->readMiscRegNoEffect(misc_reg, thread->readTid()); }
+
+ /** Reads a misc. register, including any side-effects the
+ * read might have as defined by the architecture. */
+ virtual MiscReg readMiscReg(int misc_reg)
+ { return cpu->readMiscReg(misc_reg, thread->readTid()); }
+
+ /** Sets a misc. register. */
+ virtual void setMiscRegNoEffect(int misc_reg, const MiscReg &val);
+
+ /** Sets a misc. register, including any side-effects the
+ * write might have as defined by the architecture. */
+ virtual void setMiscReg(int misc_reg, const MiscReg &val);
+
+ virtual void activateContext(int delay)
+ { cpu->activateContext(thread->readTid(), delay); }
+
+ virtual void deallocateContext()
+ { cpu->deallocateContext(thread->readTid()); }
+
+ /** Returns the number of consecutive store conditional failures. */
+ // @todo: Figure out where these store cond failures should go.
+ virtual unsigned readStCondFailures()
+ { return thread->storeCondFailures; }
+
+ /** Sets the number of consecutive store conditional failures. */
+ virtual void setStCondFailures(unsigned sc_failures)
+ { thread->storeCondFailures = sc_failures; }
+
+    /** Checks if the thread is misspeculating. This only really makes
+     *  sense for the old CPU model, but a lot of code outside the CPU
+     *  still checks it, so it always returns false to keep everything
+     *  working. */
+    virtual bool misspeculating() { return false; }
+
+ /** Gets a syscall argument by index. */
+ virtual IntReg getSyscallArg(int i);
+
+ /** Sets a syscall argument. */
+ virtual void setSyscallArg(int i, IntReg val);
+
+ /** Sets the syscall return value. */
+ virtual void setSyscallReturn(SyscallReturn return_value);
+
+ /** Executes a syscall in SE mode. */
+ virtual void syscall(int64_t callnum)
+ { return cpu->syscall(callnum, thread->readTid()); }
+
+ /** Reads the funcExeInst counter. */
+ virtual Counter readFuncExeInst() { return thread->funcExeInst; }
+
+ virtual void changeRegFileContext(unsigned param,
+ unsigned val)
+ { panic("Not supported!"); }
+
+    /** Exits this thread context in the CPU. Returns 1 if the CPU has
+     *  no more active threads (meaning it's OK to exit the simulation),
+     *  0 otherwise. Used in syscall-emulation mode when a thread
+     *  executes the 'exit' syscall.
+     */
+ virtual int exit()
+ {
+ this->deallocate();
+
+ // If there are still threads executing in the system (for now
+ // this single cpu)
+ if (this->cpu->numActiveThreads() - 1 > 0)
+ return 0; // don't exit simulation
+ else
+ return 1; // exit simulation
+ }
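+
+    // A hypothetical caller (assuming the simulator provides an
+    // exitSimLoop() helper) would consume the return value like this:
+    //
+    //     if (tc->exit())
+    //         exitSimLoop("target called exit()");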
+
+    /** Suspends the thread and schedules it to be reactivated later;
+     *  the condition value is passed on to activateContext() as the
+     *  activation delay. */
+    virtual void setThreadRescheduleCondition(uint64_t cond)
+    {
+        this->deallocate();
+
+        this->setStatus(ThreadContext::Suspended);
+
+        activateContext(cond);
+    }
+};
+
+#endif // __CPU_INORDER_THREAD_CONTEXT_HH__
diff --git a/src/cpu/inorder/thread_state.hh b/src/cpu/inorder/thread_state.hh
new file mode 100644
index 000000000..eb4fe40b2
--- /dev/null
+++ b/src/cpu/inorder/thread_state.hh
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2006 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Kevin Lim
+ */
+
+#ifndef __CPU_INORDER_THREAD_STATE_HH__
+#define __CPU_INORDER_THREAD_STATE_HH__
+
+#include "arch/faults.hh"
+#include "arch/isa_traits.hh"
+#include "cpu/thread_context.hh"
+#include "cpu/thread_state.hh"
+
+class Event;
+class FunctionalMemory;
+class Process;
+class InOrderCPU;
+
+/**
+ * Class that has various thread state, such as the status, the
+ * current instruction being processed, whether or not the thread has
+ * a trap pending or is being externally updated, the ThreadContext
+ * pointer, etc. It also handles anything related to a specific
+ * thread's process, such as syscalls and checking valid addresses.
+ */
+class InOrderThreadState : public ThreadState {
+ typedef ThreadContext::Status Status;
+
+ private:
+ /** Pointer to the CPU. */
+ InOrderCPU *cpu;
+
+ public:
+ /** Whether or not the thread is currently in syscall mode, and
+ * thus able to be externally updated without squashing.
+ */
+ bool inSyscall;
+
+ /** Whether or not the thread is currently waiting on a trap, and
+ * thus able to be externally updated without squashing.
+ */
+ bool trapPending;
+
+
+    InOrderThreadState(InOrderCPU *_cpu, int _thread_num,
+                       Process *_process, int _asid)
+        : ThreadState(reinterpret_cast<BaseCPU*>(_cpu), 0/*_thread_num*/,
+                      _process, 0/*_asid*/),
+          cpu(_cpu), inSyscall(false), trapPending(false)
+    { }
+
+ /** Handles the syscall. */
+ void syscall(int64_t callnum) { process->syscall(callnum, tc); }
+
+ /** Pointer to the ThreadContext of this thread. */
+ ThreadContext *tc;
+
+ /** Returns a pointer to the TC of this thread. */
+ ThreadContext *getTC() { return tc; }
+
+    /** Returns the thread ID; currently hardwired to 0. */
+    int readTid() { return 0; }
+
+ /** Pointer to the last graduated instruction in the thread */
+ //DynInstPtr lastGradInst;
+};
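+
+// A minimal wiring sketch (hypothetical; assumes it runs inside an
+// InOrderCPU method with a thread ID "tid" and a Process pointer
+// "process") of how a thread state is expected to be paired with its
+// thread context:
+//
+//     InOrderThreadState *state =
+//         new InOrderThreadState(this, tid, process, tid);
+//     InOrderThreadContext *tc = new InOrderThreadContext;
+//     tc->cpu = this;
+//     tc->thread = state;
+//     state->tc = tc;     // back-pointer used by syscall()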
+
+#endif // __CPU_INORDER_THREAD_STATE_HH__