path: root/src/cpu/inorder/InOrderCPU.py
author    Korey Sewell <ksewell@umich.edu>  2011-02-04 00:08:22 -0500
committer Korey Sewell <ksewell@umich.edu>  2011-02-04 00:08:22 -0500
commit    68d962f8aff7d2fcc2f8ee77878dd5cab73b69f2 (patch)
tree      c4bb7d88b864e5ee353b743553bbea0efd34cbb8 /src/cpu/inorder/InOrderCPU.py
parent    56ce8acd412747b728b7ad02537a3afd202ae8e8 (diff)
download  gem5-68d962f8aff7d2fcc2f8ee77878dd5cab73b69f2.tar.xz
inorder: add a fetch buffer to fetch unit
Give the fetch unit its own parameterizable fetch buffer to read from. It is very inefficient (architecturally and in simulation) to continually fetch at the granularity of the word size. As expected, the number of fetch memory requests drops dramatically.
Diffstat (limited to 'src/cpu/inorder/InOrderCPU.py')
-rw-r--r--  src/cpu/inorder/InOrderCPU.py  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/cpu/inorder/InOrderCPU.py b/src/cpu/inorder/InOrderCPU.py
index 5d24ae4fd..4766a1ac1 100644
--- a/src/cpu/inorder/InOrderCPU.py
+++ b/src/cpu/inorder/InOrderCPU.py
@@ -48,6 +48,9 @@ class InOrderCPU(BaseCPU):
     dcache_port = Port("Data Port")
     _cached_ports = ['icache_port', 'dcache_port']
 
+    fetchBuffSize = Param.Unsigned(4, "Fetch Buffer Size (Number of Cache Blocks Stored)")
+    memBlockSize = Param.Unsigned(64, "Memory Block Size")
+
     predType = Param.String("tournament", "Branch predictor type ('local', 'tournament')")
     localPredictorSize = Param.Unsigned(2048, "Size of local predictor")
     localCtrBits = Param.Unsigned(2, "Bits per counter")
@@ -69,8 +72,6 @@ class InOrderCPU(BaseCPU):
     functionTraceStart = Param.Tick(0, "Cycle to start function trace")
     stageTracing = Param.Bool(False, "Enable tracing of each stage in CPU")
 
-    memBlockSize = Param.Unsigned(64, "Memory Block Size")
-
     multLatency = Param.Unsigned(1, "Latency for Multiply Operations")
     multRepeatRate = Param.Unsigned(1, "Repeat Rate for Multiply Operations")
     div8Latency = Param.Unsigned(1, "Latency for 8-bit Divide Operations")
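
For reference, a minimal sketch of how the moved/added parameters might be set from a gem5 configuration script; the surrounding System, memory, and port wiring is omitted and assumed to follow the usual gem5 config boilerplate:

    # Minimal sketch (assumes a gem5 build that provides InOrderCPU);
    # the rest of the simulated system is configured elsewhere.
    from m5.objects import InOrderCPU

    cpu = InOrderCPU()

    # Buffer four cache blocks in the fetch unit instead of fetching one
    # word at a time, reducing the number of fetch memory requests.
    cpu.fetchBuffSize = 4

    # Block size used by the fetch buffer; should match the cache line
    # size of the memory system (64 bytes here).
    cpu.memBlockSize = 64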