-rw-r--r--  configs/ruby/Ruby.py | 5
-rw-r--r--  src/cpu/testers/directedtest/RubyDirectedTester.hh | 4
-rw-r--r--  src/cpu/testers/rubytest/RubyTester.hh | 4
-rw-r--r--  src/mem/protocol/MESI_Three_Level-L0cache.sm | 62
-rw-r--r--  src/mem/protocol/MESI_Three_Level-L1cache.sm | 18
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L1cache.sm | 81
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L2cache.sm | 20
-rw-r--r--  src/mem/protocol/MESI_Two_Level-dir.sm | 24
-rw-r--r--  src/mem/protocol/MI_example-cache.sm | 20
-rw-r--r--  src/mem/protocol/MI_example-dir.sm | 20
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm | 67
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L2cache.sm | 60
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dir.sm | 36
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dma.sm | 14
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L1cache.sm | 47
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-dir.sm | 56
-rw-r--r--  src/mem/protocol/MOESI_hammer-cache.sm | 65
-rw-r--r--  src/mem/protocol/MOESI_hammer-dir.sm | 26
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm | 6
-rw-r--r--  src/mem/ruby/common/DataBlock.hh | 2
-rw-r--r--  src/mem/ruby/common/Histogram.cc | 2
-rw-r--r--  src/mem/ruby/common/Histogram.hh | 10
-rw-r--r--  src/mem/ruby/common/SubBlock.cc | 7
-rw-r--r--  src/mem/ruby/common/SubBlock.hh | 7
-rw-r--r--  src/mem/ruby/common/TypeDefines.hh | 3
-rw-r--r--  src/mem/ruby/filters/H3BloomFilter.cc | 10
-rw-r--r--  src/mem/ruby/filters/H3BloomFilter.hh | 2
-rw-r--r--  src/mem/ruby/filters/MultiBitSelBloomFilter.cc | 6
-rw-r--r--  src/mem/ruby/filters/MultiBitSelBloomFilter.hh | 2
-rw-r--r--  src/mem/ruby/network/MessageBuffer.cc | 26
-rw-r--r--  src/mem/ruby/network/MessageBuffer.hh | 7
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc | 2
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/flit.cc | 80
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/flit.hh | 33
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.cc | 245
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.hh | 4
-rw-r--r--  src/mem/ruby/network/simple/SimpleNetwork.cc | 22
-rw-r--r--  src/mem/ruby/network/simple/SimpleNetwork.hh | 8
-rw-r--r--  src/mem/ruby/network/simple/Switch.cc | 6
-rw-r--r--  src/mem/ruby/network/simple/Throttle.cc | 26
-rw-r--r--  src/mem/ruby/network/simple/Throttle.hh | 12
-rw-r--r--  src/mem/ruby/profiler/AccessTraceForAddress.hh | 12
-rw-r--r--  src/mem/ruby/profiler/AddressProfiler.cc | 6
-rw-r--r--  src/mem/ruby/profiler/AddressProfiler.hh | 2
-rw-r--r--  src/mem/ruby/profiler/Profiler.cc | 13
-rw-r--r--  src/mem/ruby/profiler/Profiler.hh | 9
-rw-r--r--  src/mem/ruby/profiler/StoreTrace.cc | 2
-rw-r--r--  src/mem/ruby/profiler/StoreTrace.hh | 4
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractCacheEntry.cc | 25
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractCacheEntry.hh | 24
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.hh | 14
-rw-r--r--  src/mem/ruby/structures/AbstractReplacementPolicy.cc | 2
-rw-r--r--  src/mem/ruby/structures/AbstractReplacementPolicy.hh | 6
-rw-r--r--  src/mem/ruby/structures/BankedArray.cc | 6
-rw-r--r--  src/mem/ruby/structures/BankedArray.hh | 8
-rw-r--r--  src/mem/ruby/structures/CacheMemory.cc | 109
-rw-r--r--  src/mem/ruby/structures/CacheMemory.hh | 28
-rw-r--r--  src/mem/ruby/structures/DirectoryMemory.cc | 2
-rw-r--r--  src/mem/ruby/structures/DirectoryMemory.hh | 1
-rw-r--r--  src/mem/ruby/structures/LRUPolicy.cc | 8
-rw-r--r--  src/mem/ruby/structures/LRUPolicy.hh | 4
-rw-r--r--  src/mem/ruby/structures/PseudoLRUPolicy.cc | 12
-rw-r--r--  src/mem/ruby/structures/PseudoLRUPolicy.hh | 6
-rw-r--r--  src/mem/ruby/structures/RubyMemoryControl.cc | 6
-rw-r--r--  src/mem/ruby/structures/RubyMemoryControl.hh | 12
-rw-r--r--  src/mem/ruby/system/CacheRecorder.cc | 15
-rw-r--r--  src/mem/ruby/system/CacheRecorder.hh | 2
-rw-r--r--  src/mem/ruby/system/RubySystem.py | 11
-rw-r--r--  src/mem/ruby/system/Sequencer.cc | 42
-rw-r--r--  src/mem/ruby/system/System.cc | 27
-rw-r--r--  src/mem/ruby/system/System.hh | 10
-rw-r--r--  src/mem/slicc/ast/EnumDeclAST.py | 2
-rw-r--r--  src/mem/slicc/ast/FormalParamAST.py | 22
-rw-r--r--  src/mem/slicc/ast/FuncCallExprAST.py | 17
-rw-r--r--  src/mem/slicc/ast/FuncDeclAST.py | 17
-rw-r--r--  src/mem/slicc/ast/InPortDeclAST.py | 4
-rw-r--r--  src/mem/slicc/ast/MethodCallExprAST.py | 21
-rw-r--r--  src/mem/slicc/ast/StateDeclAST.py | 4
-rw-r--r--  src/mem/slicc/parser.py | 10
-rw-r--r--  src/mem/slicc/symbols/Func.py | 35
-rw-r--r--  src/mem/slicc/symbols/StateMachine.py | 10
81 files changed, 898 insertions, 829 deletions
diff --git a/configs/ruby/Ruby.py b/configs/ruby/Ruby.py
index 6d78dd89d..44dbb925f 100644
--- a/configs/ruby/Ruby.py
+++ b/configs/ruby/Ruby.py
@@ -82,6 +82,9 @@ def define_options(parser):
parser.add_option("--recycle-latency", type="int", default=10,
help="Recycle latency for ruby controller input buffers")
+ parser.add_option("--random_seed", type="int", default=1234,
+ help="Used for seeding the random number generator")
+
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
eval("%s.define_options(parser)" % protocol)
@@ -231,9 +234,9 @@ def create_system(options, full_system, system, piobus = None, dma_ports = []):
if buildEnv['TARGET_ISA'] == "x86":
cpu_seq.pio_slave_port = piobus.master
- ruby.number_of_virtual_networks = ruby.network.number_of_virtual_networks
ruby._cpu_ports = cpu_sequencers
ruby.num_of_sequencers = len(cpu_sequencers)
+ ruby.random_seed = options.random_seed
# Create a backing copy of physical memory in case required
if options.access_backing_store:
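
For context on the restored option: --random_seed lands on the RubySystem object (ruby.random_seed above), and the companion changes to src/mem/ruby/system/System.cc suggest the C++ side seeds the simulator's random number generator from it at construction. A minimal sketch of that plumbing, with hypothetical names rather than the verbatim gem5 code:

    // Hypothetical sketch: how a config-provided seed might reach the
    // RNG. RubySystemParams and this constructor are assumptions.
    #include <cstdlib>

    struct RubySystemParams {
        int random_seed;   // populated from the --random_seed option
    };

    class RubySystem {
      public:
        explicit RubySystem(const RubySystemParams &p)
        {
            // Seed once at construction so a given seed reproduces a run.
            std::srand(p.random_seed);
        }
    };
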
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.hh b/src/cpu/testers/directedtest/RubyDirectedTester.hh
index 74a891178..2a1e7fc1f 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.hh
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.hh
@@ -109,9 +109,9 @@ class RubyDirectedTester : public MemObject
RubyDirectedTester(const RubyDirectedTester& obj);
RubyDirectedTester& operator=(const RubyDirectedTester& obj);
- uint64_t m_requests_completed;
+ uint64 m_requests_completed;
std::vector<MasterPort*> ports;
- uint64_t m_requests_to_complete;
+ uint64 m_requests_to_complete;
DirectedGenerator* generator;
};
diff --git a/src/cpu/testers/rubytest/RubyTester.hh b/src/cpu/testers/rubytest/RubyTester.hh
index 94a982e32..c9f0b8dfc 100644
--- a/src/cpu/testers/rubytest/RubyTester.hh
+++ b/src/cpu/testers/rubytest/RubyTester.hh
@@ -143,10 +143,10 @@ class RubyTester : public MemObject
std::vector<Cycles> m_last_progress_vector;
int m_num_cpus;
- uint64_t m_checks_completed;
+ uint64 m_checks_completed;
std::vector<MasterPort*> writePorts;
std::vector<MasterPort*> readPorts;
- uint64_t m_checks_to_complete;
+ uint64 m_checks_to_complete;
int m_deadlock_threshold;
int m_num_writers;
int m_num_readers;
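
Both testers here trade the standard uint64_t for Ruby's own uint64 alias. Judging from the three-line change to src/mem/ruby/common/TypeDefines.hh in the diffstat, that header presumably restores typedefs along these lines (an assumption about its contents, not the verbatim header):

    // Assumed shape of the restored aliases in TypeDefines.hh.
    typedef long long int64;
    typedef unsigned long long uint64;
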
diff --git a/src/mem/protocol/MESI_Three_Level-L0cache.sm b/src/mem/protocol/MESI_Three_Level-L0cache.sm
index fb9e762da..8e44766ea 100644
--- a/src/mem/protocol/MESI_Three_Level-L0cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L0cache.sm
@@ -145,22 +145,22 @@ machine(L0Cache, "MESI Directory L0 Cache")
// inclusive cache returns L0 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
if(is_valid(Dcache_entry)) {
return Dcache_entry;
}
- Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
return Icache_entry;
}
Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
return Dcache_entry;
}
Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
return Icache_entry;
}
@@ -189,7 +189,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
return L0Cache_State_to_permission(tbe.TBEState);
@@ -206,7 +206,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -217,7 +217,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -260,7 +260,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
assert(in_msg.Dest == machineID);
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -301,7 +301,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Icache_entry)) {
// The tag matches for the L0, so the L0 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L0
@@ -309,19 +309,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Dcache_entry)) {
// The block is in the wrong L0, put the request on the queue to the shared L2
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (Icache.cacheAvail(in_msg.LineAddress)) {
// L0 doesn't have the line, but we have space for it
// in the L0 so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L0, so we need to make room in the L0
trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -331,7 +331,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Dcache_entry)) {
// The tag matches for the L0, so the L0 asks the L1 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L0
@@ -339,19 +339,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Icache_entry)) {
// The block is in the wrong L0, put the request on the queue to the private L1
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
}
if (Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L0 let's see if the L1 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L0
trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -459,38 +459,21 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
}
- action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
- action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
+ action(hx_load_hit, "hx", desc="If not prefetch, notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, true);
- }
-
- action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
@@ -498,7 +481,6 @@ machine(L0Cache, "MESI Directory L0 Cache")
action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
@@ -507,7 +489,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
@@ -643,7 +625,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -730,7 +712,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
transition(Inst_IS, Data, S) {
u_writeInstToCache;
- hx_ifetch_hit;
+ hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
@@ -738,7 +720,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
transition(Inst_IS, Data_Exclusive, E) {
u_writeInstToCache;
- hx_ifetch_hit;
+ hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
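
Two patterns recur in this file and in the other .sm changes below. First, TBEs.lookup(addr) becomes the older TBEs[addr] spelling; in SLICC both denote a table lookup that yields an invalid (null) entry when the address has no live TBE, which is why the call sites guard with is_valid(). Second, the hit actions drop their explicit setMRU calls, and the separate ifetch-hit actions fold back into the load-hit ones, since both now do identical work. A rough C++ analogue of the lookup behavior, a sketch under those assumptions rather than gem5's actual TBETable:

    #include <cstdint>
    #include <unordered_map>

    // Minimal stand-in for a transaction-buffer-entry table: lookup()
    // returns nullptr when no entry exists, mirroring the is_valid()
    // checks in the SLICC code above.
    template <typename TBE>
    class TBETable {
      public:
        TBE *lookup(uint64_t addr)
        {
            auto it = m_map.find(addr);
            return it == m_map.end() ? nullptr : &it->second;
        }
        void allocate(uint64_t addr) { m_map.emplace(addr, TBE()); }
        void deallocate(uint64_t addr) { m_map.erase(addr); }

      private:
        std::unordered_map<uint64_t, TBE> m_map;
    };
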
diff --git a/src/mem/protocol/MESI_Three_Level-L1cache.sm b/src/mem/protocol/MESI_Three_Level-L1cache.sm
index 9bab20def..6c8df8d75 100644
--- a/src/mem/protocol/MESI_Three_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L1cache.sm
@@ -161,7 +161,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", cache.lookup(addr));
+ Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
return cache_entry;
}
@@ -186,7 +186,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
@@ -203,7 +203,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -214,7 +214,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -271,7 +271,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -307,7 +307,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:INV) {
if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
@@ -343,7 +343,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (messageBufferFromL0_in.isReady()) {
peek(messageBufferFromL0_in, CoherenceMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Class == CoherenceClass:INV_DATA) {
trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
@@ -363,7 +363,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// No room in the L1, so we need to make room in the L1
Entry victim_entry :=
getCacheEntry(cache.cacheProbe(in_msg.addr));
- TBE victim_tbe := TBEs.lookup(cache.cacheProbe(in_msg.addr));
+ TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
trigger(Event:L0_Invalidate_Own,
@@ -628,7 +628,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
diff --git a/src/mem/protocol/MESI_Two_Level-L1cache.sm b/src/mem/protocol/MESI_Two_Level-L1cache.sm
index f4978050d..184f735c7 100644
--- a/src/mem/protocol/MESI_Two_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L1cache.sm
@@ -164,22 +164,22 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
if(is_valid(L1Dcache_entry)) {
return L1Dcache_entry;
}
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
return L1Dcache_entry;
}
Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
@@ -208,7 +208,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
@@ -225,7 +225,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -236,7 +236,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -305,7 +305,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
@@ -315,7 +315,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
@@ -323,13 +323,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// in the L1 so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
} else {
// Data prefetch
@@ -339,7 +339,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
@@ -349,7 +349,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
@@ -357,13 +357,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// the L1 let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -377,7 +377,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -417,7 +417,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
@@ -450,7 +450,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
@@ -458,19 +458,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1 so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -480,7 +480,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
@@ -488,19 +488,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1 let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -809,47 +809,36 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
sequencer.invalidateSC(address);
}
- action(h_load_hit, "hd",
- desc="Notify sequencer the load completed.")
+ action(h_load_hit, "h",
+ desc="If not prefetch, notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
- action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
+ action(hx_load_hit, "hx",
+ desc="If not prefetch, notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
- action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
+ action(hh_store_hit, "\h",
+ desc="If not prefetch, notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
- action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
+ action(hhx_store_hit, "\hx",
+ desc="If not prefetch, notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
@@ -858,7 +847,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.isPrefetch := false;
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
@@ -1091,7 +1080,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
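
The dropped setMRU calls in the hit actions above pair with the large change to src/mem/ruby/structures/CacheMemory.cc in the diffstat: the recency update presumably moves into the cache structure itself, so a lookup on the hit path touches the replacement state as a side effect. A hedged sketch of that idea, with hypothetical names rather than the actual CacheMemory code:

    #include <cstdint>
    #include <unordered_map>

    // Sketch: recording recency inside lookup() itself, so protocol
    // actions no longer need a separate setMRU() step on every hit.
    class TinyCache {
      public:
        int *lookup(uint64_t addr)
        {
            auto it = m_entries.find(addr);
            if (it == m_entries.end())
                return nullptr;
            m_lastTouch[addr] = ++m_tick;   // MRU update as a side effect
            return &it->second;
        }

      private:
        uint64_t m_tick = 0;
        std::unordered_map<uint64_t, int> m_entries;
        std::unordered_map<uint64_t, uint64_t> m_lastTouch;
    };
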
diff --git a/src/mem/protocol/MESI_Two_Level-L2cache.sm b/src/mem/protocol/MESI_Two_Level-L2cache.sm
index 739a6f713..e4f719d9f 100644
--- a/src/mem/protocol/MESI_Two_Level-L2cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L2cache.sm
@@ -157,7 +157,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// inclusive cache, returns L2 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(addr));
+ return static_cast(Entry, "pointer", L2cache[addr]);
}
bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
@@ -196,7 +196,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
@@ -213,7 +213,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -224,7 +224,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -288,7 +288,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
if(L1unblockNetwork_in.isReady()) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
in_msg.Sender, in_msg.Type, in_msg.Destination);
@@ -312,7 +312,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// test whether it's from a local L1 or an off-chip source
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
if(in_msg.Type == CoherenceResponseType:DATA) {
@@ -351,7 +351,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
if(L1RequestL2Network_in.isReady()) {
peek(L1RequestL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
@@ -376,10 +376,10 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
if (isDirty(L2cache_entry)) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
} else {
trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
}
}
@@ -591,7 +591,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.L1_GetS_IDs.clear();
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
diff --git a/src/mem/protocol/MESI_Two_Level-dir.sm b/src/mem/protocol/MESI_Two_Level-dir.sm
index 6c5c84f2f..22aabee4e 100644
--- a/src/mem/protocol/MESI_Two_Level-dir.sm
+++ b/src/mem/protocol/MESI_Two_Level-dir.sm
@@ -101,7 +101,7 @@ machine(Directory, "MESI Two Level directory protocol")
void wakeUpBuffers(Addr a);
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -133,7 +133,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
return Directory_State_to_permission(tbe.TBEState);
@@ -149,7 +149,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -160,7 +160,7 @@ machine(Directory, "MESI Two Level directory protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -194,13 +194,13 @@ machine(Directory, "MESI Two Level directory protocol")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg);
error("Invalid message");
@@ -214,9 +214,9 @@ machine(Directory, "MESI Two Level directory protocol")
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:CleanReplacement, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -230,9 +230,9 @@ machine(Directory, "MESI Two Level directory protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -390,7 +390,7 @@ machine(Directory, "MESI Two Level directory protocol")
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(requestNetwork_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index d247ce663..3380cd7e6 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -152,7 +152,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
@@ -172,7 +172,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -183,7 +183,7 @@ machine(L1Cache, "MI Example L1 Cache")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -205,7 +205,7 @@ machine(L1Cache, "MI Example L1 Cache")
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
@@ -231,7 +231,7 @@ machine(L1Cache, "MI Example L1 Cache")
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
@@ -254,11 +254,11 @@ machine(L1Cache, "MI Example L1 Cache")
// make room for the block
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(cacheMemory.cacheProbe(in_msg.LineAddress)));
+ TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
}
else {
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- cache_entry, TBEs.lookup(in_msg.LineAddress));
+ cache_entry, TBEs[in_msg.LineAddress]);
}
}
}
@@ -353,7 +353,6 @@ machine(L1Cache, "MI Example L1 Cache")
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false);
}
@@ -361,7 +360,6 @@ machine(L1Cache, "MI Example L1 Cache")
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender));
}
@@ -370,7 +368,6 @@ machine(L1Cache, "MI Example L1 Cache")
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false);
}
@@ -378,7 +375,6 @@ machine(L1Cache, "MI Example L1 Cache")
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender));
}
@@ -400,7 +396,7 @@ machine(L1Cache, "MI Example L1 Cache")
action(v_allocateTBE, "v", desc="Allocate TBE") {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
}
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
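
The MI_example hit actions call the sequencer callbacks with an extra boolean, and the miss-side variants also pass the responder's machine type. From those call sites alone, the callback presumably distinguishes locally filled hits from data supplied by another controller, roughly as below; the prototype and stand-in types are inferred, not the verbatim Sequencer interface:

    #include <cstdint>

    // Stand-in types for illustration only.
    using Addr = uint64_t;
    struct DataBlock {};
    enum class MachineType { L1Cache, L2Cache, Directory, NUM };

    // Assumed shape of the callback: externalHit marks data that came
    // from another controller, and mach records which machine type
    // supplied it (useful for hit/miss profiling).
    void readCallback(Addr address, DataBlock &data,
                      bool externalHit = false,
                      MachineType mach = MachineType::NUM)
    {
        // Hand the block back to the waiting CPU request here.
        (void)address; (void)data; (void)externalHit; (void)mach;
    }
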
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index c9f6b9be6..a22691bda 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -111,7 +111,7 @@ machine(Directory, "Directory protocol")
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -155,7 +155,7 @@ machine(Directory, "Directory protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
@@ -174,7 +174,7 @@ machine(Directory, "Directory protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -185,7 +185,7 @@ machine(Directory, "Directory protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -207,7 +207,7 @@ machine(Directory, "Directory protocol")
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
@@ -222,7 +222,7 @@ machine(Directory, "Directory protocol")
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.addr, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
@@ -245,7 +245,7 @@ machine(Directory, "Directory protocol")
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
@@ -403,7 +403,7 @@ machine(Directory, "Directory protocol")
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
@@ -414,7 +414,7 @@ machine(Directory, "Directory protocol")
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DmaRequestor := in_msg.Requestor;
}
}
@@ -422,7 +422,7 @@ machine(Directory, "Directory protocol")
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
}
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index 7a8f35333..8a2eee1e2 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -190,7 +190,7 @@ machine(L1Cache, "Directory protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
@@ -217,7 +217,7 @@ machine(L1Cache, "Directory protocol")
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -236,7 +236,7 @@ machine(L1Cache, "Directory protocol")
return num_functional_writes;
}
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
@@ -269,7 +269,7 @@ machine(L1Cache, "Directory protocol")
if (useTimerTable_in.isReady()) {
trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
getCacheEntry(useTimerTable.readyAddress()),
- TBEs.lookup(useTimerTable.readyAddress()));
+ TBEs[useTimerTable.readyAddress()]);
}
}
@@ -279,7 +279,7 @@ machine(L1Cache, "Directory protocol")
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -299,29 +299,29 @@ machine(L1Cache, "Directory protocol")
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
trigger(Event:Writeback_Ack_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -335,13 +335,13 @@ machine(L1Cache, "Directory protocol")
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Exclusive_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -365,7 +365,7 @@ machine(L1Cache, "Directory protocol")
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
@@ -373,19 +373,19 @@ machine(L1Cache, "Directory protocol")
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -396,7 +396,7 @@ machine(L1Cache, "Directory protocol")
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
@@ -404,19 +404,19 @@ machine(L1Cache, "Directory protocol")
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -635,32 +635,21 @@ machine(L1Cache, "Directory protocol")
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
@@ -668,8 +657,6 @@ machine(L1Cache, "Directory protocol")
action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
@@ -677,7 +664,7 @@ machine(L1Cache, "Directory protocol")
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
assert(is_valid(cache_entry));
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
@@ -977,7 +964,7 @@ machine(L1Cache, "Directory protocol")
}
transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index e1d665292..38c6e9f9b 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -232,7 +232,7 @@ machine(L2Cache, "Token protocol")
void unset_tbe();
Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(address));
+ return static_cast(Entry, "pointer", L2cache[address]);
}
bool isDirTagPresent(Addr addr) {
@@ -519,7 +519,7 @@ machine(L2Cache, "Token protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
@@ -542,7 +542,7 @@ machine(L2Cache, "Token protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -553,7 +553,7 @@ machine(L2Cache, "Token protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -582,7 +582,7 @@ machine(L2Cache, "Token protocol")
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -598,26 +598,26 @@ machine(L2Cache, "Token protocol")
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -631,25 +631,25 @@ machine(L2Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:L1_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:L1_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:L1_PUTO, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:L1_PUTX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTS) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
trigger(Event:L1_PUTS_only, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
else {
trigger(Event:L1_PUTS, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else {
error("Unexpected message");
@@ -667,35 +667,35 @@ machine(L2Cache, "Token protocol")
if (in_msg.Type == CoherenceResponseType:ACK) {
if (in_msg.SenderMachine == MachineType:L2Cache) {
trigger(Event:ExtAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
else {
trigger(Event:IntAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (is_invalid(cache_entry) &&
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -703,15 +703,15 @@ machine(L2Cache, "Token protocol")
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBCLEANDATA, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DmaAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -1223,7 +1223,7 @@ machine(L2Cache, "Token protocol")
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request (isPrefetch=0, number of invalidates=0)") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
if(is_valid(cache_entry)) {
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
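
The hunks above all revert SLICC lookups from TBEs.lookup(addr) back to the
indexing form TBEs[addr]. Both spellings resolve to the same table query; a
minimal, map-backed sketch of the assumed interface (hypothetical -- the
real TBETable is code-generated per protocol):

    #include <cstdint>
    #include <unordered_map>

    template <typename TBE>
    class TBETable
    {
      public:
        void allocate(uint64_t addr) { m_map[addr]; }   // default-construct
        void deallocate(uint64_t addr) { m_map.erase(addr); }

        // lookup() returns the TBE pointer, or NULL when none is allocated;
        // SLICC's is_valid()/is_invalid() test exactly this.
        TBE *lookup(uint64_t addr)
        {
            auto it = m_map.find(addr);
            return it == m_map.end() ? nullptr : &it->second;
        }

        // "TBEs[addr]" in the .sm files maps onto the same query; in this
        // sketch operator[] is just an alias for lookup().
        TBE *operator[](uint64_t addr) { return lookup(addr); }

      private:
        std::unordered_map<uint64_t, TBE> m_map;
    };

Under this reading, the churn between the two forms throughout the patch is
purely syntactic.
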
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index ba58a6e9a..dcd37cc33 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -122,7 +122,7 @@ machine(Directory, "Directory protocol")
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -234,26 +234,26 @@ machine(Directory, "Directory protocol")
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
trigger(Event:Dirty_Writeback, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_ACK, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
@@ -265,21 +265,21 @@ machine(Directory, "Directory protocol")
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:PUTX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
- trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
error("Invalid message");
}
@@ -292,9 +292,9 @@ machine(Directory, "Directory protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -540,7 +540,7 @@ machine(Directory, "Directory protocol")
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
peek (requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
tbe.DataBlk := in_msg.DataBlk;
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
index 75c621243..e9931f25b 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -108,10 +108,10 @@ machine(DMA, "DMA Controller")
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
error("Invalid request type");
}
@@ -124,14 +124,14 @@ machine(DMA, "DMA Controller")
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
error("Invalid response type");
}
@@ -144,7 +144,7 @@ machine(DMA, "DMA Controller")
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_Acks, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -240,7 +240,7 @@ machine(DMA, "DMA Controller")
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
}
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 1d47f1c8a..af6e4c0d5 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -366,7 +366,7 @@ machine(L1Cache, "Token protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := L1_TBEs.lookup(addr);
+ TBE tbe := L1_TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
@@ -459,7 +459,7 @@ machine(L1Cache, "Token protocol")
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
if (useTimerTable_in.isReady()) {
- TBE tbe := L1_TBEs.lookup(useTimerTable.readyAddress());
+ TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
(persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
@@ -487,7 +487,7 @@ machine(L1Cache, "Token protocol")
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
getCacheEntry(reissueTimerTable.readyAddress()),
- L1_TBEs.lookup(reissueTimerTable.readyAddress()));
+ L1_TBEs[reissueTimerTable.readyAddress()]);
}
}
@@ -510,7 +510,7 @@ machine(L1Cache, "Token protocol")
// React to the message based on the current state of the table
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
if (persistentTable.isLocked(in_msg.addr)) {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
@@ -548,7 +548,7 @@ machine(L1Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
@@ -559,7 +559,7 @@ machine(L1Cache, "Token protocol")
// came from an off-chip L2 cache
if (is_valid(tbe)) {
- // L1_TBEs.lookup(in_msg.addr).ExternalResponse := true;
+ // L1_TBEs[in_msg.addr].ExternalResponse := true;
// profile_offchipL2_response(in_msg.addr);
}
}
@@ -619,7 +619,7 @@ machine(L1Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETX) {
if (in_msg.isLocal) {
@@ -665,7 +665,7 @@ machine(L1Cache, "Token protocol")
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
- TBE tbe := L1_TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := L1_TBEs[in_msg.LineAddress];
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
@@ -695,7 +695,7 @@ machine(L1Cache, "Token protocol")
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -726,7 +726,7 @@ machine(L1Cache, "Token protocol")
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -1284,22 +1284,12 @@ machine(L1Cache, "Token protocol")
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
- address, cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- MachineType:L1Cache);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false,
MachineType:L1Cache);
}
@@ -1309,8 +1299,6 @@ machine(L1Cache, "Token protocol")
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk,
isExternalHit(address, in_msg.Sender),
machineIDToMachineType(in_msg.Sender));
@@ -1322,7 +1310,6 @@ machine(L1Cache, "Token protocol")
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false,
MachineType:L1Cache);
cache_entry.Dirty := true;
@@ -1334,8 +1321,6 @@ machine(L1Cache, "Token protocol")
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk,
isExternalHit(address, in_msg.Sender),
machineIDToMachineType(in_msg.Sender));
@@ -1347,7 +1332,7 @@ machine(L1Cache, "Token protocol")
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
- set_tbe(L1_TBEs.lookup(address));
+ set_tbe(L1_TBEs[address]);
tbe.IssueCount := 0;
peek(mandatoryQueue_in, RubyRequest) {
tbe.PC := in_msg.ProgramCounter;
@@ -1717,7 +1702,7 @@ machine(L1Cache, "Token protocol")
}
transition({S, SM, S_L, SM_L}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -1799,7 +1784,7 @@ machine(L1Cache, "Token protocol")
// Transitions from Owned
transition({O, OM}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -1889,7 +1874,7 @@ machine(L1Cache, "Token protocol")
// Transitions from Modified
transition({MM, MM_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -1964,7 +1949,7 @@ machine(L1Cache, "Token protocol")
// Transitions from Dirty Exclusive
transition({M, M_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
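
Beyond the lookup syntax, the hunks above fold h_ifetch_hit back into a
single h_load_hit and drop the explicit L1Icache/L1Dcache setMRU calls.
With the remaining setMRU(Addr) interface, touching replacement state is
the cache's own job: resolve the address to a set and way, then bump that
way's use timestamp. A hedged sketch (the set/way mappings below are
stand-ins, not gem5's):

    #include <cstdint>

    class ReplacementPolicy
    {
      public:
        // bump the way's timestamp so it is evicted last
        void touch(int set, int way, uint64_t now) { m_last[set][way] = now; }
      private:
        uint64_t m_last[256][8] = {};
    };

    class CacheMemory
    {
      public:
        void setMRU(uint64_t addr, uint64_t now)
        {
            int set = addressToCacheSet(addr);
            int way = findTagInSet(set, addr);
            if (way >= 0)
                m_policy.touch(set, way, now); // mark most-recently-used
        }

      private:
        // stand-in mappings: a real cache indexes by set bits and matches tags
        int addressToCacheSet(uint64_t addr) const { return addr % 256; }
        int findTagInSet(int set, uint64_t addr) const { return addr % 8; }
        ReplacementPolicy m_policy;
    };
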
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
index fd6a62ef2..fdef75181 100644
--- a/src/mem/protocol/MOESI_CMP_token-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -175,7 +175,7 @@ machine(Directory, "Token protocol")
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -218,7 +218,7 @@ machine(Directory, "Token protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
@@ -245,7 +245,7 @@ machine(Directory, "Token protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -256,7 +256,7 @@ machine(Directory, "Token protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -280,9 +280,9 @@ machine(Directory, "Token protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -295,7 +295,7 @@ machine(Directory, "Token protocol")
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- TBEs.lookup(reissueTimerTable.readyAddress()));
+ TBEs[reissueTimerTable.readyAddress()]);
}
}
@@ -307,13 +307,13 @@ machine(Directory, "Token protocol")
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Data_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -321,14 +321,14 @@ machine(Directory, "Token protocol")
} else {
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -360,38 +360,38 @@ machine(Directory, "Token protocol")
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
}
} else {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
}
}
else {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
@@ -405,9 +405,9 @@ machine(Directory, "Token protocol")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
@@ -419,14 +419,14 @@ machine(Directory, "Token protocol")
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
if (in_msg.Type == DMARequestType:READ) {
- trigger(Event:DMA_READ, in_msg.LineAddress, TBEs.lookup(in_msg.LineAddress));
+ trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
} else if (in_msg.Type == DMARequestType:WRITE) {
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
trigger(Event:DMA_WRITE, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
}
} else {
error("Invalid message");
@@ -691,7 +691,7 @@ machine(Directory, "Token protocol")
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
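
The Lockdown/Unlockdown triggers above key off the persistent table, the
token protocol's starvation-avoidance mechanism: every controller tracks
outstanding persistent requests per line, and findSmallest() picks the same
winner everywhere because it returns the lowest-ordered requestor. A
minimal sketch (the lock/unlock names here are illustrative, not gem5's
API):

    #include <cstdint>
    #include <map>
    #include <set>

    typedef unsigned int NodeID;
    struct MachineID { int type; NodeID num; };
    inline bool operator<(const MachineID &a, const MachineID &b)
    { return a.type < b.type || (a.type == b.type && a.num < b.num); }

    class PersistentTable
    {
      public:
        bool isLocked(uint64_t addr) const
        { return m_map.count(addr) && !m_map.at(addr).empty(); }

        // ordered set -> every controller agrees on one global winner
        MachineID findSmallest(uint64_t addr) const
        { return *m_map.at(addr).begin(); }

        void lock(uint64_t addr, MachineID m)   { m_map[addr].insert(m); }
        void unlock(uint64_t addr, MachineID m) { m_map[addr].erase(m); }

      private:
        std::map<uint64_t, std::set<MachineID>> m_map;
    };
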
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index 269e47dfd..d5539e021 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -210,7 +210,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -229,7 +229,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
return num_functional_writes;
}
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
@@ -274,7 +274,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
@@ -337,7 +337,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(triggerQueue_in, TriggerMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == TriggerType:L2_to_L1) {
trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
@@ -360,7 +360,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(responseToCache_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
@@ -385,7 +385,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if ((in_msg.Type == CoherenceRequestType:GETX) ||
(in_msg.Type == CoherenceRequestType:GETF)) {
@@ -429,7 +429,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
@@ -452,7 +452,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
@@ -477,14 +477,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L1_to_L2,
l1i_victim_addr,
getL1ICacheEntry(l1i_victim_addr),
- TBEs.lookup(l1i_victim_addr));
+ TBEs[l1i_victim_addr]);
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
}
@@ -510,7 +510,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
@@ -534,14 +534,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L1_to_L2,
l1d_victim_addr,
getL1DCacheEntry(l1d_victim_addr),
- TBEs.lookup(l1d_victim_addr));
+ TBEs[l1d_victim_addr]);
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
}
@@ -857,18 +857,9 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- testAndClearLocalHit(cache_entry));
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false,
testAndClearLocalHit(cache_entry));
}
@@ -878,8 +869,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.readCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -890,7 +880,6 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(mandatoryQueue_in, RubyRequest) {
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false,
testAndClearLocalHit(cache_entry));
@@ -912,8 +901,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -926,8 +914,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(cache_entry));
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -939,7 +926,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
tbe.Sharers := false;
@@ -948,7 +935,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(it_allocateTBE, "it", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := false;
tbe.Sharers := false;
}
@@ -1521,7 +1508,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition({S, SM, ISM}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
@@ -1535,7 +1522,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(SR, Ifetch, S) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
@@ -1583,7 +1570,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
@@ -1597,7 +1584,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(OR, Ifetch, O) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
@@ -1648,7 +1635,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Transitions from Modified
transition({MM, M}, {Ifetch}) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
@@ -1674,7 +1661,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(MMR, Ifetch, MM) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
@@ -1755,7 +1742,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(MR, Ifetch, M) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index b78d40510..27794a3bd 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -195,7 +195,7 @@ machine(Directory, "AMD Hammer-like protocol")
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -250,7 +250,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
@@ -267,7 +267,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -278,7 +278,7 @@ machine(Directory, "AMD Hammer-like protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -317,7 +317,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks_and_owner_data, in_msg.addr,
pf_entry, tbe);
@@ -341,7 +341,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
@@ -370,7 +370,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (responseToDir_in.isReady()) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
@@ -393,7 +393,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
@@ -410,7 +410,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:PUT) {
trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:PUTF) {
@@ -428,7 +428,7 @@ machine(Directory, "AMD Hammer-like protocol")
trigger(Event:Pf_Replacement,
probeFilter.cacheProbe(in_msg.addr),
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
- TBEs.lookup(probeFilter.cacheProbe(in_msg.addr)));
+ TBEs[probeFilter.cacheProbe(in_msg.addr)]);
}
}
} else {
@@ -444,7 +444,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
@@ -567,7 +567,7 @@ machine(Directory, "AMD Hammer-like protocol")
check_allocate(TBEs);
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.PhysicalAddress := address;
tbe.ResponseType := CoherenceResponseType:NULL;
}
@@ -577,7 +577,7 @@ machine(Directory, "AMD Hammer-like protocol")
check_allocate(TBEs);
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DmaDataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
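
The Pf_Replacement trigger above follows the same cacheAvail()/cacheProbe()
pattern as the L1/L2 replacement triggers earlier in the patch: if the set
an address maps to has no free way, cacheProbe() names the victim line
whose address the replacement event is raised for. An illustrative sketch
with an LRU victim choice (gem5's CacheMemory delegates this to a pluggable
replacement policy):

    #include <cstdint>
    #include <vector>

    struct Line
    {
        bool valid = false;
        uint64_t line_addr = 0;   // address of the block held in this way
        uint64_t last_touch = 0;  // replacement timestamp
    };

    class SetAssocCache
    {
      public:
        SetAssocCache(int sets, int ways)
            : m_sets(sets), m_array(sets, std::vector<Line>(ways)) {}

        // Is there an invalid way left in addr's set?
        bool cacheAvail(uint64_t addr) const
        {
            for (const Line &l : set(addr))
                if (!l.valid)
                    return true;
            return false;
        }

        // Address of the LRU victim in addr's set; the protocol raises an
        // L2_Replacement / Pf_Replacement event for this address.
        uint64_t cacheProbe(uint64_t addr) const
        {
            const std::vector<Line> &s = set(addr);
            int victim = 0;
            for (int w = 1; w < (int)s.size(); w++)
                if (s[w].last_touch < s[victim].last_touch)
                    victim = w;
            return s[victim].line_addr;
        }

      private:
        const std::vector<Line> &set(uint64_t addr) const
        { return m_array[addr % m_sets]; }

        int m_sets;
        std::vector<std::vector<Line>> m_array;
    };
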
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index 63f4b90ea..d032adfd8 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -50,10 +50,7 @@ structure(InPort, external = "yes", primitive="yes") {
}
external_type(NodeID, default="0", primitive="yes");
-structure (MachineID, external = "yes", non_obj="yes") {
- MachineType getType();
- NodeID getNum();
-}
+external_type(MachineID);
structure (Set, external = "yes", non_obj="yes") {
void setSize(int);
@@ -159,7 +156,6 @@ structure (CacheMemory, external = "yes") {
Cycles getTagLatency();
Cycles getDataLatency();
void setMRU(Addr);
- void setMRU(AbstractCacheEntry);
void recordRequestType(CacheRequestType, Addr);
bool checkResourceAvailable(CacheResourceType, Addr);
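
The RubySlicc_Types.sm hunk above collapses MachineID back to an opaque
external_type, removing the getType()/getNum() accessor declarations from
SLICC. On the C++ side a MachineID still pairs a machine type with a node
number; a sketch of what the opaque type maps to (enumerators abbreviated):

    #include <iostream>

    enum MachineType { MachineType_L1Cache, MachineType_L2Cache,
                       MachineType_Directory, MachineType_NUM };
    typedef unsigned int NodeID;

    struct MachineID
    {
        MachineType type;
        NodeID num;
    };

    inline MachineType machineIDToMachineType(MachineID m) { return m.type; }

    int main()
    {
        MachineID id = { MachineType_L2Cache, 3 };
        std::cout << "machine type " << machineIDToMachineType(id)
                  << ", node " << id.num << "\n";
        return 0;
    }
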
diff --git a/src/mem/ruby/common/DataBlock.hh b/src/mem/ruby/common/DataBlock.hh
index 129a88e25..ac08fac82 100644
--- a/src/mem/ruby/common/DataBlock.hh
+++ b/src/mem/ruby/common/DataBlock.hh
@@ -67,8 +67,6 @@ class DataBlock
private:
void alloc();
uint8_t *m_data;
- //! true if this DataBlock is responsible for deleting m_data,
- //! false otherwise.
bool m_alloc;
};
diff --git a/src/mem/ruby/common/Histogram.cc b/src/mem/ruby/common/Histogram.cc
index 31de160cf..e377bc253 100644
--- a/src/mem/ruby/common/Histogram.cc
+++ b/src/mem/ruby/common/Histogram.cc
@@ -84,7 +84,7 @@ Histogram::doubleBinSize()
}
void
-Histogram::add(int64_t value)
+Histogram::add(int64 value)
{
assert(value >= 0);
m_max = max(m_max, value);
diff --git a/src/mem/ruby/common/Histogram.hh b/src/mem/ruby/common/Histogram.hh
index f02c4bedd..c34e39af1 100644
--- a/src/mem/ruby/common/Histogram.hh
+++ b/src/mem/ruby/common/Histogram.hh
@@ -40,7 +40,7 @@ class Histogram
Histogram(int binsize = 1, uint32_t bins = 50);
~Histogram();
- void add(int64_t value);
+ void add(int64 value);
void add(Histogram& hist);
void doubleBinSize();
@@ -51,10 +51,10 @@ class Histogram
uint64_t size() const { return m_count; }
uint32_t getBins() const { return m_data.size(); }
int getBinSize() const { return m_binsize; }
- int64_t getTotal() const { return m_sumSamples; }
+ int64 getTotal() const { return m_sumSamples; }
uint64_t getSquaredTotal() const { return m_sumSquaredSamples; }
uint64_t getData(int index) const { return m_data[index]; }
- int64_t getMax() const { return m_max; }
+ int64 getMax() const { return m_max; }
void printWithMultiplier(std::ostream& out, double multiplier) const;
void printPercent(std::ostream& out) const;
@@ -62,12 +62,12 @@ class Histogram
private:
std::vector<uint64_t> m_data;
- int64_t m_max; // the maximum value seen so far
+ int64 m_max; // the maximum value seen so far
uint64_t m_count; // the number of elements added
int m_binsize; // the size of each bucket
uint32_t m_largest_bin; // the largest bin used
- int64_t m_sumSamples; // the sum of all samples
+ int64 m_sumSamples; // the sum of all samples
uint64_t m_sumSquaredSamples; // the sum of the square of all samples
double getStandardDeviation() const;
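
For context on the int64 fields above: the histogram keeps both the running
sum (m_sumSamples) and the sum of squares (m_sumSquaredSamples) so that the
mean and standard deviation are O(1) queries no matter how many samples
were added. A sketch of the usual running-moments formula that
getStandardDeviation() can be computed from:

    #include <cmath>
    #include <cstdint>

    double standardDeviation(int64_t sum, uint64_t sum_sq, uint64_t n)
    {
        if (n <= 1)
            return 0.0;
        double mean = (double)sum / n;
        // sample variance: (sum of squares - n * mean^2) / (n - 1)
        double variance = ((double)sum_sq - n * mean * mean) / (n - 1);
        return variance > 0.0 ? std::sqrt(variance) : 0.0;
    }
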
diff --git a/src/mem/ruby/common/SubBlock.cc b/src/mem/ruby/common/SubBlock.cc
index f1839df72..5175cb950 100644
--- a/src/mem/ruby/common/SubBlock.cc
+++ b/src/mem/ruby/common/SubBlock.cc
@@ -41,7 +41,7 @@ SubBlock::SubBlock(Addr addr, int size)
}
void
-SubBlock::mergeFrom(const DataBlock& data)
+SubBlock::internalMergeFrom(const DataBlock& data)
{
int size = getSize();
assert(size > 0);
@@ -52,7 +52,7 @@ SubBlock::mergeFrom(const DataBlock& data)
}
void
-SubBlock::mergeTo(DataBlock& data) const
+SubBlock::internalMergeTo(DataBlock& data) const
{
int size = getSize();
assert(size > 0);
@@ -68,3 +68,6 @@ SubBlock::print(std::ostream& out) const
{
out << "[" << m_address << ", " << getSize() << ", " << m_data << "]";
}
+
+
+
diff --git a/src/mem/ruby/common/SubBlock.hh b/src/mem/ruby/common/SubBlock.hh
index f336328fa..ad1d68ae1 100644
--- a/src/mem/ruby/common/SubBlock.hh
+++ b/src/mem/ruby/common/SubBlock.hh
@@ -56,12 +56,15 @@ class SubBlock
// Merging to and from DataBlocks - We only need to worry about
// updates when we are using DataBlocks
- void mergeTo(DataBlock& data) const;
- void mergeFrom(const DataBlock& data);
+ void mergeTo(DataBlock& data) const { internalMergeTo(data); }
+ void mergeFrom(const DataBlock& data) { internalMergeFrom(data); }
void print(std::ostream& out) const;
private:
+ void internalMergeTo(DataBlock& data) const;
+ void internalMergeFrom(const DataBlock& data);
+
// Data Members (m_ prefix)
Addr m_address;
std::vector<uint8_t> m_data;
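
The wrappers above forward to internalMergeTo/internalMergeFrom, which copy
the sub-block's bytes to and from the enclosing cache line at the
sub-block's offset. An illustrative sketch of that byte copy (the 64-byte
line size and the getOffset() helper are assumptions here):

    #include <cstdint>
    #include <vector>

    static const int kLineBytes = 64;
    inline int getOffset(uint64_t addr) { return addr & (kLineBytes - 1); }

    struct DataBlock { uint8_t bytes[kLineBytes]; };

    void mergeFrom(uint64_t addr, std::vector<uint8_t> &sub,
                   const DataBlock &data)
    {
        int off = getOffset(addr);
        for (size_t i = 0; i < sub.size(); i++)
            sub[i] = data.bytes[off + i];   // pull bytes out of the line
    }

    void mergeTo(uint64_t addr, const std::vector<uint8_t> &sub,
                 DataBlock &data)
    {
        int off = getOffset(addr);
        for (size_t i = 0; i < sub.size(); i++)
            data.bytes[off + i] = sub[i];   // push bytes back into the line
    }
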
diff --git a/src/mem/ruby/common/TypeDefines.hh b/src/mem/ruby/common/TypeDefines.hh
index f29efe8b5..203b63779 100644
--- a/src/mem/ruby/common/TypeDefines.hh
+++ b/src/mem/ruby/common/TypeDefines.hh
@@ -30,6 +30,9 @@
#ifndef TYPEDEFINES_H
#define TYPEDEFINES_H
+typedef unsigned long long uint64;
+typedef long long int64;
+
typedef unsigned int LinkID;
typedef unsigned int NodeID;
typedef unsigned int SwitchID;
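
These re-added typedefs shadow the <cstdint> fixed-width types that the
rest of the patch moves away from; on the usual 64-bit targets the two
spellings have identical size, which a compile-time check can document:

    #include <cstdint>

    typedef unsigned long long uint64;
    typedef long long int64;

    static_assert(sizeof(uint64) == sizeof(uint64_t), "64-bit unsigned");
    static_assert(sizeof(int64) == sizeof(int64_t), "64-bit signed");
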
diff --git a/src/mem/ruby/filters/H3BloomFilter.cc b/src/mem/ruby/filters/H3BloomFilter.cc
index b0d277782..21b9152be 100644
--- a/src/mem/ruby/filters/H3BloomFilter.cc
+++ b/src/mem/ruby/filters/H3BloomFilter.cc
@@ -507,8 +507,8 @@ H3BloomFilter::print(ostream& out) const
int
H3BloomFilter::get_index(Addr addr, int i)
{
- uint64_t x = makeLineAddress(addr);
- // uint64_t y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
+ uint64 x = makeLineAddress(addr);
+ // uint64 y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
int y = hash_H3(x,i);
if (isParallel) {
@@ -519,10 +519,10 @@ H3BloomFilter::get_index(Addr addr, int i)
}
int
-H3BloomFilter::hash_H3(uint64_t value, int index)
+H3BloomFilter::hash_H3(uint64 value, int index)
{
- uint64_t mask = 1;
- uint64_t val = value;
+ uint64 mask = 1;
+ uint64 val = value;
int result = 0;
for (int i = 0; i < 64; i++) {
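
The body of the loop above is truncated in this view. The classic H3 scheme
it implements XORs one precomputed random row into the result for every set
bit of the input; a sketch, assuming a 64-row constant matrix
H3[bit][hash_index]:

    #include <cstdint>

    typedef unsigned long long uint64;

    static const int kNumHashes = 4;
    static int H3[64][kNumHashes]; // filled with fixed random constants

    int hash_H3(uint64 value, int index)
    {
        uint64 mask = 1;
        int result = 0;
        for (int i = 0; i < 64; i++) {
            if (value & mask)
                result ^= H3[i][index]; // XOR in the row for each set bit
            mask = mask << 1;
        }
        return result;
    }
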
diff --git a/src/mem/ruby/filters/H3BloomFilter.hh b/src/mem/ruby/filters/H3BloomFilter.hh
index b6628f5e1..8596d6acb 100644
--- a/src/mem/ruby/filters/H3BloomFilter.hh
+++ b/src/mem/ruby/filters/H3BloomFilter.hh
@@ -68,7 +68,7 @@ class H3BloomFilter : public AbstractBloomFilter
private:
int get_index(Addr addr, int hashNumber);
- int hash_H3(uint64_t value, int index);
+ int hash_H3(uint64 value, int index);
std::vector<int> m_filter;
int m_filter_size;
diff --git a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
index f326030e9..3cdca7e3b 100644
--- a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
+++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
@@ -171,7 +171,7 @@ MultiBitSelBloomFilter::get_index(Addr addr, int i)
// m_skip_bits is used to perform BitSelect after skipping some
// bits. Used to simulate BitSel hashing on larger than cache-line
// granularities
- uint64_t x = (makeLineAddress(addr) >> m_skip_bits);
+ uint64 x = (makeLineAddress(addr) >> m_skip_bits);
int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
//36-bit addresses, 6-bit cache lines
@@ -183,10 +183,10 @@ MultiBitSelBloomFilter::get_index(Addr addr, int i)
}
int
-MultiBitSelBloomFilter::hash_bitsel(uint64_t value, int index, int jump,
+MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump,
int maxBits, int numBits)
{
- uint64_t mask = 1;
+ uint64 mask = 1;
int result = 0;
int bit, i;
diff --git a/src/mem/ruby/filters/MultiBitSelBloomFilter.hh b/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
index b4fac0671..e43dcd6f1 100644
--- a/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
+++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
@@ -68,7 +68,7 @@ class MultiBitSelBloomFilter : public AbstractBloomFilter
private:
int get_index(Addr addr, int hashNumber);
- int hash_bitsel(uint64_t value, int index, int jump, int maxBits,
+ int hash_bitsel(uint64 value, int index, int jump, int maxBits,
int numBits);
std::vector<int> m_filter;
diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc
index e9c575028..a72d8509e 100644
--- a/src/mem/ruby/network/MessageBuffer.cc
+++ b/src/mem/ruby/network/MessageBuffer.cc
@@ -362,6 +362,32 @@ MessageBuffer::isReady() const
(m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
}
+bool
+MessageBuffer::functionalRead(Packet *pkt)
+{
+ // Check the priority heap and read any messages that may
+ // correspond to the address in the packet.
+ for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
+ Message *msg = m_prio_heap[i].get();
+ if (msg->functionalRead(pkt)) return true;
+ }
+
+ // Read the messages in the stall queue that correspond
+ // to the address in the packet.
+ for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
+ map_iter != m_stall_msg_map.end();
+ ++map_iter) {
+
+ for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
+ it != (map_iter->second).end(); ++it) {
+
+ Message *msg = (*it).get();
+ if (msg->functionalRead(pkt)) return true;
+ }
+ }
+ return false;
+}
+
uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
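
The two loops in the added functionalRead() stop at the first message that
can serve the packet, since a functional read needs only one valid copy of
the line. The per-message check delegated to msg->functionalRead(pkt) boils
down to an address match plus a byte copy; a hedged sketch (the Packet
struct below is a stand-in for gem5's, and testAndRead in
RubySlicc_Util.hh plays this role for protocol messages):

    #include <cstdint>
    #include <cstring>

    struct Packet { uint64_t line_addr; uint8_t *data; unsigned size; };

    bool testAndRead(uint64_t msg_line_addr, const uint8_t *blk, Packet *pkt)
    {
        if (msg_line_addr != pkt->line_addr)
            return false;           // this message is for a different line
        std::memcpy(pkt->data, blk, pkt->size); // serve read from message
        return true;
    }
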
diff --git a/src/mem/ruby/network/MessageBuffer.hh b/src/mem/ruby/network/MessageBuffer.hh
index 2625acabd..732b7ec6c 100644
--- a/src/mem/ruby/network/MessageBuffer.hh
+++ b/src/mem/ruby/network/MessageBuffer.hh
@@ -136,6 +136,11 @@ class MessageBuffer : public SimObject
void setIncomingLink(int link_id) { m_input_link_id = link_id; }
void setVnet(int net) { m_vnet_id = net; }
+ // Function for figuring out if any of the messages in the buffer can
+ // satisfy the read request for the address in the packet.
+ // A true return value indicates that the request was fulfilled.
+ bool functionalRead(Packet *pkt);
+
// Function for figuring out if any of the messages in the buffer need
// to be updated with the data from the packet.
// Return value indicates the number of messages that were updated.
@@ -179,7 +184,7 @@ class MessageBuffer : public SimObject
int m_not_avail_count; // count the # of times I didn't have N
// slots available
- uint64_t m_msg_counter;
+ uint64 m_msg_counter;
int m_priority_rank;
const bool m_strict_fifo;
const bool m_randomization;
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
index f72cea5a8..d834ea1a3 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
@@ -281,7 +281,7 @@ NetworkInterface::wakeup()
int vnet = t_flit->get_vnet();
m_net_ptr->increment_received_flits(vnet);
- Cycles network_delay = curCycle() - t_flit->get_creation_time();
+ Cycles network_delay = curCycle() - t_flit->get_enqueue_time();
Cycles queueing_delay = t_flit->get_delay();
m_net_ptr->increment_network_latency(network_delay, vnet);
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/flit.cc b/src/mem/ruby/network/garnet/flexible-pipeline/flit.cc
index aaf19b3b5..7cf68560f 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/flit.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/flit.cc
@@ -31,10 +31,14 @@
#include "mem/ruby/network/garnet/flexible-pipeline/flit.hh"
flit::flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime)
- : m_id(id), m_vnet(vnet), m_vc(vc), m_size(size), m_creation_time(curTime)
{
+ m_size = size;
m_msg_ptr = msg_ptr;
+ m_enqueue_time = curTime;
m_time = curTime;
+ m_id = id;
+ m_vnet = vnet;
+ m_vc = vc;
if (size == 1) {
m_type = HEAD_TAIL_;
@@ -48,6 +52,78 @@ flit::flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime)
m_type = BODY_;
}
+int
+flit::get_size()
+{
+ return m_size;
+}
+
+int
+flit::get_id()
+{
+ return m_id;
+}
+
+Cycles
+flit::get_time()
+{
+ return m_time;
+}
+
+Cycles
+flit::get_enqueue_time()
+{
+ return m_enqueue_time;
+}
+
+void
+flit::set_time(Cycles time)
+{
+ m_time = time;
+}
+
+int
+flit::get_vnet()
+{
+ return m_vnet;
+}
+
+int
+flit::get_vc()
+{
+ return m_vc;
+}
+
+void
+flit::set_vc(int vc)
+{
+ m_vc = vc;
+}
+
+MsgPtr&
+flit::get_msg_ptr()
+{
+ return m_msg_ptr;
+}
+
+flit_type
+flit::get_type()
+{
+ return m_type;
+}
+
+void
+flit::set_delay(Cycles delay)
+{
+ src_delay = delay;
+}
+
+Cycles
+flit::get_delay()
+{
+ return src_delay;
+}
+
void
flit::print(std::ostream& out) const
{
@@ -56,7 +132,7 @@ flit::print(std::ostream& out) const
out << "Type=" << m_type << " ";
out << "Vnet=" << m_vnet << " ";
out << "VC=" << m_vc << " ";
- out << "Creation Time=" << m_creation_time << " ";
+ out << "Enqueue Time=" << m_enqueue_time << " ";
out << "]";
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/flit.hh b/src/mem/ruby/network/garnet/flexible-pipeline/flit.hh
index 4049a9212..ff4afbc08 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/flit.hh
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/flit.hh
@@ -43,18 +43,18 @@ class flit
public:
flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime);
- int get_size() const { return m_size; }
- int get_id() const { return m_id; }
- Cycles get_time() const { return m_time; }
- Cycles get_creation_time() const { return m_creation_time; }
- void set_time(Cycles time) { m_time = time; }
- int get_vnet() const { return m_vnet; }
- int get_vc() const { return m_vc; }
- void set_vc(int vc) { m_vc = vc; }
- MsgPtr& get_msg_ptr() { return m_msg_ptr; }
- flit_type get_type() const { return m_type; }
- void set_delay(Cycles delay) { src_delay = delay; }
- Cycles get_delay() const { return src_delay; }
+ int get_size();
+ int get_id();
+ Cycles get_time();
+ Cycles get_enqueue_time();
+ void set_time(Cycles time);
+ int get_vnet();
+ int get_vc();
+ void set_vc(int vc);
+ MsgPtr& get_msg_ptr();
+ flit_type get_type();
+ void set_delay(Cycles delay);
+ Cycles get_delay();
void print(std::ostream& out) const;
static bool
@@ -71,12 +71,11 @@ class flit
bool functionalWrite(Packet *pkt);
private:
- const int m_id;
- const int m_vnet;
+ int m_id;
+ int m_vnet;
int m_vc;
- const int m_size;
- const Cycles m_creation_time;
- Cycles m_time;
+ int m_size;
+ Cycles m_enqueue_time, m_time;
flit_type m_type;
MsgPtr m_msg_ptr;
Cycles src_delay;
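
One plausible motivation for dropping the const qualifiers on m_id, m_vnet,
and m_size (an inference, not stated in the patch): const data members
delete a class's implicit copy assignment, which bites as soon as flits are
assigned or reshuffled inside containers. A small illustration:

    #include <vector>

    struct WithConst { const int id; };   // implicit operator= is deleted
    struct WithoutConst { int id; };      // assignable, sortable, etc.

    int main()
    {
        std::vector<WithoutConst> ok{{2}, {1}};
        ok[0] = ok[1];          // fine
        // std::vector<WithConst> bad{{2}, {1}};
        // bad[0] = bad[1];     // would not compile: assignment deleted
        return 0;
    }
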
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 697357ccb..de038d211 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -49,8 +49,9 @@ operator<(const LinkOrder& l1, const LinkOrder& l2)
}
PerfectSwitch::PerfectSwitch(SwitchID sid, Switch *sw, uint32_t virt_nets)
- : Consumer(sw), m_switch_id(sid), m_switch(sw)
+ : Consumer(sw)
{
+ m_switch_id = sid;
m_round_robin_start = 0;
m_wakeups_wo_switch = 0;
m_virtual_networks = virt_nets;
@@ -103,6 +104,9 @@ PerfectSwitch::~PerfectSwitch()
void
PerfectSwitch::operateVnet(int vnet)
{
+ MsgPtr msg_ptr;
+ Message *net_msg_ptr = NULL;
+
// This is for round-robin scheduling
int incoming = m_round_robin_start;
m_round_robin_start++;
@@ -119,6 +123,10 @@ PerfectSwitch::operateVnet(int vnet)
incoming = 0;
}
+ // temporary vectors to store the routing results
+ vector<LinkID> output_links;
+ vector<NetDest> output_link_destinations;
+
// Is there a message waiting?
if (m_in[incoming].size() <= vnet) {
continue;
@@ -129,151 +137,138 @@ PerfectSwitch::operateVnet(int vnet)
continue;
}
- operateMessageBuffer(buffer, incoming, vnet);
- }
- }
-}
-
-void
-PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
- int vnet)
-{
- MsgPtr msg_ptr;
- Message *net_msg_ptr = NULL;
-
- // temporary vectors to store the routing results
- vector<LinkID> output_links;
- vector<NetDest> output_link_destinations;
-
- while (buffer->isReady()) {
- DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
-
- // Peek at message
- msg_ptr = buffer->peekMsgPtr();
- net_msg_ptr = msg_ptr.get();
- DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-
- output_links.clear();
- output_link_destinations.clear();
- NetDest msg_dsts = net_msg_ptr->getDestination();
-
- // Unfortunately, the token-protocol sends some
- // zero-destination messages, so this assert isn't valid
- // assert(msg_dsts.count() > 0);
-
- assert(m_link_order.size() == m_routing_table.size());
- assert(m_link_order.size() == m_out.size());
-
- if (m_network_ptr->getAdaptiveRouting()) {
- if (m_network_ptr->isVNetOrdered(vnet)) {
- // Don't adaptively route
- for (int out = 0; out < m_out.size(); out++) {
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = 0;
- }
- } else {
- // Find how clogged each link is
- for (int out = 0; out < m_out.size(); out++) {
- int out_queue_length = 0;
- for (int v = 0; v < m_virtual_networks; v++) {
- out_queue_length += m_out[out][v]->getSize();
+ while (buffer->isReady()) {
+ DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+
+ // Peek at message
+ msg_ptr = buffer->peekMsgPtr();
+ net_msg_ptr = msg_ptr.get();
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+
+ output_links.clear();
+ output_link_destinations.clear();
+ NetDest msg_dsts = net_msg_ptr->getDestination();
+
+ // Unfortunately, the token-protocol sends some
+ // zero-destination messages, so this assert isn't valid
+ // assert(msg_dsts.count() > 0);
+
+ assert(m_link_order.size() == m_routing_table.size());
+ assert(m_link_order.size() == m_out.size());
+
+ if (m_network_ptr->getAdaptiveRouting()) {
+ if (m_network_ptr->isVNetOrdered(vnet)) {
+ // Don't adaptively route
+ for (int out = 0; out < m_out.size(); out++) {
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = 0;
+ }
+ } else {
+ // Find how clogged each link is
+ for (int out = 0; out < m_out.size(); out++) {
+ int out_queue_length = 0;
+ for (int v = 0; v < m_virtual_networks; v++) {
+ out_queue_length += m_out[out][v]->getSize();
+ }
+ int value =
+ (out_queue_length << 8) |
+ random_mt.random(0, 0xff);
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = value;
+ }
+
+ // Look at the most empty link first
+ sort(m_link_order.begin(), m_link_order.end());
}
- int value =
- (out_queue_length << 8) |
- random_mt.random(0, 0xff);
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = value;
}
- // Look at the most empty link first
- sort(m_link_order.begin(), m_link_order.end());
- }
- }
+ for (int i = 0; i < m_routing_table.size(); i++) {
+ // pick the next link to look at
+ int link = m_link_order[i].m_link;
+ NetDest dst = m_routing_table[link];
+ DPRINTF(RubyNetwork, "dst: %s\n", dst);
- for (int i = 0; i < m_routing_table.size(); i++) {
- // pick the next link to look at
- int link = m_link_order[i].m_link;
- NetDest dst = m_routing_table[link];
- DPRINTF(RubyNetwork, "dst: %s\n", dst);
+ if (!msg_dsts.intersectionIsNotEmpty(dst))
+ continue;
- if (!msg_dsts.intersectionIsNotEmpty(dst))
- continue;
+ // Remember what link we're using
+ output_links.push_back(link);
- // Remember what link we're using
- output_links.push_back(link);
+ // Need to remember which destinations need this message in
+ // another vector. This Set is the intersection of the
+ // routing_table entry and the current destination set. The
+ // intersection must not be empty, since we are inside "if"
+ output_link_destinations.push_back(msg_dsts.AND(dst));
- // Need to remember which destinations need this message in
- // another vector. This Set is the intersection of the
- // routing_table entry and the current destination set. The
- // intersection must not be empty, since we are inside "if"
- output_link_destinations.push_back(msg_dsts.AND(dst));
-
- // Next, we update the msg_destination not to include
- // those nodes that were already handled by this link
- msg_dsts.removeNetDest(dst);
- }
+ // Next, we update the msg_destination not to include
+ // those nodes that were already handled by this link
+ msg_dsts.removeNetDest(dst);
+ }
- assert(msg_dsts.count() == 0);
+ assert(msg_dsts.count() == 0);
- // Check for resources - for all outgoing queues
- bool enough = true;
- for (int i = 0; i < output_links.size(); i++) {
- int outgoing = output_links[i];
+ // Check for resources - for all outgoing queues
+ bool enough = true;
+ for (int i = 0; i < output_links.size(); i++) {
+ int outgoing = output_links[i];
- if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
- enough = false;
+ if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+ enough = false;
- DPRINTF(RubyNetwork, "Checking if node is blocked ..."
- "outgoing: %d, vnet: %d, enough: %d\n",
- outgoing, vnet, enough);
- }
+ DPRINTF(RubyNetwork, "Checking if node is blocked ..."
+ "outgoing: %d, vnet: %d, enough: %d\n",
+ outgoing, vnet, enough);
+ }
- // There were not enough resources
- if (!enough) {
- scheduleEvent(Cycles(1));
- DPRINTF(RubyNetwork, "Can't deliver message since a node "
- "is blocked\n");
- DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
- break; // go to next incoming port
- }
+ // There were not enough resources
+ if (!enough) {
+ scheduleEvent(Cycles(1));
+ DPRINTF(RubyNetwork, "Can't deliver message since a node "
+ "is blocked\n");
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+ break; // go to next incoming port
+ }
- MsgPtr unmodified_msg_ptr;
+ MsgPtr unmodified_msg_ptr;
- if (output_links.size() > 1) {
- // If we are sending this message down more than one link
- // (size>1), we need to make a copy of the message so each
- // branch can have a different internal destination we need
- // to create an unmodified MsgPtr because the MessageBuffer
- // enqueue func will modify the message
+ if (output_links.size() > 1) {
+ // If we are sending this message down more than one link
+ // (size>1), we need to make a copy of the message so each
+ // branch can have a different internal destination we need
+ // to create an unmodified MsgPtr because the MessageBuffer
+ // enqueue func will modify the message
- // This magic line creates a private copy of the message
- unmodified_msg_ptr = msg_ptr->clone();
- }
+ // This magic line creates a private copy of the message
+ unmodified_msg_ptr = msg_ptr->clone();
+ }
- // Dequeue msg
- buffer->dequeue();
- m_pending_message_count[vnet]--;
+ // Dequeue msg
+ buffer->dequeue();
+ m_pending_message_count[vnet]--;
- // Enqueue it - for all outgoing queues
- for (int i=0; i<output_links.size(); i++) {
- int outgoing = output_links[i];
+ // Enqueue it - for all outgoing queues
+ for (int i=0; i<output_links.size(); i++) {
+ int outgoing = output_links[i];
- if (i > 0) {
- // create a private copy of the unmodified message
- msg_ptr = unmodified_msg_ptr->clone();
- }
+ if (i > 0) {
+ // create a private copy of the unmodified message
+ msg_ptr = unmodified_msg_ptr->clone();
+ }
- // Change the internal destination set of the message so it
- // knows which destinations this link is responsible for.
- net_msg_ptr = msg_ptr.get();
- net_msg_ptr->getDestination() = output_link_destinations[i];
+ // Change the internal destination set of the message so it
+ // knows which destinations this link is responsible for.
+ net_msg_ptr = msg_ptr.get();
+ net_msg_ptr->getDestination() =
+ output_link_destinations[i];
- // Enqeue msg
- DPRINTF(RubyNetwork, "Enqueuing net msg from "
- "inport[%d][%d] to outport [%d][%d].\n",
- incoming, vnet, outgoing, vnet);
+ // Enqueue msg
+ DPRINTF(RubyNetwork, "Enqueuing net msg from "
+ "inport[%d][%d] to outport [%d][%d].\n",
+ incoming, vnet, outgoing, vnet);
- m_out[outgoing][vnet]->enqueue(msg_ptr);
+ m_out[outgoing][vnet]->enqueue(msg_ptr);
+ }
+ }
}
}
}
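
The hunk above restores PerfectSwitch's forwarding loop to its earlier shape: every candidate output queue is first checked for a free slot, and when a message fans out over more than one link it is cloned so each copy carries only the destination subset that link is responsible for. A minimal sketch of that clone-per-link pattern, with simplified stand-ins for the Ruby MsgPtr and MessageBuffer types:

    // Sketch only: Message/MsgPtr stand in for the Ruby types, and a
    // vector of vectors stands in for the per-link output buffers.
    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Message { std::vector<int> dests; };
    using MsgPtr = std::shared_ptr<Message>;

    void forward(MsgPtr msg,
                 const std::vector<std::vector<int>> &link_dests,
                 std::vector<std::vector<MsgPtr>> &out_queues)
    {
        // Keep a pristine copy when fanning out, because enqueueing
        // modifies the message (here: its internal destination set).
        MsgPtr unmodified = (link_dests.size() > 1)
            ? std::make_shared<Message>(*msg) : nullptr;

        for (std::size_t i = 0; i < link_dests.size(); i++) {
            if (i > 0) // later branches get a private copy
                msg = std::make_shared<Message>(*unmodified);
            msg->dests = link_dests[i]; // subset this link handles
            out_queues[i].push_back(msg);
        }
    }
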
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.hh b/src/mem/ruby/network/simple/PerfectSwitch.hh
index 1cc986964..f55281d54 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.hh
+++ b/src/mem/ruby/network/simple/PerfectSwitch.hh
@@ -85,10 +85,8 @@ class PerfectSwitch : public Consumer
PerfectSwitch& operator=(const PerfectSwitch& obj);
void operateVnet(int vnet);
- void operateMessageBuffer(MessageBuffer *b, int incoming, int vnet);
- const SwitchID m_switch_id;
- Switch * const m_switch;
+ SwitchID m_switch_id;
// vector of queues from the components
std::vector<std::vector<MessageBuffer*> > m_in;
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.cc b/src/mem/ruby/network/simple/SimpleNetwork.cc
index 09daa7960..5b7d7ebad 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.cc
+++ b/src/mem/ruby/network/simple/SimpleNetwork.cc
@@ -38,15 +38,23 @@
#include "mem/ruby/network/simple/Switch.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/system/System.hh"
using namespace std;
using m5::stl_helpers::deletePointers;
SimpleNetwork::SimpleNetwork(const Params *p)
- : Network(p), m_buffer_size(p->buffer_size),
- m_endpoint_bandwidth(p->endpoint_bandwidth),
- m_adaptive_routing(p->adaptive_routing)
+ : Network(p)
{
+ m_buffer_size = p->buffer_size;
+ m_endpoint_bandwidth = p->endpoint_bandwidth;
+ m_adaptive_routing = p->adaptive_routing;
+
+ // Note: the parent Network Object constructor is called before the
+ // SimpleNetwork child constructor. Therefore, the member variables
+ // used below should already be initialized.
+ m_endpoint_switches.resize(m_nodes);
+
// record the routers
for (vector<BasicRouter*>::const_iterator i = p->routers.begin();
i != p->routers.end(); ++i) {
@@ -91,6 +99,8 @@ SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
m_switches[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry,
simple_link->m_latency,
simple_link->m_bw_multiplier);
+
+ m_endpoint_switches[dest] = m_switches[src];
}
// From an endpoint node to a switch
@@ -223,6 +233,12 @@ SimpleNetwork::functionalRead(Packet *pkt)
}
}
+ for (unsigned int i = 0; i < m_int_link_buffers.size(); ++i) {
+ if (m_int_link_buffers[i]->functionalRead(pkt)) {
+ return true;
+ }
+ }
+
return false;
}
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.hh b/src/mem/ruby/network/simple/SimpleNetwork.hh
index efb342e6e..fe0c1838b 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.hh
+++ b/src/mem/ruby/network/simple/SimpleNetwork.hh
@@ -95,9 +95,11 @@ class SimpleNetwork : public Network
std::vector<Switch*> m_switches;
std::vector<MessageBuffer*> m_int_link_buffers;
int m_num_connected_buffers;
- const int m_buffer_size;
- const int m_endpoint_bandwidth;
- const bool m_adaptive_routing;
+ std::vector<Switch*> m_endpoint_switches;
+
+ int m_buffer_size;
+ int m_endpoint_bandwidth;
+ bool m_adaptive_routing;
//Statistical variables
Stats::Formula m_msg_counts[MessageSizeType_NUM];
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index e5988e505..b9d0b8010 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -184,6 +184,12 @@ Switch::print(std::ostream& out) const
bool
Switch::functionalRead(Packet *pkt)
{
+ // Access the buffers in the switch for performing a functional read
+ for (unsigned int i = 0; i < m_port_buffers.size(); ++i) {
+ if (m_port_buffers[i]->functionalRead(pkt)) {
+ return true;
+ }
+ }
return false;
}
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index c97531e58..785e09aa2 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -31,7 +31,6 @@
#include "base/cast.hh"
#include "base/cprintf.hh"
#include "debug/RubyNetwork.hh"
-#include "mem/ruby/network/simple/Switch.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/network/Network.hh"
@@ -49,10 +48,27 @@ static int network_message_to_size(Message* net_msg_ptr);
Throttle::Throttle(int sID, RubySystem *rs, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
- Switch *em)
- : Consumer(em), m_switch_id(sID), m_switch(em), m_node(node),
- m_ruby_system(rs)
+ ClockedObject *em)
+ : Consumer(em), m_ruby_system(rs)
{
+ init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
+ m_sID = sID;
+}
+
+Throttle::Throttle(RubySystem *rs, NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth,
+ ClockedObject *em)
+ : Consumer(em), m_ruby_system(rs)
+{
+ init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
+ m_sID = 0;
+}
+
+void
+Throttle::init(NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth)
+{
+ m_node = node;
m_vnets = 0;
assert(link_bandwidth_multiplier > 0);
@@ -82,7 +98,7 @@ Throttle::addLinks(const vector<MessageBuffer*>& in_vec,
// Set consumer and description
in_ptr->setConsumer(this);
- string desc = "[Queue to Throttle " + to_string(m_switch_id) + " " +
+ string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
to_string(m_node) + "]";
}
}
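
Both Throttle constructors now funnel their common setup through a private init() member, the usual pre-C++11 substitute for delegating constructors (the two-argument overload also records the switch id, while the other zeroes it). The idiom in isolation:

    // Sketch of the two-constructors-plus-init() idiom used above.
    class ThrottleSketch {
      public:
        ThrottleSketch(int sID, int node) { init(node); m_sID = sID; }
        explicit ThrottleSketch(int node) { init(node); m_sID = 0; }
      private:
        void init(int node) { m_node = node; m_vnets = 0; }
        int m_sID;
        int m_node;
        unsigned m_vnets;
    };

With C++11 the second overload could simply delegate (ThrottleSketch(int node) : ThrottleSketch(0, node) {}), but the code here sticks to the older pattern.
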
diff --git a/src/mem/ruby/network/simple/Throttle.hh b/src/mem/ruby/network/simple/Throttle.hh
index 405593bb1..85bf9691a 100644
--- a/src/mem/ruby/network/simple/Throttle.hh
+++ b/src/mem/ruby/network/simple/Throttle.hh
@@ -47,18 +47,20 @@
#include "mem/ruby/system/System.hh"
class MessageBuffer;
-class Switch;
class Throttle : public Consumer
{
public:
Throttle(int sID, RubySystem *rs, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
- Switch *em);
+ ClockedObject *em);
+ Throttle(RubySystem *rs, NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth,
+ ClockedObject *em);
~Throttle() {}
std::string name()
- { return csprintf("Throttle-%i", m_switch_id); }
+ { return csprintf("Throttle-%i", m_sID); }
void addLinks(const std::vector<MessageBuffer*>& in_vec,
const std::vector<MessageBuffer*>& out_vec);
@@ -95,10 +97,8 @@ class Throttle : public Consumer
unsigned int m_vnets;
std::vector<int> m_units_remaining;
- const int m_switch_id;
- Switch *m_switch;
+ int m_sID;
NodeID m_node;
-
int m_link_bandwidth_multiplier;
Cycles m_link_latency;
int m_wakeups_wo_switch;
diff --git a/src/mem/ruby/profiler/AccessTraceForAddress.hh b/src/mem/ruby/profiler/AccessTraceForAddress.hh
index 3e9d54499..af42489bc 100644
--- a/src/mem/ruby/profiler/AccessTraceForAddress.hh
+++ b/src/mem/ruby/profiler/AccessTraceForAddress.hh
@@ -67,12 +67,12 @@ class AccessTraceForAddress
private:
Addr m_addr;
- uint64_t m_loads;
- uint64_t m_stores;
- uint64_t m_atomics;
- uint64_t m_total;
- uint64_t m_user;
- uint64_t m_sharing;
+ uint64 m_loads;
+ uint64 m_stores;
+ uint64 m_atomics;
+ uint64 m_total;
+ uint64 m_user;
+ uint64 m_sharing;
Set m_touched_by;
Histogram* m_histogram_ptr;
};
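
This commit consistently swaps the standard fixed-width names (uint64_t/int64_t) back to Ruby's own uint64/int64 aliases. Those aliases live in TypeDefines.hh (also touched by this commit) and are presumably plain typedefs along these lines:

    // Assumed shape of the Ruby-local aliases this revert restores
    // (cf. src/mem/ruby/common/TypeDefines.hh).
    typedef long long int64;
    typedef unsigned long long uint64;
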
diff --git a/src/mem/ruby/profiler/AddressProfiler.cc b/src/mem/ruby/profiler/AddressProfiler.cc
index 52c693330..0e7ea7e36 100644
--- a/src/mem/ruby/profiler/AddressProfiler.cc
+++ b/src/mem/ruby/profiler/AddressProfiler.cc
@@ -67,7 +67,7 @@ printSorted(ostream& out, int num_of_sequencers, const AddressMap &record_map,
{
const int records_printed = 100;
- uint64_t misses = 0;
+ uint64 misses = 0;
std::vector<const AccessTraceForAddress *> sorted;
AddressMap::const_iterator i = record_map.begin();
@@ -95,8 +95,8 @@ printSorted(ostream& out, int num_of_sequencers, const AddressMap &record_map,
Histogram all_records_log(-1);
// Allows us to track how many lines were touched by n processors
- std::vector<int64_t> m_touched_vec;
- std::vector<int64_t> m_touched_weighted_vec;
+ std::vector<int64> m_touched_vec;
+ std::vector<int64> m_touched_weighted_vec;
m_touched_vec.resize(num_of_sequencers+1);
m_touched_weighted_vec.resize(num_of_sequencers+1);
for (int j = 0; j < m_touched_vec.size(); j++) {
diff --git a/src/mem/ruby/profiler/AddressProfiler.hh b/src/mem/ruby/profiler/AddressProfiler.hh
index ebd44080b..39544c0a2 100644
--- a/src/mem/ruby/profiler/AddressProfiler.hh
+++ b/src/mem/ruby/profiler/AddressProfiler.hh
@@ -75,7 +75,7 @@ class AddressProfiler
AddressProfiler(const AddressProfiler& obj);
AddressProfiler& operator=(const AddressProfiler& obj);
- int64_t m_sharing_miss_counter;
+ int64 m_sharing_miss_counter;
AddressMap m_dataAccessTrace;
AddressMap m_macroBlockAccessTrace;
diff --git a/src/mem/ruby/profiler/Profiler.cc b/src/mem/ruby/profiler/Profiler.cc
index b3b37e5a6..7decd497a 100644
--- a/src/mem/ruby/profiler/Profiler.cc
+++ b/src/mem/ruby/profiler/Profiler.cc
@@ -61,10 +61,11 @@ using namespace std;
using m5::stl_helpers::operator<<;
Profiler::Profiler(const RubySystemParams *p, RubySystem *rs)
- : m_ruby_system(rs), m_hot_lines(p->hot_lines),
- m_all_instructions(p->all_instructions),
- m_num_vnets(p->number_of_virtual_networks)
+ : m_ruby_system(rs)
{
+ m_hot_lines = p->hot_lines;
+ m_all_instructions = p->all_instructions;
+
m_address_profiler_ptr = new AddressProfiler(p->num_of_sequencers, this);
m_address_profiler_ptr->setHotLines(m_hot_lines);
m_address_profiler_ptr->setAllInstructions(m_all_instructions);
@@ -97,7 +98,8 @@ Profiler::regStats(const std::string &pName)
.desc("delay histogram for all message")
.flags(Stats::nozero | Stats::pdf | Stats::oneline);
- for (int i = 0; i < m_num_vnets; i++) {
+ uint32_t numVNets = Network::getNumberOfVirtualNetworks();
+ for (int i = 0; i < numVNets; i++) {
delayVCHistogram.push_back(new Stats::Histogram());
delayVCHistogram[i]
->init(10)
@@ -249,6 +251,7 @@ Profiler::collateStats()
m_inst_profiler_ptr->collateStats();
}
+ uint32_t numVNets = Network::getNumberOfVirtualNetworks();
for (uint32_t i = 0; i < MachineType_NUM; i++) {
for (map<uint32_t, AbstractController*>::iterator it =
m_ruby_system->m_abstract_controls[i].begin();
@@ -257,7 +260,7 @@ Profiler::collateStats()
AbstractController *ctr = (*it).second;
delayHistogram.add(ctr->getDelayHist());
- for (uint32_t i = 0; i < m_num_vnets; i++) {
+ for (uint32_t i = 0; i < numVNets; i++) {
delayVCHistogram[i]->add(ctr->getDelayVCHist(i));
}
}
diff --git a/src/mem/ruby/profiler/Profiler.hh b/src/mem/ruby/profiler/Profiler.hh
index 6cfdab1d5..146beadd6 100644
--- a/src/mem/ruby/profiler/Profiler.hh
+++ b/src/mem/ruby/profiler/Profiler.hh
@@ -80,8 +80,8 @@ class Profiler
void addAddressTraceSample(const RubyRequest& msg, NodeID id);
// added by SS
- bool getHotLines() const { return m_hot_lines; }
- bool getAllInstructions() const { return m_all_instructions; }
+ bool getHotLines() { return m_hot_lines; }
+ bool getAllInstructions() { return m_all_instructions; }
private:
// Private copy constructor and assignment operator
@@ -129,9 +129,8 @@ class Profiler
Stats::Scalar m_IncompleteTimes[MachineType_NUM];
//added by SS
- const bool m_hot_lines;
- const bool m_all_instructions;
- const uint32_t m_num_vnets;
+ bool m_hot_lines;
+ bool m_all_instructions;
};
#endif // __MEM_RUBY_PROFILER_PROFILER_HH__
diff --git a/src/mem/ruby/profiler/StoreTrace.cc b/src/mem/ruby/profiler/StoreTrace.cc
index c3c1f8a19..40bf2e7b6 100644
--- a/src/mem/ruby/profiler/StoreTrace.cc
+++ b/src/mem/ruby/profiler/StoreTrace.cc
@@ -33,7 +33,7 @@ using namespace std;
bool StoreTrace::s_init = false; // Total number of store lifetimes of
// all lines
-int64_t StoreTrace::s_total_samples = 0; // Total number of store
+int64 StoreTrace::s_total_samples = 0; // Total number of store
// lifetimes of all lines
Histogram* StoreTrace::s_store_count_ptr = NULL;
Histogram* StoreTrace::s_store_first_to_stolen_ptr = NULL;
diff --git a/src/mem/ruby/profiler/StoreTrace.hh b/src/mem/ruby/profiler/StoreTrace.hh
index a686594f8..9c1b83cd6 100644
--- a/src/mem/ruby/profiler/StoreTrace.hh
+++ b/src/mem/ruby/profiler/StoreTrace.hh
@@ -53,7 +53,7 @@ class StoreTrace
private:
static bool s_init;
- static int64_t s_total_samples; // Total number of store lifetimes
+ static int64 s_total_samples; // Total number of store lifetimes
// of all lines
static Histogram* s_store_count_ptr;
static Histogram* s_store_first_to_stolen_ptr;
@@ -66,7 +66,7 @@ class StoreTrace
Tick m_last_store;
int m_stores_this_interval;
- int64_t m_total_samples; // Total number of store lifetimes of this line
+ int64 m_total_samples; // Total number of store lifetimes of this line
Histogram m_store_count;
Histogram m_store_first_to_stolen;
Histogram m_store_last_to_stolen;
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
index 416aea73b..01fd3f522 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
@@ -28,9 +28,6 @@
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
-#include "base/trace.hh"
-#include "debug/RubyCache.hh"
-
AbstractCacheEntry::AbstractCacheEntry()
{
m_Permission = AccessPermission_NotPresent;
@@ -51,25 +48,3 @@ AbstractCacheEntry::changePermission(AccessPermission new_perm)
m_locked = -1;
}
}
-
-void
-AbstractCacheEntry::setLocked(int context)
-{
- DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", m_Address, context);
- m_locked = context;
-}
-
-void
-AbstractCacheEntry::clearLocked()
-{
- DPRINTF(RubyCache, "Clear Lock for addr: %x\n", m_Address);
- m_locked = -1;
-}
-
-bool
-AbstractCacheEntry::isLocked(int context) const
-{
- DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
- m_Address, m_locked, context);
- return m_locked == context;
-}
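
With setLocked()/clearLocked()/isLocked() deleted from the entry class, lock state is once again just the public m_locked field, manipulated directly by CacheMemory (see its hunks below). The convention is small enough to state as code: -1 means unlocked, any other value names the context holding the LL/SC reservation. A sketch:

    // Sketch of the m_locked convention restored by this revert.
    struct EntrySketch { int m_locked = -1; };

    inline void setLocked(EntrySketch &e, int context) { e.m_locked = context; }
    inline void clearLocked(EntrySketch &e)            { e.m_locked = -1; }
    inline bool isLocked(const EntrySketch &e, int context)
    { return e.m_locked == context; }
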
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
index 926556781..6c7a4a008 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
@@ -56,28 +56,10 @@ class AbstractCacheEntry : public AbstractEntry
virtual DataBlock& getDataBlk()
{ panic("getDataBlk() not implemented!"); }
- // Functions for locking and unlocking the cache entry. These are required
- // for supporting atomic memory accesses.
- void setLocked(int context);
- void clearLocked();
- bool isLocked(int context) const;
- void setSetIndex(uint32_t s) { m_set_index = s; }
- uint32_t getSetIndex() const { return m_set_index; }
-
- void setWayIndex(uint32_t s) { m_way_index = s; }
- uint32_t getWayIndex() const { return m_way_index; }
-
- // Address of this block, required by CacheMemory
- Addr m_Address;
- // Holds info whether the address is locked.
- // Required for implementing LL/SC operations.
- int m_locked;
-
- private:
- // Set and way coordinates of the entry within the cache memory object.
- uint32_t m_set_index;
- uint32_t m_way_index;
+ Addr m_Address; // Address of this block, required by CacheMemory
+ int m_locked; // Holds info whether the address is locked,
+ // required for implementing LL/SC
};
inline std::ostream&
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index 34160c149..94361034a 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -139,14 +139,14 @@ class AbstractController : public MemObject, public Consumer
void wakeUpAllBuffers();
protected:
- const NodeID m_version;
+ NodeID m_version;
MachineID m_machineID;
- const NodeID m_clusterID;
+ NodeID m_clusterID;
// MasterID used by some components of gem5.
- const MasterID m_masterId;
+ MasterID m_masterId;
- Network *m_net_ptr;
+ Network* m_net_ptr;
bool m_is_blocking;
std::map<Addr, MessageBuffer*> m_block_map;
@@ -157,9 +157,9 @@ class AbstractController : public MemObject, public Consumer
unsigned int m_in_ports;
unsigned int m_cur_in_port;
- const int m_number_of_TBEs;
- const int m_transitions_per_cycle;
- const unsigned int m_buffer_size;
+ int m_number_of_TBEs;
+ int m_transitions_per_cycle;
+ unsigned int m_buffer_size;
Cycles m_recycle_latency;
//! Counter for the number of cycles when the transitions carried out
diff --git a/src/mem/ruby/structures/AbstractReplacementPolicy.cc b/src/mem/ruby/structures/AbstractReplacementPolicy.cc
index d802ecd31..fbcce6e2d 100644
--- a/src/mem/ruby/structures/AbstractReplacementPolicy.cc
+++ b/src/mem/ruby/structures/AbstractReplacementPolicy.cc
@@ -66,7 +66,7 @@ AbstractReplacementPolicy::~AbstractReplacementPolicy()
}
Tick
-AbstractReplacementPolicy::getLastAccess(int64_t set, int64_t way)
+AbstractReplacementPolicy::getLastAccess(int64 set, int64 way)
{
return m_last_ref_ptr[set][way];
}
diff --git a/src/mem/ruby/structures/AbstractReplacementPolicy.hh b/src/mem/ruby/structures/AbstractReplacementPolicy.hh
index c118f3c11..03ef0d2fd 100644
--- a/src/mem/ruby/structures/AbstractReplacementPolicy.hh
+++ b/src/mem/ruby/structures/AbstractReplacementPolicy.hh
@@ -44,13 +44,13 @@ class AbstractReplacementPolicy : public SimObject
virtual ~AbstractReplacementPolicy();
/* touch a block. a.k.a. update timestamp */
- virtual void touch(int64_t set, int64_t way, Tick time) = 0;
+ virtual void touch(int64 set, int64 way, Tick time) = 0;
/* returns the way to replace */
- virtual int64_t getVictim(int64_t set) const = 0;
+ virtual int64 getVictim(int64 set) const = 0;
/* get the time of the last access */
- Tick getLastAccess(int64_t set, int64_t way);
+ Tick getLastAccess(int64 set, int64 way);
virtual bool useOccupancy() const { return false; }
diff --git a/src/mem/ruby/structures/BankedArray.cc b/src/mem/ruby/structures/BankedArray.cc
index b25962df6..8bc3cf584 100644
--- a/src/mem/ruby/structures/BankedArray.cc
+++ b/src/mem/ruby/structures/BankedArray.cc
@@ -49,7 +49,7 @@ BankedArray::BankedArray(unsigned int banks, Cycles accessLatency,
}
bool
-BankedArray::tryAccess(int64_t idx)
+BankedArray::tryAccess(int64 idx)
{
if (accessLatency == 0)
return true;
@@ -65,7 +65,7 @@ BankedArray::tryAccess(int64_t idx)
}
void
-BankedArray::reserve(int64_t idx)
+BankedArray::reserve(int64 idx)
{
if (accessLatency == 0)
return;
@@ -91,7 +91,7 @@ BankedArray::reserve(int64_t idx)
}
unsigned int
-BankedArray::mapIndexToBank(int64_t idx)
+BankedArray::mapIndexToBank(int64 idx)
{
if (banks == 1) {
return 0;
diff --git a/src/mem/ruby/structures/BankedArray.hh b/src/mem/ruby/structures/BankedArray.hh
index 179676f19..438186944 100644
--- a/src/mem/ruby/structures/BankedArray.hh
+++ b/src/mem/ruby/structures/BankedArray.hh
@@ -51,7 +51,7 @@ class BankedArray
{
public:
AccessRecord() : idx(0), startAccess(0), endAccess(0) {}
- int64_t idx;
+ int64 idx;
Tick startAccess;
Tick endAccess;
};
@@ -60,7 +60,7 @@ class BankedArray
// otherwise, schedule the event and wait for it to complete
std::vector<AccessRecord> busyBanks;
- unsigned int mapIndexToBank(int64_t idx);
+ unsigned int mapIndexToBank(int64 idx);
public:
BankedArray(unsigned int banks, Cycles accessLatency,
@@ -68,9 +68,9 @@ class BankedArray
// Note: We try the access based on the cache index, not the address
// This is so we don't get aliasing on blocks being replaced
- bool tryAccess(int64_t idx);
+ bool tryAccess(int64 idx);
- void reserve(int64_t idx);
+ void reserve(int64 idx);
Cycles getLatency() const { return accessLatency; }
};
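
BankedArray models bank conflicts on the tag/data arrays: an index maps to a bank, and the bank stays unusable until the end tick of its previous access. The mapping reverted above is, when more than one bank exists, a simple interleave; a sketch assuming index-modulo-banks and one busy-until tick per bank:

    #include <cstdint>
    #include <vector>

    // Sketch: modulo interleaving plus one busy-until tick per bank.
    struct BankedArraySketch {
        unsigned banks;
        std::vector<uint64_t> busy_until; // end tick of each bank's access

        explicit BankedArraySketch(unsigned b) : banks(b), busy_until(b, 0) {}

        unsigned mapIndexToBank(long long idx) const
        { return banks == 1 ? 0 : idx % banks; }

        bool tryAccess(long long idx, uint64_t now) const
        { return busy_until[mapIndexToBank(idx)] <= now; }

        void reserve(long long idx, uint64_t now, uint64_t latency)
        { busy_until[mapIndexToBank(idx)] = now + latency; }
    };
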
diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc
index ac6f823ce..7eba450c1 100644
--- a/src/mem/ruby/structures/CacheMemory.cc
+++ b/src/mem/ruby/structures/CacheMemory.cc
@@ -98,7 +98,7 @@ CacheMemory::~CacheMemory()
}
// convert an Address to its location in the cache
-int64_t
+int64
CacheMemory::addressToCacheSet(Addr address) const
{
assert(address == makeLineAddress(address));
@@ -109,7 +109,7 @@ CacheMemory::addressToCacheSet(Addr address) const
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
-CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
+CacheMemory::findTagInSet(int64 cacheSet, Addr tag) const
{
assert(tag == makeLineAddress(tag));
// search the set for the tags
@@ -124,7 +124,7 @@ CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
-CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
+CacheMemory::findTagInSetIgnorePermissions(int64 cacheSet,
Addr tag) const
{
assert(tag == makeLineAddress(tag));
@@ -158,12 +158,62 @@ CacheMemory::getAddressAtIdx(int idx) const
return entry->m_Address;
}
+bool
+CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == makeLineAddress(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+ int64 cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if (loc != -1) {
+ // Do we even have a tag match?
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+ data_ptr = &(entry->getDataBlk());
+
+ if (entry->m_Permission == AccessPermission_Read_Write) {
+ return true;
+ }
+ if ((entry->m_Permission == AccessPermission_Read_Only) &&
+ (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
+ return true;
+ }
+ // The line must not be accessible
+ }
+ data_ptr = NULL;
+ return false;
+}
+
+bool
+CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == makeLineAddress(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+ int64 cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+
+ if (loc != -1) {
+ // Do we even have a tag match?
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+ data_ptr = &(entry->getDataBlk());
+
+ return m_cache[cacheSet][loc]->m_Permission !=
+ AccessPermission_NotPresent;
+ }
+
+ data_ptr = NULL;
+ return false;
+}
+
// tests to see if an address is present in the cache
bool
CacheMemory::isTagPresent(Addr address) const
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc == -1) {
@@ -183,7 +233,7 @@ CacheMemory::cacheAvail(Addr address) const
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
for (int i = 0; i < m_cache_assoc; i++) {
AbstractCacheEntry* entry = m_cache[cacheSet][i];
@@ -201,7 +251,7 @@ CacheMemory::cacheAvail(Addr address) const
}
AbstractCacheEntry*
-CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
+CacheMemory::allocate(Addr address, AbstractCacheEntry* entry, bool touch)
{
assert(address == makeLineAddress(address));
assert(!isTagPresent(address));
@@ -209,7 +259,7 @@ CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
DPRINTF(RubyCache, "address: %s\n", address);
// Find the first open slot
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
for (int i = 0; i < m_cache_assoc; i++) {
if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
@@ -220,8 +270,6 @@ CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
address);
set[i]->m_locked = -1;
m_tag_index[address] = i;
- entry->setSetIndex(cacheSet);
- entry->setWayIndex(i);
if (touch) {
m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
@@ -239,7 +287,7 @@ CacheMemory::deallocate(Addr address)
assert(address == makeLineAddress(address));
assert(isTagPresent(address));
DPRINTF(RubyCache, "address: %s\n", address);
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
delete m_cache[cacheSet][loc];
@@ -255,7 +303,7 @@ CacheMemory::cacheProbe(Addr address) const
assert(address == makeLineAddress(address));
assert(!cacheAvail(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
m_Address;
}
@@ -265,7 +313,7 @@ AbstractCacheEntry*
CacheMemory::lookup(Addr address)
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc == -1) return NULL;
return m_cache[cacheSet][loc];
@@ -276,7 +324,7 @@ const AbstractCacheEntry*
CacheMemory::lookup(Addr address) const
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc == -1) return NULL;
return m_cache[cacheSet][loc];
@@ -286,7 +334,7 @@ CacheMemory::lookup(Addr address) const
void
CacheMemory::setMRU(Addr address)
{
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc != -1)
@@ -294,19 +342,11 @@ CacheMemory::setMRU(Addr address)
}
void
-CacheMemory::setMRU(const AbstractCacheEntry *e)
-{
- uint32_t cacheSet = e->getSetIndex();
- uint32_t loc = e->getWayIndex();
- m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
-}
-
-void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
- uint64_t warmedUpBlocks = 0;
- uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
- (uint64_t)m_cache_assoc;
+ uint64 warmedUpBlocks = 0;
+ uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
+ * (uint64)m_cache_assoc;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
@@ -336,7 +376,8 @@ CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
"recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
- totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
+ (uint64)m_cache_num_sets * (uint64)m_cache_assoc,
+ (float(warmedUpBlocks)/float(totalBlocks))*100.0);
}
void
@@ -369,10 +410,10 @@ CacheMemory::setLocked(Addr address, int context)
{
DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- m_cache[cacheSet][loc]->setLocked(context);
+ m_cache[cacheSet][loc]->m_locked = context;
}
void
@@ -380,22 +421,22 @@ CacheMemory::clearLocked(Addr address)
{
DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- m_cache[cacheSet][loc]->clearLocked();
+ m_cache[cacheSet][loc]->m_locked = -1;
}
bool
CacheMemory::isLocked(Addr address, int context)
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
address, m_cache[cacheSet][loc]->m_locked, context);
- return m_cache[cacheSet][loc]->isLocked(context);
+ return m_cache[cacheSet][loc]->m_locked == context;
}
void
@@ -553,13 +594,13 @@ CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
}
bool
-CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
+CacheMemory::isBlockInvalid(int64 cache_set, int64 loc)
{
return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}
bool
-CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
+CacheMemory::isBlockNotBusy(int64 cache_set, int64 loc)
{
return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}
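
The re-added tryCacheAccess() concentrates the hit rules in one spot: a Read_Write line satisfies any request, a Read_Only line satisfies only loads and instruction fetches, and anything else misses. The same decision table as a standalone predicate (the enumerators mirror the SLICC-generated AccessPermission/RubyRequestType values used above):

    // Sketch of the permission check inside tryCacheAccess().
    enum class Perm { NotPresent, Invalid, Busy, Read_Only, Read_Write };
    enum class Req  { LD, ST, ATOMIC, IFETCH };

    bool hits(Perm p, Req t)
    {
        if (p == Perm::Read_Write)
            return true;                              // full permission
        if (p == Perm::Read_Only)
            return t == Req::LD || t == Req::IFETCH;  // reads only
        return false;                                 // line not accessible
    }
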
diff --git a/src/mem/ruby/structures/CacheMemory.hh b/src/mem/ruby/structures/CacheMemory.hh
index 94174b286..08551ab87 100644
--- a/src/mem/ruby/structures/CacheMemory.hh
+++ b/src/mem/ruby/structures/CacheMemory.hh
@@ -56,6 +56,15 @@ class CacheMemory : public SimObject
void init();
+ // Public Methods
+ // perform a cache access and see if we hit or not. Return true on a hit.
+ bool tryCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr);
+
+ // similar to above, but doesn't require full access check
+ bool testCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr);
+
// tests to see if an address is present in the cache
bool isTagPresent(Addr address) const;
@@ -89,22 +98,15 @@ class CacheMemory : public SimObject
Cycles getTagLatency() const { return tagArray.getLatency(); }
Cycles getDataLatency() const { return dataArray.getLatency(); }
- bool isBlockInvalid(int64_t cache_set, int64_t loc);
- bool isBlockNotBusy(int64_t cache_set, int64_t loc);
+ bool isBlockInvalid(int64 cache_set, int64 loc);
+ bool isBlockNotBusy(int64 cache_set, int64 loc);
// Hook for checkpointing the contents of the cache
void recordCacheContents(int cntrl, CacheRecorder* tr) const;
// Set this address to most recently used
void setMRU(Addr address);
- // Set this entry to most recently used
- void setMRU(const AbstractCacheEntry *e);
-
- // Functions for locking and unlocking cache lines corresponding to the
- // provided address. These are required for supporting atomic memory
- // accesses. These are to be used when only the address of the cache entry
- // is available. In case the entry itself is available. use the functions
- // provided by the AbstractCacheEntry class.
+
void setLocked (Addr addr, int context);
void clearLocked (Addr addr);
bool isLocked (Addr addr, int context);
@@ -142,12 +144,12 @@ class CacheMemory : public SimObject
private:
// convert an Address to its location in the cache
- int64_t addressToCacheSet(Addr address) const;
+ int64 addressToCacheSet(Addr address) const;
// Given a cache tag: returns the index of the tag in a set.
// returns -1 if the tag is not found.
- int findTagInSet(int64_t line, Addr tag) const;
- int findTagInSetIgnorePermissions(int64_t cacheSet, Addr tag) const;
+ int findTagInSet(int64 line, Addr tag) const;
+ int findTagInSetIgnorePermissions(int64 cacheSet, Addr tag) const;
// Private copy constructor and assignment operator
CacheMemory(const CacheMemory& obj);
diff --git a/src/mem/ruby/structures/DirectoryMemory.cc b/src/mem/ruby/structures/DirectoryMemory.cc
index 82388a895..b840349e1 100644
--- a/src/mem/ruby/structures/DirectoryMemory.cc
+++ b/src/mem/ruby/structures/DirectoryMemory.cc
@@ -37,6 +37,7 @@ using namespace std;
int DirectoryMemory::m_num_directories = 0;
int DirectoryMemory::m_num_directories_bits = 0;
+uint64_t DirectoryMemory::m_total_size_bytes = 0;
int DirectoryMemory::m_numa_high_bit = 0;
DirectoryMemory::DirectoryMemory(const Params *p)
@@ -59,6 +60,7 @@ DirectoryMemory::init()
m_num_directories++;
m_num_directories_bits = ceilLog2(m_num_directories);
+ m_total_size_bytes += m_size_bytes;
if (m_numa_high_bit == 0) {
m_numa_high_bit = RubySystem::getMemorySizeBits() - 1;
diff --git a/src/mem/ruby/structures/DirectoryMemory.hh b/src/mem/ruby/structures/DirectoryMemory.hh
index 98403808b..a549366d0 100644
--- a/src/mem/ruby/structures/DirectoryMemory.hh
+++ b/src/mem/ruby/structures/DirectoryMemory.hh
@@ -76,6 +76,7 @@ class DirectoryMemory : public SimObject
static int m_num_directories;
static int m_num_directories_bits;
+ static uint64_t m_total_size_bytes;
static int m_numa_high_bit;
};
diff --git a/src/mem/ruby/structures/LRUPolicy.cc b/src/mem/ruby/structures/LRUPolicy.cc
index 286d19772..a1e3b277e 100644
--- a/src/mem/ruby/structures/LRUPolicy.cc
+++ b/src/mem/ruby/structures/LRUPolicy.cc
@@ -50,7 +50,7 @@ LRUReplacementPolicyParams::create()
void
-LRUPolicy::touch(int64_t set, int64_t index, Tick time)
+LRUPolicy::touch(int64 set, int64 index, Tick time)
{
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
@@ -58,11 +58,11 @@ LRUPolicy::touch(int64_t set, int64_t index, Tick time)
m_last_ref_ptr[set][index] = time;
}
-int64_t
-LRUPolicy::getVictim(int64_t set) const
+int64
+LRUPolicy::getVictim(int64 set) const
{
Tick time, smallest_time;
- int64_t smallest_index;
+ int64 smallest_index;
smallest_index = 0;
smallest_time = m_last_ref_ptr[set][0];
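
The scan loop of LRUPolicy::getVictim() falls outside the hunk above, but it is a linear search of one set for the smallest last-reference tick. The shape of that search:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch of the LRU victim scan: the way with the oldest (smallest)
    // last-reference time in the set is evicted.
    long long getVictimSketch(const std::vector<uint64_t> &last_ref)
    {
        long long victim = 0;
        uint64_t oldest = last_ref[0];
        for (std::size_t way = 1; way < last_ref.size(); way++) {
            if (last_ref[way] < oldest) {
                oldest = last_ref[way];
                victim = (long long)way;
            }
        }
        return victim;
    }
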
diff --git a/src/mem/ruby/structures/LRUPolicy.hh b/src/mem/ruby/structures/LRUPolicy.hh
index 388718319..9a9c9e3eb 100644
--- a/src/mem/ruby/structures/LRUPolicy.hh
+++ b/src/mem/ruby/structures/LRUPolicy.hh
@@ -41,8 +41,8 @@ class LRUPolicy : public AbstractReplacementPolicy
LRUPolicy(const Params * p);
~LRUPolicy();
- void touch(int64_t set, int64_t way, Tick time);
- int64_t getVictim(int64_t set) const;
+ void touch(int64 set, int64 way, Tick time);
+ int64 getVictim(int64 set) const;
};
#endif // __MEM_RUBY_STRUCTURES_LRUPOLICY_HH__
diff --git a/src/mem/ruby/structures/PseudoLRUPolicy.cc b/src/mem/ruby/structures/PseudoLRUPolicy.cc
index a2b21a625..8eee0821b 100644
--- a/src/mem/ruby/structures/PseudoLRUPolicy.cc
+++ b/src/mem/ruby/structures/PseudoLRUPolicy.cc
@@ -38,7 +38,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
// associativity cannot exceed capacity of tree representation
assert(m_num_sets > 0 &&
m_assoc > 1 &&
- m_assoc <= (int64_t) sizeof(uint64_t)*4);
+ m_assoc <= (int64) sizeof(uint64)*4);
m_trees = NULL;
m_num_levels = 0;
@@ -55,7 +55,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
m_num_levels++;
}
assert(m_num_levels < sizeof(unsigned int)*4);
- m_trees = new uint64_t[m_num_sets];
+ m_trees = new uint64[m_num_sets];
for (unsigned i = 0; i < m_num_sets; i++) {
m_trees[i] = 0;
}
@@ -75,7 +75,7 @@ PseudoLRUPolicy::~PseudoLRUPolicy()
}
void
-PseudoLRUPolicy::touch(int64_t set, int64_t index, Tick time)
+PseudoLRUPolicy::touch(int64 set, int64 index, Tick time)
{
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
@@ -93,10 +93,10 @@ PseudoLRUPolicy::touch(int64_t set, int64_t index, Tick time)
m_last_ref_ptr[set][index] = time;
}
-int64_t
-PseudoLRUPolicy::getVictim(int64_t set) const
+int64
+PseudoLRUPolicy::getVictim(int64 set) const
{
- int64_t index = 0;
+ int64 index = 0;
int tree_index = 0;
int node_val;
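
PseudoLRUPolicy packs one binary tree of direction bits per set into a uint64 (hence the associativity assertion above). touch() flips the bits on the path to the accessed way so they point away from it; getVictim() walks the tree following the bits toward the least recently touched leaf. A compact sketch using a heap-layout tree; the real code's node indexing differs, and associativity is assumed to be a power of two here:

    #include <cstdint>

    struct PLRUSetSketch {
        uint64_t bits = 0;   // one bit per internal tree node, heap layout
        unsigned assoc;      // power of two (>= 2) in this sketch
        explicit PLRUSetSketch(unsigned a) : assoc(a) {}

        void touch(unsigned way) {
            unsigned node = 0;
            for (unsigned half = assoc / 2; half >= 1; half /= 2) {
                bool right = way & half;   // subtree holding 'way'
                // Point the node away from the accessed subtree.
                if (right) bits &= ~(1ULL << node);
                else       bits |=  (1ULL << node);
                node = 2 * node + 1 + right;
            }
        }

        unsigned getVictim() const {
            unsigned node = 0, way = 0;
            for (unsigned half = assoc / 2; half >= 1; half /= 2) {
                bool right = (bits >> node) & 1; // follow away from MRU
                if (right) way |= half;
                node = 2 * node + 1 + right;
            }
            return way;
        }
    };
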
diff --git a/src/mem/ruby/structures/PseudoLRUPolicy.hh b/src/mem/ruby/structures/PseudoLRUPolicy.hh
index a4a388cf5..fc5add8b1 100644
--- a/src/mem/ruby/structures/PseudoLRUPolicy.hh
+++ b/src/mem/ruby/structures/PseudoLRUPolicy.hh
@@ -53,13 +53,13 @@ class PseudoLRUPolicy : public AbstractReplacementPolicy
PseudoLRUPolicy(const Params * p);
~PseudoLRUPolicy();
- void touch(int64_t set, int64_t way, Tick time);
- int64_t getVictim(int64_t set) const;
+ void touch(int64 set, int64 way, Tick time);
+ int64 getVictim(int64 set) const;
private:
unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */
unsigned int m_num_levels; /** number of levels in the tree */
- uint64_t *m_trees; /** bit representation of the
+ uint64* m_trees; /** bit representation of the
* trees, one for each set */
};
diff --git a/src/mem/ruby/structures/RubyMemoryControl.cc b/src/mem/ruby/structures/RubyMemoryControl.cc
index 413850627..0521aac06 100644
--- a/src/mem/ruby/structures/RubyMemoryControl.cc
+++ b/src/mem/ruby/structures/RubyMemoryControl.cc
@@ -176,7 +176,7 @@ void
RubyMemoryControl::init()
{
m_msg_counter = 0;
- assert(m_tFaw <= 62); // must fit in a uint64_t shift register
+ assert(m_tFaw <= 62); // must fit in a uint64 shift register
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
@@ -213,7 +213,7 @@ RubyMemoryControl::init()
// m_tfaw_count keeps track of how many 1 bits are set
// in each shift register. When m_tfaw_count is >= 4,
// new activates are not allowed.
- m_tfaw_shift = new uint64_t[m_total_ranks];
+ m_tfaw_shift = new uint64[m_total_ranks];
m_tfaw_count = new int[m_total_ranks];
for (int i = 0; i < m_total_ranks; i++) {
m_tfaw_shift[i] = 0;
@@ -236,7 +236,7 @@ RubyMemoryControl::reset()
{
m_msg_counter = 0;
- assert(m_tFaw <= 62); // must fit in a uint64_t shift register
+ assert(m_tFaw <= 62); // must fit in a uint64 shift register
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
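
The tFAW bookkeeping referenced above keeps a sliding window per rank, which is why m_tFaw must fit in 62 bits: issuing an activate sets a bit tFAW cycles out, the register shifts right every address-bus cycle, and m_tfaw_count (the number of set bits) blocks a fifth activate while four are still in the window. A sketch of that shift-register scheme:

    #include <cstdint>

    // Sketch of the per-rank tFAW window tracked by m_tfaw_shift/_count.
    struct TfawWindowSketch {
        uint64_t shift = 0; // bit i set: an activate leaves the window in i+1 cycles
        int count = 0;      // number of set bits in 'shift'

        bool canActivate() const { return count < 4; }

        void recordActivate(unsigned tFaw) {
            shift |= 1ULL << (tFaw - 1);
            count++;
        }

        void tick() {                  // once per address-bus cycle
            count -= (int)(shift & 1); // an activate aging out of the window
            shift >>= 1;
        }
    };
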
diff --git a/src/mem/ruby/structures/RubyMemoryControl.hh b/src/mem/ruby/structures/RubyMemoryControl.hh
index 376ce4d75..c68a2da6c 100644
--- a/src/mem/ruby/structures/RubyMemoryControl.hh
+++ b/src/mem/ruby/structures/RubyMemoryControl.hh
@@ -162,11 +162,11 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
// Each entry indicates number of address-bus cycles until bank
// is reschedulable:
- int *m_bankBusyCounter;
- int *m_oldRequest;
+ int* m_bankBusyCounter;
+ int* m_oldRequest;
- uint64_t *m_tfaw_shift;
- int *m_tfaw_count;
+ uint64* m_tfaw_shift;
+ int* m_tfaw_count;
// Each of these indicates number of address-bus cycles until
// we can issue a new request of the corresponding type:
@@ -182,12 +182,12 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
int m_ageCounter; // age of old requests; to detect starvation
int m_idleCount; // watchdog timer for shutting down
- MemCntrlProfiler *m_profiler_ptr;
+ MemCntrlProfiler* m_profiler_ptr;
class MemCntrlEvent : public Event
{
public:
- MemCntrlEvent(RubyMemoryControl *_mem_cntrl)
+ MemCntrlEvent(RubyMemoryControl* _mem_cntrl)
{
mem_cntrl = _mem_cntrl;
}
diff --git a/src/mem/ruby/system/CacheRecorder.cc b/src/mem/ruby/system/CacheRecorder.cc
index 9568d6a88..a2ac6bdf8 100644
--- a/src/mem/ruby/system/CacheRecorder.cc
+++ b/src/mem/ruby/system/CacheRecorder.cc
@@ -58,6 +58,15 @@ CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace,
m_seq_map(seq_map), m_bytes_read(0), m_records_read(0),
m_records_flushed(0), m_block_size_bytes(block_size_bytes)
{
+ if (m_uncompressed_trace != NULL) {
+ if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
+ // Block sizes larger than when the trace was recorded are not
+ // supported, as we cannot reliably turn accesses to smaller blocks
+ // into larger ones.
+ panic("Recorded cache block size (%d) < current block size (%d) !!",
+ m_block_size_bytes, RubySystem::getBlockSizeBytes());
+ }
+ }
}
CacheRecorder::~CacheRecorder()
@@ -152,13 +161,13 @@ CacheRecorder::addRecord(int cntrl, Addr data_addr, Addr pc_addr,
m_records.push_back(rec);
}
-uint64_t
-CacheRecorder::aggregateRecords(uint8_t **buf, uint64_t total_size)
+uint64
+CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
{
std::sort(m_records.begin(), m_records.end(), compareTraceRecords);
int size = m_records.size();
- uint64_t current_size = 0;
+ uint64 current_size = 0;
int record_size = sizeof(TraceRecord) + m_block_size_bytes;
for (int i = 0; i < size; ++i) {
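
aggregateRecords() sorts the trace and then packs fixed-size entries, each a TraceRecord header followed by one cache block of data, into a single flat buffer for the checkpoint (hence record_size = sizeof(TraceRecord) + m_block_size_bytes). A sketch of that packing, with a hypothetical POD header standing in for TraceRecord:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct TraceRecordSketch { uint64_t addr; int type; }; // hypothetical header

    // Pack N (header, block) pairs back to back, as aggregateRecords() does.
    uint64_t pack(const std::vector<TraceRecordSketch> &recs,
                  const std::vector<uint8_t> &blocks, // recs.size() * blk bytes
                  std::size_t blk, std::vector<uint8_t> &out)
    {
        const std::size_t record_size = sizeof(TraceRecordSketch) + blk;
        out.resize(recs.size() * record_size);
        uint8_t *p = out.data();
        for (std::size_t i = 0; i < recs.size(); i++) {
            std::memcpy(p, &recs[i], sizeof(TraceRecordSketch));
            std::memcpy(p + sizeof(TraceRecordSketch), &blocks[i * blk], blk);
            p += record_size;
        }
        return out.size(); // total bytes, cf. current_size in the hunk above
    }
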
diff --git a/src/mem/ruby/system/CacheRecorder.hh b/src/mem/ruby/system/CacheRecorder.hh
index 44110cf9f..a4a7261f4 100644
--- a/src/mem/ruby/system/CacheRecorder.hh
+++ b/src/mem/ruby/system/CacheRecorder.hh
@@ -77,7 +77,7 @@ class CacheRecorder
void addRecord(int cntrl, Addr data_addr, Addr pc_addr,
RubyRequestType type, Tick time, DataBlock& data);
- uint64_t aggregateRecords(uint8_t **data, uint64_t size);
+ uint64 aggregateRecords(uint8_t** data, uint64 size);
/*!
* Function for flushing the memory contents of the caches to the
diff --git a/src/mem/ruby/system/RubySystem.py b/src/mem/ruby/system/RubySystem.py
index 9ffaa5702..81a9a181b 100644
--- a/src/mem/ruby/system/RubySystem.py
+++ b/src/mem/ruby/system/RubySystem.py
@@ -34,6 +34,7 @@ from SimpleMemory import *
class RubySystem(ClockedObject):
type = 'RubySystem'
cxx_header = "mem/ruby/system/System.hh"
+ random_seed = Param.Int(1234, "random seed used by the simulation");
randomization = Param.Bool(False,
"insert random delays on message enqueue times");
block_size_bytes = Param.UInt32(64,
@@ -41,13 +42,11 @@ class RubySystem(ClockedObject):
memory_size_bits = Param.UInt32(64,
"number of bits that a memory address requires");
- phys_mem = Param.SimpleMemory(NULL, "")
-
- access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
- store and only use ruby for timing.")
-
# Profiler related configuration variables
hot_lines = Param.Bool(False, "")
all_instructions = Param.Bool(False, "")
num_of_sequencers = Param.Int("")
- number_of_virtual_networks = Param.Unsigned("")
+ phys_mem = Param.SimpleMemory(NULL, "")
+
+ access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
+ store and only use ruby for timing.")
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 740db7d8d..305758798 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -317,27 +317,28 @@ Sequencer::removeRequest(SequencerRequest* srequest)
void
Sequencer::invalidateSC(Addr address)
{
- AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
- // The controller has lost the coherence permissions, hence the lock
- // on the cache line maintained by the cache should be cleared.
- if (e && e->isLocked(m_version)) {
- e->clearLocked();
+ RequestTable::iterator i = m_writeRequestTable.find(address);
+ if (i != m_writeRequestTable.end()) {
+ SequencerRequest* request = i->second;
+ // The controller has lost the coherence permissions, hence the lock
+ // on the cache line maintained by the cache should be cleared.
+ if (request->m_type == RubyRequestType_Store_Conditional) {
+ m_dataCache_ptr->clearLocked(address);
+ }
}
}
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
- AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
- if (!e)
- return true;
-
+ //
// The success flag indicates whether the LLSC operation was successful.
// LL ops will always succeed, but SC may fail if the cache line is no
// longer locked.
+ //
bool success = true;
if (request->m_type == RubyRequestType_Store_Conditional) {
- if (!e->isLocked(m_version)) {
+ if (!m_dataCache_ptr->isLocked(address, m_version)) {
//
// For failed SC requests, indicate the failure to the cpu by
// setting the extra data to zero.
@@ -354,18 +355,19 @@ Sequencer::handleLlsc(Addr address, SequencerRequest* request)
//
// Independent of success, all SC operations must clear the lock
//
- e->clearLocked();
+ m_dataCache_ptr->clearLocked(address);
} else if (request->m_type == RubyRequestType_Load_Linked) {
//
// Note: To fully follow Alpha LLSC semantics, should the LL clear any
// previously locked cache lines?
//
- e->setLocked(m_version);
- } else if (e->isLocked(m_version)) {
+ m_dataCache_ptr->setLocked(address, m_version);
+ } else if ((m_dataCache_ptr->isTagPresent(address)) &&
+ (m_dataCache_ptr->isLocked(address, m_version))) {
//
// Normal writes should clear the locked address
//
- e->clearLocked();
+ m_dataCache_ptr->clearLocked(address);
}
return success;
}
@@ -496,15 +498,19 @@ Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
const Cycles forwardRequestTime,
const Cycles firstResponseTime)
{
- warn_once("Replacement policy updates recently became the responsibility "
- "of SLICC state machines. Make sure to setMRU() near callbacks "
- "in .sm files!");
-
PacketPtr pkt = srequest->pkt;
Addr request_address(pkt->getAddr());
+ Addr request_line_address = makeLineAddress(pkt->getAddr());
RubyRequestType type = srequest->m_type;
Cycles issued_time = srequest->issue_time;
+ // Set this cache entry to the most recently used
+ if (type == RubyRequestType_IFETCH) {
+ m_instCache_ptr->setMRU(request_line_address);
+ } else {
+ m_dataCache_ptr->setMRU(request_line_address);
+ }
+
assert(curCycle() >= issued_time);
Cycles total_latency = curCycle() - issued_time;
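
handleLlsc() now routes everything through CacheMemory's address-based lock calls rather than the cache entry itself, but the protocol is unchanged: LL takes a reservation for this context, SC succeeds only if the reservation survived (and always clears it), and an ordinary store that hits a locked line breaks the reservation. The decision tree in miniature, with a stand-in lock table for m_dataCache_ptr and 'version' playing the sequencer's context id:

    #include <cstdint>
    #include <unordered_map>

    enum class Op { LoadLinked, StoreConditional, Store };

    struct LockTable {
        std::unordered_map<uint64_t, int> locked; // line addr -> context
        bool isLocked(uint64_t a, int c) const {
            auto it = locked.find(a);
            return it != locked.end() && it->second == c;
        }
    };

    bool handleLlscSketch(LockTable &cache, uint64_t addr, Op op, int version)
    {
        bool success = true;
        if (op == Op::StoreConditional) {
            success = cache.isLocked(addr, version); // fails if lock was lost
            cache.locked.erase(addr);     // SC always clears the lock
        } else if (op == Op::LoadLinked) {
            cache.locked[addr] = version; // take the reservation
        } else if (cache.isLocked(addr, version)) {
            cache.locked.erase(addr);     // normal write breaks the reservation
        }
        return success;
    }
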
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index cb485a47b..c00082010 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -45,6 +45,7 @@
using namespace std;
+int RubySystem::m_random_seed;
bool RubySystem::m_randomization;
uint32_t RubySystem::m_block_size_bytes;
uint32_t RubySystem::m_block_size_bits;
@@ -59,6 +60,8 @@ RubySystem::RubySystem(const Params *p)
: ClockedObject(p), m_access_backing_store(p->access_backing_store),
m_cache_recorder(NULL)
{
+ m_random_seed = p->random_seed;
+ srandom(m_random_seed);
m_randomization = p->randomization;
m_block_size_bytes = p->block_size_bytes;
@@ -99,8 +102,8 @@ RubySystem::~RubySystem()
void
RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
- uint64_t cache_trace_size,
- uint64_t block_size_bytes)
+ uint64 cache_trace_size,
+ uint64 block_size_bytes)
{
vector<Sequencer*> sequencer_map;
Sequencer* sequencer_ptr = NULL;
@@ -204,7 +207,7 @@ RubySystem::memWriteback()
void
RubySystem::writeCompressedTrace(uint8_t *raw_data, string filename,
- uint64_t uncompressed_trace_size)
+ uint64 uncompressed_trace_size)
{
// Create the checkpoint file for the memory
string thefile = CheckpointIn::dir() + "/" + filename.c_str();
@@ -237,7 +240,7 @@ RubySystem::serializeOld(CheckpointOut &cp)
// Store the cache-block size, so we are able to restore on systems with a
// different cache-block size. CacheRecorder depends on the correct
// cache-block size upon unserializing.
- uint64_t block_size_bytes = getBlockSizeBytes();
+ uint64 block_size_bytes = getBlockSizeBytes();
SERIALIZE_SCALAR(block_size_bytes);
// Check that there's a valid trace to use. If not, then memory won't be
@@ -249,7 +252,7 @@ RubySystem::serializeOld(CheckpointOut &cp)
// Aggregate the trace entries together into a single array
uint8_t *raw_data = new uint8_t[4096];
- uint64_t cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
+ uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
4096);
string cache_trace_file = name() + ".cache.gz";
writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);
@@ -264,7 +267,7 @@ RubySystem::serializeOld(CheckpointOut &cp)
void
RubySystem::readCompressedTrace(string filename, uint8_t *&raw_data,
- uint64_t &uncompressed_trace_size)
+ uint64& uncompressed_trace_size)
{
// Read the trace file
gzFile compressedTrace;
@@ -301,19 +304,11 @@ RubySystem::unserialize(CheckpointIn &cp)
// This value should be set to the checkpoint-system's block-size.
// Optional, as checkpoints without it can be run if the
// checkpoint-system's block-size == current block-size.
- uint64_t block_size_bytes = m_block_size_bytes;
+ uint64 block_size_bytes = getBlockSizeBytes();
UNSERIALIZE_OPT_SCALAR(block_size_bytes);
- if (block_size_bytes < m_block_size_bytes) {
- // Block sizes larger than when the trace was recorded are not
- // supported, as we cannot reliably turn accesses to smaller blocks
- // into larger ones.
- panic("Recorded cache block size (%d) < current block size (%d) !!",
- block_size_bytes, m_block_size_bytes);
- }
-
string cache_trace_file;
- uint64_t cache_trace_size = 0;
+ uint64 cache_trace_size = 0;
UNSERIALIZE_SCALAR(cache_trace_file);
UNSERIALIZE_SCALAR(cache_trace_size);
diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh
index 70d216201..787e4f4ae 100644
--- a/src/mem/ruby/system/System.hh
+++ b/src/mem/ruby/system/System.hh
@@ -70,6 +70,7 @@ class RubySystem : public ClockedObject
~RubySystem();
// config accessors
+ static int getRandomSeed() { return m_random_seed; }
static int getRandomization() { return m_randomization; }
static uint32_t getBlockSizeBytes() { return m_block_size_bytes; }
static uint32_t getBlockSizeBits() { return m_block_size_bits; }
@@ -117,17 +118,18 @@ class RubySystem : public ClockedObject
RubySystem& operator=(const RubySystem& obj);
void makeCacheRecorder(uint8_t *uncompressed_trace,
- uint64_t cache_trace_size,
- uint64_t block_size_bytes);
+ uint64 cache_trace_size,
+ uint64 block_size_bytes);
void readCompressedTrace(std::string filename,
uint8_t *&raw_data,
- uint64_t &uncompressed_trace_size);
+ uint64& uncompressed_trace_size);
void writeCompressedTrace(uint8_t *raw_data, std::string file,
- uint64_t uncompressed_trace_size);
+ uint64 uncompressed_trace_size);
private:
// configuration parameters
+ static int m_random_seed;
static bool m_randomization;
static uint32_t m_block_size_bytes;
static uint32_t m_block_size_bits;
diff --git a/src/mem/slicc/ast/EnumDeclAST.py b/src/mem/slicc/ast/EnumDeclAST.py
index bc0c1c224..d97c13483 100644
--- a/src/mem/slicc/ast/EnumDeclAST.py
+++ b/src/mem/slicc/ast/EnumDeclAST.py
@@ -67,6 +67,6 @@ class EnumDeclAST(DeclAST):
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" + t.c_ident,
func_id, self.location,
- self.symtab.find("std::string", Type), [ t ], [], [], "",
+ self.symtab.find("std::string", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
diff --git a/src/mem/slicc/ast/FormalParamAST.py b/src/mem/slicc/ast/FormalParamAST.py
index ef39b40f0..ce73304f1 100644
--- a/src/mem/slicc/ast/FormalParamAST.py
+++ b/src/mem/slicc/ast/FormalParamAST.py
@@ -46,9 +46,6 @@ class FormalParamAST(AST):
def generate(self):
type = self.type_ast.type
param = "param_%s" % self.ident
- proto = ""
- body = ""
- default = False
# Add to symbol table
v = Var(self.symtab, self.ident, self.location, type, param,
@@ -59,21 +56,6 @@ class FormalParamAST(AST):
"interface" in type and (
type["interface"] == "AbstractCacheEntry" or
type["interface"] == "AbstractEntry")):
- proto = "%s* %s" % (type.c_ident, param)
- body = proto
- elif self.default != None:
- value = ""
- if self.default == True:
- value = "true"
- elif self.default == False:
- value = "false"
- else:
- value = "%s" % self.default
- proto = "const %s& %s = %s" % (type.c_ident, param, value)
- body = "const %s& %s" % (type.c_ident, param)
- default = True
+ return type, "%s* %s" % (type.c_ident, param)
else:
- proto = "const %s& %s" % (type.c_ident, param)
- body = proto
-
- return type, proto, body, default
+ return type, "const %s& %s" % (type.c_ident, param)
diff --git a/src/mem/slicc/ast/FuncCallExprAST.py b/src/mem/slicc/ast/FuncCallExprAST.py
index 0c9880d6d..9336a2297 100644
--- a/src/mem/slicc/ast/FuncCallExprAST.py
+++ b/src/mem/slicc/ast/FuncCallExprAST.py
@@ -93,7 +93,22 @@ class FuncCallExprAST(ExprAST):
if func is None:
self.error("Unrecognized function name: '%s'", func_name_args)
- cvec, type_vec = func.checkArguments(self.exprs)
+ if len(self.exprs) != len(func.param_types):
+ self.error("Wrong number of arguments passed to function : '%s'" +\
+ " Expected %d, got %d", self.proc_name,
+ len(func.param_types), len(self.exprs))
+
+ cvec = []
+ type_vec = []
+ for expr,expected_type in zip(self.exprs, func.param_types):
+ # Check the types of the parameter
+ actual_type,param_code = expr.inline(True)
+ if str(actual_type) != 'OOD' and \
+ str(actual_type) != str(expected_type):
+ expr.error("Type mismatch: expected: %s actual: %s" % \
+ (expected_type, actual_type))
+ cvec.append(param_code)
+ type_vec.append(expected_type)
# OK, the semantics of "trigger" here is that, ports in the
# machine have different priorities. We always check the first
diff --git a/src/mem/slicc/ast/FuncDeclAST.py b/src/mem/slicc/ast/FuncDeclAST.py
index 4e64c0ba5..47ae7076e 100644
--- a/src/mem/slicc/ast/FuncDeclAST.py
+++ b/src/mem/slicc/ast/FuncDeclAST.py
@@ -45,9 +45,7 @@ class FuncDeclAST(DeclAST):
def generate(self, parent = None):
types = []
- proto_params = []
- body_params = []
- default_count = 0
+ params = []
void_type = self.symtab.find("void", Type)
# Generate definition code
@@ -60,17 +58,13 @@ class FuncDeclAST(DeclAST):
for formal in self.formals:
# Lookup parameter types
try:
- type, proto, body, default = formal.generate()
+ type, ident = formal.generate()
types.append(type)
- proto_params.append(proto)
- body_params.append(body)
- if default:
- default_count += 1
+ params.append(ident)
except AttributeError:
types.append(formal.type)
- proto_params.append(None)
- body_params.append(None)
+ params.append(None)
body = self.slicc.codeFormatter()
if self.statements is None:
@@ -93,8 +87,7 @@ class FuncDeclAST(DeclAST):
machine = self.state_machine
func = Func(self.symtab, func_name_args, self.ident, self.location,
- return_type, types, proto_params,
- body_params, str(body), self.pairs, default_count)
+ return_type, types, params, str(body), self.pairs)
if parent is not None:
if not parent.addFunc(func):
diff --git a/src/mem/slicc/ast/InPortDeclAST.py b/src/mem/slicc/ast/InPortDeclAST.py
index 2ef043151..7a019a0e0 100644
--- a/src/mem/slicc/ast/InPortDeclAST.py
+++ b/src/mem/slicc/ast/InPortDeclAST.py
@@ -89,13 +89,13 @@ class InPortDeclAST(DeclAST):
for param in param_types:
trigger_func_name += "_" + param.ident
func = Func(self.symtab, trigger_func_name, "trigger", self.location,
- void_type, param_types, [], [], "", pairs)
+ void_type, param_types, [], "", pairs)
symtab.newSymbol(func)
# Add the stallPort method - this hack reschedules the controller
# for stalled messages that don't trigger events
func = Func(self.symtab, "stallPort", "stallPort", self.location,
- void_type, [], [], [], "", pairs)
+ void_type, [], [], "", pairs)
symtab.newSymbol(func)
param_types = []
diff --git a/src/mem/slicc/ast/MethodCallExprAST.py b/src/mem/slicc/ast/MethodCallExprAST.py
index 104d6f8df..8be319a40 100644
--- a/src/mem/slicc/ast/MethodCallExprAST.py
+++ b/src/mem/slicc/ast/MethodCallExprAST.py
@@ -56,8 +56,20 @@ class MethodCallExprAST(ExprAST):
self.error("Invalid method call: Type '%s' does not have a method '%s'",
obj_type, methodId)
- func = obj_type.methods[methodId]
- func.checkArguments(self.expr_ast_vec)
+ if len(self.expr_ast_vec) != \
+ len(obj_type.methods[methodId].param_types):
+ # Right number of parameters
+ self.error("Wrong number of parameters for function name: '%s', " + \
+ "expected: , actual: ", proc_name,
+ len(obj_type.methods[methodId].param_types),
+ len(self.expr_ast_vec))
+
+ for actual_type, expected_type in \
+ zip(paramTypes, obj_type.methods[methodId].param_types):
+ if actual_type != expected_type and \
+ str(actual_type["interface"]) != str(expected_type):
+ self.error("Type mismatch: expected: %s actual: %s",
+ expected_type, actual_type)
# Return the return type of the method
return obj_type.methods[methodId].return_type
@@ -66,9 +78,10 @@ class MethodCallExprAST(ExprAST):
pass
class MemberMethodCallExprAST(MethodCallExprAST):
- def __init__(self, slicc, obj_expr_ast, func_call):
+ def __init__(self, slicc, obj_expr_ast, proc_name, expr_ast_vec):
s = super(MemberMethodCallExprAST, self)
- s.__init__(slicc, func_call.proc_name, func_call.exprs)
+ s.__init__(slicc, proc_name, expr_ast_vec)
+
self.obj_expr_ast = obj_expr_ast
def __repr__(self):
diff --git a/src/mem/slicc/ast/StateDeclAST.py b/src/mem/slicc/ast/StateDeclAST.py
index a33ea9245..f0a0b97d3 100644
--- a/src/mem/slicc/ast/StateDeclAST.py
+++ b/src/mem/slicc/ast/StateDeclAST.py
@@ -66,7 +66,7 @@ class StateDeclAST(DeclAST):
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" +
t.ident, func_id, self.location,
- self.symtab.find("std::string", Type), [ t ], [], [], "",
+ self.symtab.find("std::string", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
@@ -76,6 +76,6 @@ class StateDeclAST(DeclAST):
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" +
t.ident, func_id, self.location,
- self.symtab.find("AccessPermission", Type), [ t ], [], [], "",
+ self.symtab.find("AccessPermission", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
diff --git a/src/mem/slicc/parser.py b/src/mem/slicc/parser.py
index 07c067f68..0cbe9ea63 100644
--- a/src/mem/slicc/parser.py
+++ b/src/mem/slicc/parser.py
@@ -669,13 +669,15 @@ class SLICC(Grammar):
def p_expr__member_method_call(self, p):
"aexpr : aexpr DOT ident '(' exprs ')'"
- p[0] = ast.MemberMethodCallExprAST(self, p[1],
- ast.FuncCallExprAST(self, p[3], p[5]))
+ p[0] = ast.MemberMethodCallExprAST(self, p[1], p[3], p[5])
+
+ def p_expr__member_method_call_lookup(self, p):
+ "aexpr : aexpr '[' exprs ']'"
+ p[0] = ast.MemberMethodCallExprAST(self, p[1], "lookup", p[3])
def p_expr__class_method_call(self, p):
"aexpr : type DOUBLE_COLON ident '(' exprs ')'"
- p[0] = ast.ClassMethodCallExprAST(self, p[1],
- ast.FuncCallExprAST(self, p[3], p[5]))
+ p[0] = ast.ClassMethodCallExprAST(self, p[1], p[3], p[5])
def p_expr__aexpr(self, p):
"expr : aexpr"
diff --git a/src/mem/slicc/symbols/Func.py b/src/mem/slicc/symbols/Func.py
index 695450b9c..d50d0309f 100644
--- a/src/mem/slicc/symbols/Func.py
+++ b/src/mem/slicc/symbols/Func.py
@@ -30,19 +30,16 @@ from slicc.symbols.Type import Type
class Func(Symbol):
def __init__(self, table, ident, name, location, return_type, param_types,
- proto_param_strings, body_param_strings, body,
- pairs, default_count = 0):
+ param_strings, body, pairs):
super(Func, self).__init__(table, ident, location, pairs)
self.return_type = return_type
self.param_types = param_types
- self.proto_param_strings = proto_param_strings
- self.body_param_strings = body_param_strings
+ self.param_strings = param_strings
self.body = body
self.isInternalMachineFunc = False
self.c_ident = ident
self.c_name = name
self.class_name = ""
- self.default_count = default_count
def __repr__(self):
return ""
@@ -60,33 +57,11 @@ class Func(Symbol):
return_type += "*"
return "%s %s(%s);" % (return_type, self.c_name,
- ", ".join(self.proto_param_strings))
+ ", ".join(self.param_strings))
def writeCodeFiles(self, path, includes):
return
- def checkArguments(self, args):
- if len(args) + self.default_count < len(self.param_types) or \
- len(args) > len(self.param_types):
- self.error("Wrong number of arguments passed to function: '%s'" + \
- " Expected at least: %d, got: %d", self.c_ident,
- len(self.param_types) - self.default_count, len(args))
-
- cvec = []
- type_vec = []
- for expr,expected_type in zip(args, self.param_types):
- # Check the types of the parameter
- actual_type,param_code = expr.inline(True)
- if str(actual_type) != 'OOD' and \
- str(actual_type) != str(expected_type) and \
- str(actual_type["interface"]) != str(expected_type):
- expr.error("Type mismatch: expected: %s actual: %s" % \
- (expected_type, actual_type))
- cvec.append(param_code)
- type_vec.append(expected_type)
-
- return cvec, type_vec
-
def generateCode(self):
'''This writes a function of the Chip object'''
if "external" in self:
@@ -95,14 +70,14 @@ class Func(Symbol):
code = self.symtab.codeFormatter()
# Generate function header
- return_type = self.return_type.c_ident
void_type = self.symtab.find("void", Type)
+ return_type = self.return_type.c_ident
if "return_by_ref" in self and self.return_type != void_type:
return_type += "&"
if "return_by_pointer" in self and self.return_type != void_type:
return_type += "*"
- params = ', '.join(self.body_param_strings)
+ params = ', '.join(self.param_strings)
code('''
$return_type
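[note: Func now carries a single param_strings list used for both the
prototype and the body, instead of separate proto/body lists (the proto
list previously also carried C++ default values). A minimal sketch of the
prototype emission after this change; prototype() here is a hypothetical
standalone version of Func.prototype:]

    def prototype(return_type, c_name, param_strings,
                  by_ref=False, by_pointer=False):
        # The "return_by_ref" / "return_by_pointer" pairs decorate the
        # return type; the real Func.prototype skips both for void.
        if by_ref:
            return_type += "&"
        if by_pointer:
            return_type += "*"
        return "%s %s(%s);" % (return_type, c_name, ", ".join(param_strings))

    # e.g. prototype("Entry", "getCacheEntry", ["Addr addr"],
    #                by_pointer=True) -> "Entry* getCacheEntry(Addr addr);"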
diff --git a/src/mem/slicc/symbols/StateMachine.py b/src/mem/slicc/symbols/StateMachine.py
index 3dce3c3f2..03c78c8bf 100644
--- a/src/mem/slicc/symbols/StateMachine.py
+++ b/src/mem/slicc/symbols/StateMachine.py
@@ -320,9 +320,9 @@ class $c_ident : public AbstractController
void countTransition(${ident}_State state, ${ident}_Event event);
void possibleTransition(${ident}_State state, ${ident}_Event event);
- uint64_t getEventCount(${ident}_Event event);
+ uint64 getEventCount(${ident}_Event event);
bool isPossible(${ident}_State state, ${ident}_Event event);
- uint64_t getTransitionCount(${ident}_State state, ${ident}_Event event);
+ uint64 getTransitionCount(${ident}_State state, ${ident}_Event event);
private:
''')
@@ -802,7 +802,7 @@ $c_ident::possibleTransition(${ident}_State state,
m_possible[state][event] = true;
}
-uint64_t
+uint64
$c_ident::getEventCount(${ident}_Event event)
{
return m_event_counters[event];
@@ -814,7 +814,7 @@ $c_ident::isPossible(${ident}_State state, ${ident}_Event event)
return m_possible[state][event];
}
-uint64_t
+uint64
$c_ident::getTransitionCount(${ident}_State state,
${ident}_Event event)
{
@@ -1213,6 +1213,8 @@ TransitionResult result =
else:
code('doTransitionWorker(event, state, next_state, addr);')
+ port_to_buf_map, in_msg_bufs, msg_bufs = self.getBufferMaps(ident)
+
code('''
if (result == TransitionResult_Valid) {
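[note: getBufferMaps is defined elsewhere in this patch, so the shapes below
are assumptions: plausibly it relates each in_port to the message buffers it
reads (and back), letting the generated doTransition wake exactly the
buffers holding messages stalled by a failed transition, via the stallPort
hook registered in FuncDeclAST above. Hypothetical contents only:]

    port_to_buf_map = {0: [0]}                   # in_port index -> buffer indices
    in_msg_bufs = {"m_mandatoryQueue_ptr": [0]}  # buffer name -> in_port indices
    msg_bufs = ["m_mandatoryQueue_ptr"]          # buffers in declaration order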