author     Nilay Vaish <nilay@cs.wisc.edu>    2015-08-19 10:02:01 -0500
committer  Nilay Vaish <nilay@cs.wisc.edu>    2015-08-19 10:02:01 -0500
commit     2f44dada688ace9c24f085a8422b3054c3edb72e (patch)
tree       372bb043430552b0f4424eaa5571933883fcaaae /src/mem/protocol
parent     2d9f3f8582e2de60850852c803a8c8ba0d6b91b5 (diff)
download   gem5-2f44dada688ace9c24f085a8422b3054c3edb72e.tar.xz
ruby: reverts to changeset: bf82f1f7b040
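
The revert applies the same few changes across the SLICC protocol files under src/mem/protocol: TBE and cache lookups go back from the explicit .lookup(addr) call to bracket indexing, the setMRU() calls disappear from the cache-hit actions, and the separate ifetch-hit actions are folded back into the load-hit actions. As a rough sketch of the lookup idiom being restored (lines taken from the L0/L1 cache controllers in the diff below, shown out of context for illustration only):

    // Form being undone by this revert:
    TBE tbe := TBEs.lookup(addr);
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
    Dcache.setMRU(cache_entry);    // hit actions updated replacement state explicitly

    // Form restored by this revert:
    TBE tbe := TBEs[addr];
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
    // (no setMRU call in the hit actions)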
Diffstat (limited to 'src/mem/protocol')
-rw-r--r--  src/mem/protocol/MESI_Three_Level-L0cache.sm    | 62
-rw-r--r--  src/mem/protocol/MESI_Three_Level-L1cache.sm    | 18
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L1cache.sm      | 81
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L2cache.sm      | 20
-rw-r--r--  src/mem/protocol/MESI_Two_Level-dir.sm          | 24
-rw-r--r--  src/mem/protocol/MI_example-cache.sm            | 20
-rw-r--r--  src/mem/protocol/MI_example-dir.sm              | 20
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm | 67
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L2cache.sm | 60
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dir.sm     | 36
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dma.sm     | 14
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L1cache.sm     | 47
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-dir.sm         | 56
-rw-r--r--  src/mem/protocol/MOESI_hammer-cache.sm          | 65
-rw-r--r--  src/mem/protocol/MOESI_hammer-dir.sm            | 26
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm             |  6
16 files changed, 272 insertions, 350 deletions
diff --git a/src/mem/protocol/MESI_Three_Level-L0cache.sm b/src/mem/protocol/MESI_Three_Level-L0cache.sm
index fb9e762da..8e44766ea 100644
--- a/src/mem/protocol/MESI_Three_Level-L0cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L0cache.sm
@@ -145,22 +145,22 @@ machine(L0Cache, "MESI Directory L0 Cache")
// inclusive cache returns L0 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
if(is_valid(Dcache_entry)) {
return Dcache_entry;
}
- Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
return Icache_entry;
}
Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
return Dcache_entry;
}
Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
return Icache_entry;
}
@@ -189,7 +189,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
return L0Cache_State_to_permission(tbe.TBEState);
@@ -206,7 +206,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -217,7 +217,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -260,7 +260,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
assert(in_msg.Dest == machineID);
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -301,7 +301,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Icache_entry)) {
// The tag matches for the L0, so the L0 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L0
@@ -309,19 +309,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Dcache_entry)) {
// The block is in the wrong L0, put the request on the queue to the shared L2
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (Icache.cacheAvail(in_msg.LineAddress)) {
// L0 does't have the line, but we have space for it
// in the L0 so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L0, so we need to make room in the L0
trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -331,7 +331,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Dcache_entry)) {
// The tag matches for the L0, so the L0 ask the L1 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L0
@@ -339,19 +339,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
if (is_valid(Icache_entry)) {
// The block is in the wrong L0, put the request on the queue to the private L1
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
}
if (Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 does't have the line, but we have space for it
// in the L0 let's see if the L1 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L0
trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -459,38 +459,21 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
}
- action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
- action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
+ action(hx_load_hit, "hx", desc="If not prefetch, notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, true);
- }
-
- action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
@@ -498,7 +481,6 @@ machine(L0Cache, "MESI Directory L0 Cache")
action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
@@ -507,7 +489,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
@@ -643,7 +625,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -730,7 +712,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
transition(Inst_IS, Data, S) {
u_writeInstToCache;
- hx_ifetch_hit;
+ hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
@@ -738,7 +720,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
transition(Inst_IS, Data_Exclusive, E) {
u_writeInstToCache;
- hx_ifetch_hit;
+ hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
diff --git a/src/mem/protocol/MESI_Three_Level-L1cache.sm b/src/mem/protocol/MESI_Three_Level-L1cache.sm
index 9bab20def..6c8df8d75 100644
--- a/src/mem/protocol/MESI_Three_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L1cache.sm
@@ -161,7 +161,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", cache.lookup(addr));
+ Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
return cache_entry;
}
@@ -186,7 +186,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
@@ -203,7 +203,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -214,7 +214,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -271,7 +271,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -307,7 +307,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:INV) {
if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
@@ -343,7 +343,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (messageBufferFromL0_in.isReady()) {
peek(messageBufferFromL0_in, CoherenceMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Class == CoherenceClass:INV_DATA) {
trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
@@ -363,7 +363,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// No room in the L1, so we need to make room in the L1
Entry victim_entry :=
getCacheEntry(cache.cacheProbe(in_msg.addr));
- TBE victim_tbe := TBEs.lookup(cache.cacheProbe(in_msg.addr));
+ TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
trigger(Event:L0_Invalidate_Own,
@@ -628,7 +628,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
diff --git a/src/mem/protocol/MESI_Two_Level-L1cache.sm b/src/mem/protocol/MESI_Two_Level-L1cache.sm
index f4978050d..184f735c7 100644
--- a/src/mem/protocol/MESI_Two_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L1cache.sm
@@ -164,22 +164,22 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
if(is_valid(L1Dcache_entry)) {
return L1Dcache_entry;
}
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
return L1Dcache_entry;
}
Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
@@ -208,7 +208,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
@@ -225,7 +225,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -236,7 +236,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -305,7 +305,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
@@ -315,7 +315,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
@@ -323,13 +323,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// in the L1 so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
} else {
// Data prefetch
@@ -339,7 +339,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
@@ -349,7 +349,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
@@ -357,13 +357,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// the L1 let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -377,7 +377,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
@@ -417,7 +417,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
@@ -450,7 +450,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
@@ -458,19 +458,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 does't have the line, but we have space for it
// in the L1 so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -480,7 +480,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 ask the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
@@ -488,19 +488,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 does't have the line, but we have space for it
// in the L1 let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -809,47 +809,36 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
sequencer.invalidateSC(address);
}
- action(h_load_hit, "hd",
- desc="Notify sequencer the load completed.")
+ action(h_load_hit, "h",
+ desc="If not prefetch, notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
- action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
+ action(hx_load_hit, "hx",
+ desc="If not prefetch, notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
- action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
+ action(hh_store_hit, "\h",
+ desc="If not prefetch, notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
- action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
+ action(hhx_store_hit, "\hx",
+ desc="If not prefetch, notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
@@ -858,7 +847,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.isPrefetch := false;
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
@@ -1091,7 +1080,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
diff --git a/src/mem/protocol/MESI_Two_Level-L2cache.sm b/src/mem/protocol/MESI_Two_Level-L2cache.sm
index 739a6f713..e4f719d9f 100644
--- a/src/mem/protocol/MESI_Two_Level-L2cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L2cache.sm
@@ -157,7 +157,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// inclusive cache, returns L2 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(addr));
+ return static_cast(Entry, "pointer", L2cache[addr]);
}
bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
@@ -196,7 +196,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
@@ -213,7 +213,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -224,7 +224,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -288,7 +288,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
if(L1unblockNetwork_in.isReady()) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
in_msg.Sender, in_msg.Type, in_msg.Destination);
@@ -312,7 +312,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// test wether it's from a local L1 or an off chip source
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
if(in_msg.Type == CoherenceResponseType:DATA) {
@@ -351,7 +351,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
if(L1RequestL2Network_in.isReady()) {
peek(L1RequestL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
@@ -376,10 +376,10 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
if (isDirty(L2cache_entry)) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
} else {
trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
}
}
@@ -591,7 +591,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.L1_GetS_IDs.clear();
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
diff --git a/src/mem/protocol/MESI_Two_Level-dir.sm b/src/mem/protocol/MESI_Two_Level-dir.sm
index 6c5c84f2f..22aabee4e 100644
--- a/src/mem/protocol/MESI_Two_Level-dir.sm
+++ b/src/mem/protocol/MESI_Two_Level-dir.sm
@@ -101,7 +101,7 @@ machine(Directory, "MESI Two Level directory protocol")
void wakeUpBuffers(Addr a);
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -133,7 +133,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
return Directory_State_to_permission(tbe.TBEState);
@@ -149,7 +149,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -160,7 +160,7 @@ machine(Directory, "MESI Two Level directory protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -194,13 +194,13 @@ machine(Directory, "MESI Two Level directory protocol")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg);
error("Invalid message");
@@ -214,9 +214,9 @@ machine(Directory, "MESI Two Level directory protocol")
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:CleanReplacement, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -230,9 +230,9 @@ machine(Directory, "MESI Two Level directory protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -390,7 +390,7 @@ machine(Directory, "MESI Two Level directory protocol")
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(requestNetwork_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index d247ce663..3380cd7e6 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -152,7 +152,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
@@ -172,7 +172,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -183,7 +183,7 @@ machine(L1Cache, "MI Example L1 Cache")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -205,7 +205,7 @@ machine(L1Cache, "MI Example L1 Cache")
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
@@ -231,7 +231,7 @@ machine(L1Cache, "MI Example L1 Cache")
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
@@ -254,11 +254,11 @@ machine(L1Cache, "MI Example L1 Cache")
// make room for the block
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(cacheMemory.cacheProbe(in_msg.LineAddress)));
+ TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
}
else {
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- cache_entry, TBEs.lookup(in_msg.LineAddress));
+ cache_entry, TBEs[in_msg.LineAddress]);
}
}
}
@@ -353,7 +353,6 @@ machine(L1Cache, "MI Example L1 Cache")
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false);
}
@@ -361,7 +360,6 @@ machine(L1Cache, "MI Example L1 Cache")
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender));
}
@@ -370,7 +368,6 @@ machine(L1Cache, "MI Example L1 Cache")
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false);
}
@@ -378,7 +375,6 @@ machine(L1Cache, "MI Example L1 Cache")
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender));
}
@@ -400,7 +396,7 @@ machine(L1Cache, "MI Example L1 Cache")
action(v_allocateTBE, "v", desc="Allocate TBE") {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
}
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index c9f6b9be6..a22691bda 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -111,7 +111,7 @@ machine(Directory, "Directory protocol")
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -155,7 +155,7 @@ machine(Directory, "Directory protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
@@ -174,7 +174,7 @@ machine(Directory, "Directory protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -185,7 +185,7 @@ machine(Directory, "Directory protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -207,7 +207,7 @@ machine(Directory, "Directory protocol")
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
@@ -222,7 +222,7 @@ machine(Directory, "Directory protocol")
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.addr, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
@@ -245,7 +245,7 @@ machine(Directory, "Directory protocol")
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
@@ -403,7 +403,7 @@ machine(Directory, "Directory protocol")
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
@@ -414,7 +414,7 @@ machine(Directory, "Directory protocol")
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DmaRequestor := in_msg.Requestor;
}
}
@@ -422,7 +422,7 @@ machine(Directory, "Directory protocol")
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
}
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index 7a8f35333..8a2eee1e2 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -190,7 +190,7 @@ machine(L1Cache, "Directory protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
@@ -217,7 +217,7 @@ machine(L1Cache, "Directory protocol")
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -236,7 +236,7 @@ machine(L1Cache, "Directory protocol")
return num_functional_writes;
}
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
@@ -269,7 +269,7 @@ machine(L1Cache, "Directory protocol")
if (useTimerTable_in.isReady()) {
trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
getCacheEntry(useTimerTable.readyAddress()),
- TBEs.lookup(useTimerTable.readyAddress()));
+ TBEs[useTimerTable.readyAddress()]);
}
}
@@ -279,7 +279,7 @@ machine(L1Cache, "Directory protocol")
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -299,29 +299,29 @@ machine(L1Cache, "Directory protocol")
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
trigger(Event:Writeback_Ack_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -335,13 +335,13 @@ machine(L1Cache, "Directory protocol")
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Exclusive_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -365,7 +365,7 @@ machine(L1Cache, "Directory protocol")
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
@@ -373,19 +373,19 @@ machine(L1Cache, "Directory protocol")
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -396,7 +396,7 @@ machine(L1Cache, "Directory protocol")
// The tag matches for the L1, so the L1 ask the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
@@ -404,19 +404,19 @@ machine(L1Cache, "Directory protocol")
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -635,32 +635,21 @@ machine(L1Cache, "Directory protocol")
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
@@ -668,8 +657,6 @@ machine(L1Cache, "Directory protocol")
action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
@@ -677,7 +664,7 @@ machine(L1Cache, "Directory protocol")
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
assert(is_valid(cache_entry));
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
@@ -977,7 +964,7 @@ machine(L1Cache, "Directory protocol")
}
transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index e1d665292..38c6e9f9b 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -232,7 +232,7 @@ machine(L2Cache, "Token protocol")
void unset_tbe();
Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(address));
+ return static_cast(Entry, "pointer", L2cache[address]);
}
bool isDirTagPresent(Addr addr) {
@@ -519,7 +519,7 @@ machine(L2Cache, "Token protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
@@ -542,7 +542,7 @@ machine(L2Cache, "Token protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -553,7 +553,7 @@ machine(L2Cache, "Token protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -582,7 +582,7 @@ machine(L2Cache, "Token protocol")
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -598,26 +598,26 @@ machine(L2Cache, "Token protocol")
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -631,25 +631,25 @@ machine(L2Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:L1_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:L1_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:L1_PUTO, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:L1_PUTX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTS) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
trigger(Event:L1_PUTS_only, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
else {
trigger(Event:L1_PUTS, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else {
error("Unexpected message");
@@ -667,35 +667,35 @@ machine(L2Cache, "Token protocol")
if (in_msg.Type == CoherenceResponseType:ACK) {
if (in_msg.SenderMachine == MachineType:L2Cache) {
trigger(Event:ExtAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
else {
trigger(Event:IntAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (is_invalid(cache_entry) &&
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -703,15 +703,15 @@ machine(L2Cache, "Token protocol")
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBCLEANDATA, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DmaAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -1223,7 +1223,7 @@ machine(L2Cache, "Token protocol")
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
if(is_valid(cache_entry)) {
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index ba58a6e9a..dcd37cc33 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -122,7 +122,7 @@ machine(Directory, "Directory protocol")
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -234,26 +234,26 @@ machine(Directory, "Directory protocol")
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
trigger(Event:Dirty_Writeback, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_ACK, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
@@ -265,21 +265,21 @@ machine(Directory, "Directory protocol")
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:PUTX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
- trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
error("Invalid message");
}
@@ -292,9 +292,9 @@ machine(Directory, "Directory protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -540,7 +540,7 @@ machine(Directory, "Directory protocol")
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
peek (requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
tbe.DataBlk := in_msg.DataBlk;
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
index 75c621243..e9931f25b 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -108,10 +108,10 @@ machine(DMA, "DMA Controller")
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
error("Invalid request type");
}
@@ -124,14 +124,14 @@ machine(DMA, "DMA Controller")
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
error("Invalid response type");
}
@@ -144,7 +144,7 @@ machine(DMA, "DMA Controller")
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_Acks, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
@@ -240,7 +240,7 @@ machine(DMA, "DMA Controller")
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
}
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 1d47f1c8a..af6e4c0d5 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -366,7 +366,7 @@ machine(L1Cache, "Token protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := L1_TBEs.lookup(addr);
+ TBE tbe := L1_TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
@@ -459,7 +459,7 @@ machine(L1Cache, "Token protocol")
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
if (useTimerTable_in.isReady()) {
- TBE tbe := L1_TBEs.lookup(useTimerTable.readyAddress());
+ TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
(persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
@@ -487,7 +487,7 @@ machine(L1Cache, "Token protocol")
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
getCacheEntry(reissueTimerTable.readyAddress()),
- L1_TBEs.lookup(reissueTimerTable.readyAddress()));
+ L1_TBEs[reissueTimerTable.readyAddress()]);
}
}
@@ -510,7 +510,7 @@ machine(L1Cache, "Token protocol")
// React to the message based on the current state of the table
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
if (persistentTable.isLocked(in_msg.addr)) {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
@@ -548,7 +548,7 @@ machine(L1Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
@@ -559,7 +559,7 @@ machine(L1Cache, "Token protocol")
// came from an off-chip L2 cache
if (is_valid(tbe)) {
- // L1_TBEs.lookup(in_msg.addr).ExternalResponse := true;
+ // L1_TBEs[in_msg.addr].ExternalResponse := true;
// profile_offchipL2_response(in_msg.addr);
}
}
@@ -619,7 +619,7 @@ machine(L1Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETX) {
if (in_msg.isLocal) {
@@ -665,7 +665,7 @@ machine(L1Cache, "Token protocol")
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
- TBE tbe := L1_TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := L1_TBEs[in_msg.LineAddress];
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
@@ -695,7 +695,7 @@ machine(L1Cache, "Token protocol")
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
@@ -726,7 +726,7 @@ machine(L1Cache, "Token protocol")
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
@@ -1284,22 +1284,12 @@ machine(L1Cache, "Token protocol")
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
- address, cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- MachineType:L1Cache);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false,
MachineType:L1Cache);
}
@@ -1309,8 +1299,6 @@ machine(L1Cache, "Token protocol")
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk,
isExternalHit(address, in_msg.Sender),
machineIDToMachineType(in_msg.Sender));
@@ -1322,7 +1310,6 @@ machine(L1Cache, "Token protocol")
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false,
MachineType:L1Cache);
cache_entry.Dirty := true;
@@ -1334,8 +1321,6 @@ machine(L1Cache, "Token protocol")
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk,
isExternalHit(address, in_msg.Sender),
machineIDToMachineType(in_msg.Sender));
@@ -1347,7 +1332,7 @@ machine(L1Cache, "Token protocol")
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
- set_tbe(L1_TBEs.lookup(address));
+ set_tbe(L1_TBEs[address]);
tbe.IssueCount := 0;
peek(mandatoryQueue_in, RubyRequest) {
tbe.PC := in_msg.ProgramCounter;
@@ -1717,7 +1702,7 @@ machine(L1Cache, "Token protocol")
}
transition({S, SM, S_L, SM_L}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -1799,7 +1784,7 @@ machine(L1Cache, "Token protocol")
// Transitions from Owned
transition({O, OM}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -1889,7 +1874,7 @@ machine(L1Cache, "Token protocol")
// Transitions from Modified
transition({MM, MM_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -1964,7 +1949,7 @@ machine(L1Cache, "Token protocol")
// Transitions from Dirty Exclusive
transition({M, M_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
index fd6a62ef2..fdef75181 100644
--- a/src/mem/protocol/MOESI_CMP_token-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -175,7 +175,7 @@ machine(Directory, "Token protocol")
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -218,7 +218,7 @@ machine(Directory, "Token protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
@@ -245,7 +245,7 @@ machine(Directory, "Token protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -256,7 +256,7 @@ machine(Directory, "Token protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -280,9 +280,9 @@ machine(Directory, "Token protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -295,7 +295,7 @@ machine(Directory, "Token protocol")
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- TBEs.lookup(reissueTimerTable.readyAddress()));
+ TBEs[reissueTimerTable.readyAddress()]);
}
}
@@ -307,13 +307,13 @@ machine(Directory, "Token protocol")
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Data_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -321,14 +321,14 @@ machine(Directory, "Token protocol")
} else {
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
@@ -360,38 +360,38 @@ machine(Directory, "Token protocol")
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
}
} else {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
}
}
else {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
@@ -405,9 +405,9 @@ machine(Directory, "Token protocol")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
@@ -419,14 +419,14 @@ machine(Directory, "Token protocol")
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
if (in_msg.Type == DMARequestType:READ) {
- trigger(Event:DMA_READ, in_msg.LineAddress, TBEs.lookup(in_msg.LineAddress));
+ trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
} else if (in_msg.Type == DMARequestType:WRITE) {
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
trigger(Event:DMA_WRITE, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
}
} else {
error("Invalid message");
@@ -691,7 +691,7 @@ machine(Directory, "Token protocol")
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
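
Illustrative sketch (not part of the patch, and not gem5 code): the reverted in_port rules above share one shape: when a queue is ready, peek the head message and trigger an event with the block address and the per-address TBE obtained by indexing, for example trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]). The toy C++ below (ToyMsg, ToyTBE, dispatch are made-up names) is a rough analogue of that peek-then-trigger structure; in SLICC the queue pop happens later, in an action such as k_popMandatoryQueue, rather than inside the port.

#include <cstdint>
#include <deque>
#include <functional>
#include <iostream>
#include <unordered_map>

using Addr = std::uint64_t;

enum class EventType { GETS, GETX };

struct ToyMsg { EventType type; Addr addr; };
struct ToyTBE { int acksOutstanding = 0; };

// Peek the head message of a ready queue and hand the event, address and
// (possibly null) per-address TBE to a single trigger callback, mirroring
// the SLICC in_port / peek / trigger structure seen in the hunks above.
void dispatch(std::deque<ToyMsg> &queue,
              std::unordered_map<Addr, ToyTBE> &tbes,
              const std::function<void(EventType, Addr, ToyTBE *)> &trigger)
{
    while (!queue.empty()) {
        const ToyMsg &in_msg = queue.front();        // peek(...)
        auto it = tbes.find(in_msg.addr);            // TBEs[in_msg.addr]
        ToyTBE *tbe = it == tbes.end() ? nullptr : &it->second;
        trigger(in_msg.type, in_msg.addr, tbe);      // trigger(Event:..., ...)
        queue.pop_front();                           // simplified pop
    }
}

int main()
{
    std::deque<ToyMsg> requestQueue{{EventType::GETS, 0x40},
                                    {EventType::GETX, 0x80}};
    std::unordered_map<Addr, ToyTBE> TBEs;
    dispatch(requestQueue, TBEs, [](EventType e, Addr a, ToyTBE *tbe) {
        std::cout << "event " << static_cast<int>(e) << " @ " << std::hex << a
                  << (tbe ? " (TBE present)\n" : " (no TBE)\n");
    });
    return 0;
}
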
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index 269e47dfd..d5539e021 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -210,7 +210,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -229,7 +229,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
return num_functional_writes;
}
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
@@ -274,7 +274,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
@@ -337,7 +337,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(triggerQueue_in, TriggerMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == TriggerType:L2_to_L1) {
trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
@@ -360,7 +360,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(responseToCache_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
@@ -385,7 +385,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if ((in_msg.Type == CoherenceRequestType:GETX) ||
(in_msg.Type == CoherenceRequestType:GETF)) {
@@ -429,7 +429,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
@@ -452,7 +452,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
@@ -477,14 +477,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L1_to_L2,
l1i_victim_addr,
getL1ICacheEntry(l1i_victim_addr),
- TBEs.lookup(l1i_victim_addr));
+ TBEs[l1i_victim_addr]);
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
}
@@ -510,7 +510,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
@@ -534,14 +534,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
trigger(Event:L1_to_L2,
l1d_victim_addr,
getL1DCacheEntry(l1d_victim_addr),
- TBEs.lookup(l1d_victim_addr));
+ TBEs[l1d_victim_addr]);
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
}
@@ -857,18 +857,9 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- testAndClearLocalHit(cache_entry));
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false,
testAndClearLocalHit(cache_entry));
}
@@ -878,8 +869,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.readCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -890,7 +880,6 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(mandatoryQueue_in, RubyRequest) {
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false,
testAndClearLocalHit(cache_entry));
@@ -912,8 +901,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -926,8 +914,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(cache_entry));
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
@@ -939,7 +926,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
tbe.Sharers := false;
@@ -948,7 +935,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(it_allocateTBE, "it", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := false;
tbe.Sharers := false;
}
@@ -1521,7 +1508,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition({S, SM, ISM}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
@@ -1535,7 +1522,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(SR, Ifetch, S) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
@@ -1583,7 +1570,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
@@ -1597,7 +1584,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(OR, Ifetch, O) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
@@ -1648,7 +1635,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Transitions from Modified
transition({MM, M}, {Ifetch}) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
@@ -1674,7 +1661,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(MMR, Ifetch, MM) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
@@ -1755,7 +1742,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
transition(MR, Ifetch, M) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
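
Illustrative sketch (not part of the patch, and not gem5 code): the MOESI_CMP_token-L1cache.sm and MOESI_hammer-cache.sm hunks fold the separate h_ifetch_hit action back into a single h_load_hit and drop the per-array setMRU calls from the hit and callback actions, so the Ifetch transitions point at the shared load-hit action again. The toy C++ below (ToySequencer and notifyReadHit are made-up names) shows why one handler can serve both loads and instruction fetches once it only notifies the sequencer and no longer touches I-cache or D-cache MRU state.

#include <cstdint>
#include <iostream>
#include <vector>

using Addr = std::uint64_t;
using DataBlock = std::vector<std::uint8_t>;

struct ToySequencer {
    void readCallback(Addr addr, const DataBlock &blk, bool externalHit)
    {
        std::cout << "read complete @ " << std::hex << addr
                  << (externalHit ? " (external)" : " (local)")
                  << ", " << std::dec << blk.size() << " bytes\n";
    }
};

// One hit action shared by Load and Ifetch transitions: it only forwards the
// block to the sequencer, so it does not need to know whether the line lives
// in the I-side or D-side array (the per-array MRU updates were what made the
// split load/ifetch actions necessary in the change being reverted).
void notifyReadHit(ToySequencer &sequencer, Addr addr, const DataBlock &blk)
{
    sequencer.readCallback(addr, blk, /*externalHit=*/false);
}

int main()
{
    ToySequencer sequencer;
    DataBlock line(64, 0);
    notifyReadHit(sequencer, 0x1000, line);   // Load hit
    notifyReadHit(sequencer, 0x2000, line);   // Ifetch hit, same action
    return 0;
}
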
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index b78d40510..27794a3bd 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -195,7 +195,7 @@ machine(Directory, "AMD Hammer-like protocol")
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
@@ -250,7 +250,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
@@ -267,7 +267,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
@@ -278,7 +278,7 @@ machine(Directory, "AMD Hammer-like protocol")
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
@@ -317,7 +317,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks_and_owner_data, in_msg.addr,
pf_entry, tbe);
@@ -341,7 +341,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
@@ -370,7 +370,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (responseToDir_in.isReady()) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
@@ -393,7 +393,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
@@ -410,7 +410,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:PUT) {
trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:PUTF) {
@@ -428,7 +428,7 @@ machine(Directory, "AMD Hammer-like protocol")
trigger(Event:Pf_Replacement,
probeFilter.cacheProbe(in_msg.addr),
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
- TBEs.lookup(probeFilter.cacheProbe(in_msg.addr)));
+ TBEs[probeFilter.cacheProbe(in_msg.addr)]);
}
}
} else {
@@ -444,7 +444,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
@@ -567,7 +567,7 @@ machine(Directory, "AMD Hammer-like protocol")
check_allocate(TBEs);
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.PhysicalAddress := address;
tbe.ResponseType := CoherenceResponseType:NULL;
}
@@ -577,7 +577,7 @@ machine(Directory, "AMD Hammer-like protocol")
check_allocate(TBEs);
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DmaDataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index 63f4b90ea..d032adfd8 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -50,10 +50,7 @@ structure(InPort, external = "yes", primitive="yes") {
}
external_type(NodeID, default="0", primitive="yes");
-structure (MachineID, external = "yes", non_obj="yes") {
- MachineType getType();
- NodeID getNum();
-}
+external_type(MachineID);
structure (Set, external = "yes", non_obj="yes") {
void setSize(int);
@@ -159,7 +156,6 @@ structure (CacheMemory, external = "yes") {
Cycles getTagLatency();
Cycles getDataLatency();
void setMRU(Addr);
- void setMRU(AbstractCacheEntry);
void recordRequestType(CacheRequestType, Addr);
bool checkResourceAvailable(CacheResourceType, Addr);
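
Illustrative sketch (not part of the patch, and not gem5 code): the RubySlicc_Types.sm hunks above revert MachineID to an opaque external_type, so its getType()/getNum() accessors are no longer declared to SLICC, and remove the entry-based setMRU overload from the CacheMemory declaration, leaving only setMRU(Addr). The toy C++ below (ToyCacheMemory is a made-up name) sketches an address-only MRU interface of that shape.

#include <cstdint>
#include <unordered_map>

using Addr = std::uint64_t;

// Only the address-based MRU hook is part of the declared interface; callers
// always have the block address at hand, so no entry-based overload is
// exposed to the protocol files.
class ToyCacheMemory {
  public:
    void setMRU(Addr addr) { m_lastTouch[addr] = ++m_tick; }
    std::uint64_t lastTouch(Addr addr) const
    {
        auto it = m_lastTouch.find(addr);
        return it == m_lastTouch.end() ? 0 : it->second;
    }
  private:
    std::uint64_t m_tick = 0;
    std::unordered_map<Addr, std::uint64_t> m_lastTouch;
};

int main()
{
    ToyCacheMemory L1Dcache;
    L1Dcache.setMRU(0x40);
    L1Dcache.setMRU(0x80);
    return L1Dcache.lastTouch(0x80) > L1Dcache.lastTouch(0x40) ? 0 : 1;
}
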