author     Nilay Vaish <nilay@cs.wisc.edu>  2011-01-17 18:46:16 -0600
committer  Nilay Vaish <nilay@cs.wisc.edu>  2011-01-17 18:46:16 -0600
commit     c82a8979a3909037a1654fc66cb215b5bacadb08 (patch)
tree       b0b51c589c665812df1ec8eb1c40adfc98877f08 /src/mem/protocol/MOESI_CMP_token-L2cache.sm
parent     6fb521faba37a47ebce2aebb08ac34bd69d29f13 (diff)
Change interface between coherence protocols and CacheMemory
The purpose of this patch is to change the way CacheMemory interfaces with coherence protocols. Currently, whenever a cache controller (defined in the protocol under consideration) needs to carry out any operation on a cache block, it looks up the tag hash map to figure out whether or not the block exists in the cache. If it does exist, the operation is carried out (which requires another lookup). Profiling of different protocols showed that multiple such lookups take place for a given cache block, and that the tag lookup accounts for 10% to 20% of the simulation time. This patch is posted in order to reduce that time. I have to acknowledge that many of the thoughts that went into this patch belong to Brad.

Changes to the CacheMemory, TBETable and AbstractCacheEntry classes:
1. The lookup function of the CacheMemory class now returns a pointer to a cache block entry instead of a reference. The pointer is NULL if the block being looked up is not present in the cache. A similar change has been made to the lookup function of the TBETable class.
2. The functions for setting and getting the access permission of a cache block have been moved from the CacheMemory class to the AbstractCacheEntry class.
3. The allocate function in the CacheMemory class now returns a pointer to the allocated cache entry.

Changes to SLICC:
1. Each action now has the implicit variables cache_entry and tbe. cache_entry, if != NULL, must point to the cache entry for the address on which the action is being carried out. Similarly, tbe should point to the transaction buffer entry for that address.
2. If a cache entry or a transaction buffer entry is passed as an argument to a function, it is presumed that a pointer is being passed.
3. The cache entry and tbe pointers received __implicitly__ by the actions are passed __explicitly__ to the trigger function.
4. While performing an action, set/unset_cache_entry and set/unset_tbe are to be used for setting and unsetting the cache entry and tbe pointers respectively.
5. is_valid() and is_invalid() have been made available for testing whether a given pointer 'is not NULL' and 'is NULL' respectively.
6. Local variables are now available, but they are always assumed to be pointers.
7. It is now possible for an object of a derived class to call a function defined in the interface.
8. An OOD token has been introduced in SLICC. It is the same as the NULL token used in C/C++. In case you are wondering, OOD stands for Out Of Domain.
9. static_cast can now take an optional parameter that asks for casting the given variable to a pointer of the given type.
10. Functions can be annotated with 'return_by_pointer=yes' to return a pointer.
11. StateMachine has two new variables, EntryType and TBEType. EntryType is set to the type that inherits from 'AbstractCacheEntry'; there can be only one such type in the machine. TBEType is set to the type for which 'TBE' is used as the name.

All the protocols have been modified to conform to the new interface.
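To make the new calling convention concrete, here is a minimal sketch of an in_port written against it. It is not part of the patch: the port name exampleRequest_in and its queue are hypothetical, while getCacheEntry, getTokens, is_valid and the three-argument trigger follow the hunks shown below. The entry is looked up once per message and the resulting pointer, possibly NULL, is handed to trigger().

  Entry getCacheEntry(Address addr), return_by_pointer="yes" {
    // lookup() now returns a pointer, which may be NULL if the block is absent
    return static_cast(Entry, "pointer", L2cacheMemory.lookup(addr));
  }

  in_port(exampleRequest_in, RequestMsg, exampleRequestQueue) {
    if (exampleRequest_in.isReady()) {
      peek(exampleRequest_in, RequestMsg) {
        // Look the entry up once and pass the pointer along, even if it is NULL.
        Entry cache_entry := getCacheEntry(in_msg.Address);
        if (getTokens(cache_entry) == 1) {
          trigger(Event:Transient_GETS_Last_Token, in_msg.Address, cache_entry);
        } else {
          trigger(Event:Transient_GETS, in_msg.Address, cache_entry);
        }
      }
    }
  }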
Diffstat (limited to 'src/mem/protocol/MOESI_CMP_token-L2cache.sm')
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L2cache.sm  325
1 file changed, 171 insertions(+), 154 deletions(-)
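On the action side, the pointer passed to trigger() arrives as the implicit cache_entry variable, and set_cache_entry()/unset_cache_entry() keep it in sync with allocation and deallocation. A brief sketch follows, again assuming the L2 controller in this file; the middle action, x_exampleTokenBump, is purely hypothetical, while the other two mirror actions that appear in the diff below.

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    // allocate() now returns a pointer; publish it as the implicit cache_entry
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(x_exampleTokenBump, "x", desc="Hypothetical action using the implicit cache_entry") {
    assert(is_valid(cache_entry));   // supplied via trigger(); must not be NULL here
    cache_entry.Tokens := cache_entry.Tokens + 1;
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();             // drop the now-stale pointer
  }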
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
index 3541da41b..c23f98f9a 100644
--- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -148,35 +148,25 @@ machine(L2Cache, "Token protocol")
PersistentTable persistentTable;
PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
- Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
- if (L2cacheMemory.isTagPresent(addr)) {
- return static_cast(Entry, L2cacheMemory[addr]);
- }
- assert(false);
- return static_cast(Entry, L2cacheMemory[addr]);
- }
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
- int getTokens(Address addr) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return getL2CacheEntry(addr).Tokens;
+ Entry getCacheEntry(Address address), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
+ return cache_entry;
+ }
+
+ int getTokens(Entry cache_entry) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.Tokens;
} else {
return 0;
}
}
- void changePermission(Address addr, AccessPermission permission) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return L2cacheMemory.changePermission(addr, permission);
- }
- }
-
- bool isCacheTagPresent(Address addr) {
- return (L2cacheMemory.isTagPresent(addr) );
- }
-
- State getState(Address addr) {
- if (isCacheTagPresent(addr)) {
- return getL2CacheEntry(addr).CacheState;
+ State getState(Entry cache_entry, Address addr) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
} else if (persistentTable.isLocked(addr) == true) {
return State:I_L;
} else {
@@ -184,57 +174,50 @@ machine(L2Cache, "Token protocol")
}
}
- std::string getStateStr(Address addr) {
- return L2Cache_State_to_string(getState(addr));
- }
-
- void setState(Address addr, State state) {
-
+ void setState(Entry cache_entry, Address addr, State state) {
- if (isCacheTagPresent(addr)) {
+ if (is_valid(cache_entry)) {
// Make sure the token count is in range
- assert(getL2CacheEntry(addr).Tokens >= 0);
- assert(getL2CacheEntry(addr).Tokens <= max_tokens());
- assert(getL2CacheEntry(addr).Tokens != (max_tokens() / 2));
+ assert(cache_entry.Tokens >= 0);
+ assert(cache_entry.Tokens <= max_tokens());
+ assert(cache_entry.Tokens != (max_tokens() / 2));
// Make sure we have no tokens in L
if ((state == State:I_L) ) {
- if (isCacheTagPresent(addr)) {
- assert(getL2CacheEntry(addr).Tokens == 0);
- }
+ assert(cache_entry.Tokens == 0);
}
// in M and E you have all the tokens
if (state == State:M ) {
- assert(getL2CacheEntry(addr).Tokens == max_tokens());
+ assert(cache_entry.Tokens == max_tokens());
}
// in NP you have no tokens
if (state == State:NP) {
- assert(getL2CacheEntry(addr).Tokens == 0);
+ assert(cache_entry.Tokens == 0);
}
// You have at least one token in S-like states
if (state == State:S ) {
- assert(getL2CacheEntry(addr).Tokens > 0);
+ assert(cache_entry.Tokens > 0);
}
// You have at least half the token in O-like states
if (state == State:O ) {
- assert(getL2CacheEntry(addr).Tokens > (max_tokens() / 2));
+ assert(cache_entry.Tokens > (max_tokens() / 2));
}
- getL2CacheEntry(addr).CacheState := state;
+ cache_entry.CacheState := state;
// Set permission
if (state == State:I) {
- changePermission(addr, AccessPermission:Invalid);
+ cache_entry.changePermission(AccessPermission:Invalid);
} else if (state == State:S || state == State:O ) {
- changePermission(addr, AccessPermission:Read_Only);
+ cache_entry.changePermission(AccessPermission:Read_Only);
} else if (state == State:M ) {
- changePermission(addr, AccessPermission:Read_Write);
+ cache_entry.changePermission(AccessPermission:Read_Write);
} else {
- changePermission(addr, AccessPermission:Invalid);
+ cache_entry.changePermission(AccessPermission:Invalid);
}
}
}
@@ -341,22 +324,24 @@ machine(L2Cache, "Token protocol")
error("Unexpected message");
}
+ Entry cache_entry := getCacheEntry(in_msg.Address);
// React to the message based on the current state of the table
if (persistentTable.isLocked(in_msg.Address)) {
if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
- if (getTokens(in_msg.Address) == 1 ||
- getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
- trigger(Event:Persistent_GETS_Last_Token, in_msg.Address);
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
+ cache_entry);
} else {
- trigger(Event:Persistent_GETS, in_msg.Address);
+ trigger(Event:Persistent_GETS, in_msg.Address, cache_entry);
}
} else {
- trigger(Event:Persistent_GETX, in_msg.Address);
+ trigger(Event:Persistent_GETX, in_msg.Address, cache_entry);
}
}
else {
- trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+ trigger(Event:Own_Lock_or_Unlock, in_msg.Address, cache_entry);
}
}
}
@@ -369,14 +354,16 @@ machine(L2Cache, "Token protocol")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.Address);
if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:Transient_GETX, in_msg.Address);
+ trigger(Event:Transient_GETX, in_msg.Address, cache_entry);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
- trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
+ if (getTokens(cache_entry) == 1) {
+ trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
+ cache_entry);
}
else {
- trigger(Event:Transient_GETS, in_msg.Address);
+ trigger(Event:Transient_GETS, in_msg.Address, cache_entry);
}
} else {
error("Unexpected message");
@@ -389,15 +376,16 @@ machine(L2Cache, "Token protocol")
if (L1requestNetwork_in.isReady()) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.Address);
if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:L1_GETX, in_msg.Address);
+ trigger(Event:L1_GETX, in_msg.Address, cache_entry);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (getTokens(in_msg.Address) == 1 ||
- getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
- trigger(Event:L1_GETS_Last_Token, in_msg.Address);
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:L1_GETS_Last_Token, in_msg.Address, cache_entry);
}
else {
- trigger(Event:L1_GETS, in_msg.Address);
+ trigger(Event:L1_GETS, in_msg.Address, cache_entry);
}
} else {
error("Unexpected message");
@@ -412,68 +400,80 @@ machine(L2Cache, "Token protocol")
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
- if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
+ Entry cache_entry := getCacheEntry(in_msg.Address);
+
+ if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
if (in_msg.Type == CoherenceResponseType:ACK) {
assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack, in_msg.Address);
+ trigger(Event:Ack, in_msg.Address, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
- trigger(Event:Data_Owner, in_msg.Address);
+ trigger(Event:Data_Owner, in_msg.Address, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_Shared, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ trigger(Event:Data_Shared, in_msg.Address, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
+ in_msg.Type == CoherenceResponseType:WB_OWNED ||
+ in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
+ if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
// either room is available or the block is already present
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Tokens, in_msg.Address);
+ trigger(Event:Writeback_Tokens, in_msg.Address, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Shared_Data, in_msg.Address);
+ trigger(Event:Writeback_Shared_Data, in_msg.Address, cache_entry);
}
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
//assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Owned, in_msg.Address);
+ trigger(Event:Writeback_Owned, in_msg.Address, cache_entry);
}
}
else {
- trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L2_Replacement,
+ L2cacheMemory.cacheProbe(in_msg.Address),
+ getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
}
} else if (in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:L1_INV, in_msg.Address);
+ trigger(Event:L1_INV, in_msg.Address, cache_entry);
} else {
error("Unexpected message");
}
} else {
if (in_msg.Type == CoherenceResponseType:ACK) {
assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack_All_Tokens, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_All_Tokens, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
+ trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
+ in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
+ in_msg.Type == CoherenceResponseType:WB_OWNED ||
+ in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
// either room is available or the block is already present
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
assert(in_msg.Dirty == false);
- assert( (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
- trigger(Event:Writeback_All_Tokens, in_msg.Address);
+ assert( (getState(cache_entry, in_msg.Address) != State:NP)
+ && (getState(cache_entry, in_msg.Address) != State:I) );
+ trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
assert(in_msg.Dirty == false);
- trigger(Event:Writeback_All_Tokens, in_msg.Address);
+ trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
}
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
- trigger(Event:Writeback_All_Tokens, in_msg.Address);
+ trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
}
}
else {
- trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L2_Replacement,
+ L2cacheMemory.cacheProbe(in_msg.Address),
+ getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
}
} else if (in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:L1_INV, in_msg.Address);
+ trigger(Event:L1_INV, in_msg.Address, cache_entry);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Unexpected message");
@@ -536,29 +536,31 @@ machine(L2Cache, "Token protocol")
}
action(c_cleanReplacement, "c", desc="Issue clean writeback") {
- if (getL2CacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
}
action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
+ assert(is_valid(cache_entry));
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
- if (getL2CacheEntry(address).Dirty) {
+ if (cache_entry.Dirty) {
out_msg.MessageSize := MessageSizeType:Writeback_Data;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
} else {
@@ -566,23 +568,24 @@ machine(L2Cache, "Token protocol")
out_msg.Type := CoherenceResponseType:ACK_OWNER;
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
- if (getL2CacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := N_tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
+ cache_entry.Tokens := cache_entry.Tokens - N_tokens;
}
else {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
@@ -591,109 +594,115 @@ machine(L2Cache, "Token protocol")
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := 1;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+ cache_entry.Tokens := cache_entry.Tokens - 1;
}
}
}
action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
+ assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
- if (getL2CacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
+ assert(is_valid(cache_entry));
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getL2CacheEntry(address).Tokens > 0);
- if (getL2CacheEntry(address).Tokens > 1) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ if (cache_entry.Tokens > 1) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens - 1;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- getL2CacheEntry(address).Tokens := 1;
+ cache_entry.Tokens := 1;
}
action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getL2CacheEntry(address).Tokens > (max_tokens() / 2) + 1);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.Tokens := cache_entry.Tokens - 1;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := 1;
+ cache_entry.Tokens := 1;
}
action(fa_sendDataWithAllTokens, "fa", desc="Send data and out tokens but one to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
@@ -791,58 +800,60 @@ machine(L2Cache, "Token protocol")
}
}
-
action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- assert(getL2CacheEntry(address).Tokens > 0);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
//enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
out_msg.Tokens := 1;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+ cache_entry.Tokens := cache_entry.Tokens - 1;
}
}
action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ out_msg.Tokens := cache_entry.Tokens;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
}
action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
-// assert(getL2CacheEntry(address).Tokens == max_tokens());
+ assert(is_valid(cache_entry));
+// assert(cache_entry.Tokens == max_tokens());
//enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
//out_msg.Tokens := max_tokens();
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ out_msg.Tokens := cache_entry.Tokens;
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
}
@@ -865,13 +876,14 @@ machine(L2Cache, "Token protocol")
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
assert(in_msg.Tokens != 0);
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;
+ cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
// this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
// may not trigger this action.
if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
- getL2CacheEntry(address).Dirty := true;
+ cache_entry.Dirty := true;
}
}
}
@@ -895,61 +907,65 @@ machine(L2Cache, "Token protocol")
action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
peek(L1requestNetwork_in, RequestMsg) {
if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
- (isCacheTagPresent(address))) {
+ (is_valid(cache_entry))) {
L2cacheMemory.setMRU(address);
}
}
}
action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
- if (getL2CacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
- if (getL2CacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
peek(L1requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- assert(getL2CacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
- getL2CacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(u_writeDataToCache, "u", desc="Write data to cache") {
peek(responseNetwork_in, ResponseMsg) {
- getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
- if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
- getL2CacheEntry(address).Dirty := in_msg.Dirty;
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ if ((cache_entry.Dirty == false) && in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
}
}
}
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
- L2cacheMemory.allocate(address, new Entry);
+ set_cache_entry(L2cacheMemory.allocate(address, new Entry));
}
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
L2cacheMemory.deallocate(address);
+ unset_cache_entry();
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
@@ -965,7 +981,8 @@ machine(L2Cache, "Token protocol")
peek(responseNetwork_in, ResponseMsg) {
if (in_msg.Type != CoherenceResponseType:ACK &&
in_msg.Type != CoherenceResponseType:WB_TOKENS) {
- assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.DataBlk == in_msg.DataBlk);
}
}
}