Diffstat (limited to 'src/mem/protocol/MOESI_CMP_token-L1cache.sm')
 src/mem/protocol/MOESI_CMP_token-L1cache.sm | 573 ++++++++++++-----------
 1 file changed, 327 insertions(+), 246 deletions(-)
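This patch converts the L1 controller from address-keyed state access to explicit entry/TBE handles: getCacheEntry() now returns by pointer instead of by reference, every trigger() carries the cache entry and TBE it resolved, and actions validate their implicit cache_entry/tbe arguments with is_valid() before touching them. A minimal C++ analogue of the pointer-return idea follows; the class and names are illustrative, not gem5's generated code.

    #include <cassert>
    #include <unordered_map>

    struct Entry { int Tokens = 0; };

    class CacheMemory {
        std::unordered_map<long, Entry> m_tags;
      public:
        // Old style: reference return forces a separate "present?" check
        // and cannot express "no entry" in the return value itself.
        Entry& lookupRef(long addr) { return m_tags.at(addr); }

        // New style: pointer return folds presence into the handle, so
        // callers test is_valid(ptr) instead of isTagPresent(addr).
        Entry* lookupPtr(long addr) {
            auto it = m_tags.find(addr);
            return it == m_tags.end() ? nullptr : &it->second;
        }
        Entry* allocate(long addr) { return &m_tags[addr]; }
    };

    static bool is_valid(const Entry* e) { return e != nullptr; }

    int main() {
        CacheMemory l1d;
        l1d.allocate(0x40)->Tokens = 3;
        Entry* e = l1d.lookupPtr(0x80);   // miss: null handle, no throw
        assert(!is_valid(e));
        assert(is_valid(l1d.lookupPtr(0x40)));
        return 0;
    }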
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index a810a3e02..8cb45249e 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -171,6 +171,11 @@ machine(L1Cache, "Token protocol")
int countStarvingForAddress(Address);
int countReadStarvingForAddress(Address);
}
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
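The four declarations added above are the binding hooks for that scheme: the generated transition code binds the entry/TBE passed by trigger() to the implicit cache_entry and tbe variables that actions read, and unbinds them afterwards. A sketch of that lifecycle, with hypothetical member names:

    #include <cassert>
    #include <cstdio>

    struct Entry { int Tokens = 0; };
    struct TBE   { int IssueCount = 0; };

    class Controller {
        Entry* m_cache_entry_ptr = nullptr;  // bound per transition
        TBE*   m_tbe_ptr = nullptr;
      public:
        void set_cache_entry(Entry* e) { m_cache_entry_ptr = e; }
        void unset_cache_entry()       { m_cache_entry_ptr = nullptr; }
        void set_tbe(TBE* t)           { m_tbe_ptr = t; }
        void unset_tbe()               { m_tbe_ptr = nullptr; }

        // trigger(event, addr, entry, tbe) ends up here: bind, act, unbind.
        void doTransition(Entry* entry, TBE* tbe) {
            set_cache_entry(entry);
            set_tbe(tbe);
            actionUsingImplicits();
            unset_cache_entry();
            unset_tbe();
        }
        void actionUsingImplicits() {
            assert(m_tbe_ptr != nullptr);    // mirrors assert(is_valid(tbe))
            m_tbe_ptr->IssueCount++;
            std::printf("IssueCount=%d\n", m_tbe_ptr->IssueCount);
        }
    };

    int main() { Entry e; TBE t; Controller c; c.doTransition(&e, &t); }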
@@ -210,46 +215,39 @@ machine(L1Cache, "Token protocol")
averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
}
-
- Entry getCacheEntry(Address addr), return_by_ref="yes" {
- if (L1DcacheMemory.isTagPresent(addr)) {
- assert(L1IcacheMemory.isTagPresent(addr) == false);
- return static_cast(Entry, L1DcacheMemory[addr]);
- } else {
- return static_cast(Entry, L1IcacheMemory[addr]);
+ Entry getCacheEntry(Address addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+ if(is_valid(L1Dcache_entry)) {
+ return L1Dcache_entry;
}
+
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+ return L1Icache_entry;
}
- int getTokens(Address addr) {
- if (L1DcacheMemory.isTagPresent(addr)) {
- assert(L1IcacheMemory.isTagPresent(addr) == false);
- return static_cast(Entry, L1DcacheMemory[addr]).Tokens;
- } else if (L1IcacheMemory.isTagPresent(addr)) {
- return static_cast(Entry, L1IcacheMemory[addr]).Tokens;
- } else {
- return 0;
- }
+ Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+ return L1Dcache_entry;
}
- void changePermission(Address addr, AccessPermission permission) {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory.changePermission(addr, permission);
- } else {
- return L1IcacheMemory.changePermission(addr, permission);
- }
+ Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+ return L1Icache_entry;
}
- bool isCacheTagPresent(Address addr) {
- return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
+ int getTokens(Entry cache_entry) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.Tokens;
+ }
+ return 0;
}
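getTokens() is the one helper that deliberately tolerates a missing entry: an uncached block holds zero tokens, so the old two-cache probe collapses into a null check. A null-tolerant accessor in the same spirit:

    #include <cassert>

    struct Entry { int Tokens = 0; };

    // Null-tolerant accessor: "not present" and "present with 0 tokens"
    // both read as 0, which is exactly what the protocol logic wants.
    int getTokens(const Entry* cache_entry) {
        return cache_entry ? cache_entry->Tokens : 0;
    }

    int main() {
        Entry e{4};
        assert(getTokens(&e) == 4);
        assert(getTokens(nullptr) == 0);  // uncached line owns no tokens
        return 0;
    }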
- State getState(Address addr) {
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+ State getState(TBE tbe, Entry cache_entry, Address addr) {
- if (L1_TBEs.isPresent(addr)) {
- return L1_TBEs[addr].TBEState;
- } else if (isCacheTagPresent(addr)) {
- return getCacheEntry(addr).CacheState;
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
} else {
if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
// Not in cache, in persistent table, but this processor isn't highest priority
@@ -260,31 +258,29 @@ machine(L1Cache, "Token protocol")
}
}
- void setState(Address addr, State state) {
+ void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
- if (L1_TBEs.isPresent(addr)) {
+ if (is_valid(tbe)) {
assert(state != State:I);
assert(state != State:S);
assert(state != State:O);
assert(state != State:MM);
assert(state != State:M);
- L1_TBEs[addr].TBEState := state;
+ tbe.TBEState := state;
}
- if (isCacheTagPresent(addr)) {
+ if (is_valid(cache_entry)) {
// Make sure the token count is in range
- assert(getCacheEntry(addr).Tokens >= 0);
- assert(getCacheEntry(addr).Tokens <= max_tokens());
- assert(getCacheEntry(addr).Tokens != (max_tokens() / 2));
+ assert(cache_entry.Tokens >= 0);
+ assert(cache_entry.Tokens <= max_tokens());
+ assert(cache_entry.Tokens != (max_tokens() / 2));
if ((state == State:I_L) ||
(state == State:IM_L) ||
(state == State:IS_L)) {
// Make sure we have no tokens in the "Invalid, locked" states
- if (isCacheTagPresent(addr)) {
- assert(getCacheEntry(addr).Tokens == 0);
- }
+ assert(cache_entry.Tokens == 0);
// Make sure the line is locked
// assert(persistentTable.isLocked(addr));
@@ -294,8 +290,8 @@ machine(L1Cache, "Token protocol")
} else if ((state == State:S_L) ||
(state == State:SM_L)) {
- assert(getCacheEntry(addr).Tokens >= 1);
- assert(getCacheEntry(addr).Tokens < (max_tokens() / 2));
+ assert(cache_entry.Tokens >= 1);
+ assert(cache_entry.Tokens < (max_tokens() / 2));
// Make sure the line is locked...
// assert(persistentTable.isLocked(addr));
@@ -321,30 +317,30 @@ machine(L1Cache, "Token protocol")
// in M and E you have all the tokens
if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
- assert(getCacheEntry(addr).Tokens == max_tokens());
+ assert(cache_entry.Tokens == max_tokens());
}
// in NP you have no tokens
if (state == State:NP) {
- assert(getCacheEntry(addr).Tokens == 0);
+ assert(cache_entry.Tokens == 0);
}
// You have at least one token in S-like states
if (state == State:S || state == State:SM) {
- assert(getCacheEntry(addr).Tokens > 0);
+ assert(cache_entry.Tokens > 0);
}
// You have at least half the tokens in O-like states
if (state == State:O || state == State:OM) {
- assert(getCacheEntry(addr).Tokens > (max_tokens() / 2));
+ assert(cache_entry.Tokens > (max_tokens() / 2));
}
- getCacheEntry(addr).CacheState := state;
+ cache_entry.CacheState := state;
// Set permission
if (state == State:MM ||
state == State:MM_W) {
- changePermission(addr, AccessPermission:Read_Write);
+ cache_entry.changePermission(AccessPermission:Read_Write);
} else if ((state == State:S) ||
(state == State:O) ||
(state == State:M) ||
@@ -353,9 +349,9 @@ machine(L1Cache, "Token protocol")
(state == State:S_L) ||
(state == State:SM_L) ||
(state == State:OM)) {
- changePermission(addr, AccessPermission:Read_Only);
+ cache_entry.changePermission(AccessPermission:Read_Only);
} else {
- changePermission(addr, AccessPermission:Invalid);
+ cache_entry.changePermission(AccessPermission:Invalid);
}
}
}
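The asserts in setState() encode the token-counting rules of the protocol: each line has a fixed total of max_tokens(); ownership is encoded as holding a strict majority, so a count of exactly max_tokens()/2 is unrepresentable; M/MM require all tokens, O-like states a majority, and S-like states at least one token but less than half. A worked check of those thresholds, assuming max_tokens() = 8 purely for illustration:

    #include <cassert>

    int main() {
        const int max_tokens = 8;           // illustrative, not gem5's value
        const int half = max_tokens / 2;    // 4: the forbidden count

        // M/MM-like states: all tokens.
        int m_tokens = max_tokens;
        assert(m_tokens == max_tokens);

        // O-like states: a strict majority encodes ownership.
        int o_tokens = half + 1;            // 5 is the smallest owner count
        assert(o_tokens > half);
        assert(o_tokens != half);

        // S-like states: at least one token, but no majority.
        int s_tokens = 1;
        assert(s_tokens >= 1 && s_tokens < half);

        // The "last token" tests in the in_ports match either a bare
        // shared copy (1 token) or a bare owner copy (half + 1 tokens).
        assert(s_tokens == 1 || s_tokens == half + 1);
        assert(o_tokens == 1 || o_tokens == half + 1);
        return 0;
    }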
@@ -418,6 +414,16 @@ machine(L1Cache, "Token protocol")
persistentTable.markEntries(addr);
}
+ void setExternalResponse(TBE tbe) {
+ assert(is_valid(tbe));
+ tbe.ExternalResponse := true;
+ }
+
+ bool IsAtomic(TBE tbe) {
+ assert(is_valid(tbe));
+ return tbe.IsAtomic;
+ }
+
// ** OUT_PORTS **
out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
@@ -429,18 +435,24 @@ machine(L1Cache, "Token protocol")
// Use Timer
in_port(useTimerTable_in, Address, useTimerTable) {
if (useTimerTable_in.isReady()) {
- if (persistentTable.isLocked(useTimerTable.readyAddress()) && (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
+ TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
+
+ if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
+ (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
- trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress());
+ trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
} else {
- trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress());
+ trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
}
} else {
- assert(L1_TBEs.isPresent(useTimerTable.readyAddress()));
- if (no_mig_atomic && L1_TBEs[useTimerTable.readyAddress()].IsAtomic) {
- trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress());
+ if (no_mig_atomic && IsAtomic(tbe)) {
+ trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
} else {
- trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress());
+ trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress(),
+ getCacheEntry(useTimerTable.readyAddress()), tbe);
}
}
}
@@ -449,7 +461,9 @@ machine(L1Cache, "Token protocol")
// Reissue Timer
in_port(reissueTimerTable_in, Address, reissueTimerTable) {
if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
+ trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
+ getCacheEntry(reissueTimerTable.readyAddress()),
+ L1_TBEs[reissueTimerTable.readyAddress()]);
}
}
@@ -473,25 +487,33 @@ machine(L1Cache, "Token protocol")
}
// React to the message based on the current state of the table
+ Entry cache_entry := getCacheEntry(in_msg.Address);
+ TBE tbe := L1_TBEs[in_msg.Address];
+
if (persistentTable.isLocked(in_msg.Address)) {
if (persistentTable.findSmallest(in_msg.Address) == machineID) {
// Our Own Lock - this processor is highest priority
- trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+ trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
+ cache_entry, tbe);
} else {
if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
- if (getTokens(in_msg.Address) == 1 ||
- getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
- trigger(Event:Persistent_GETS_Last_Token, in_msg.Address);
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
+ cache_entry, tbe);
} else {
- trigger(Event:Persistent_GETS, in_msg.Address);
+ trigger(Event:Persistent_GETS, in_msg.Address,
+ cache_entry, tbe);
}
} else {
- trigger(Event:Persistent_GETX, in_msg.Address);
+ trigger(Event:Persistent_GETX, in_msg.Address,
+ cache_entry, tbe);
}
}
} else {
// Unlock case - no entries in the table
- trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
+ trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
+ cache_entry, tbe);
}
}
}
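Context for the persistent in_port above: token coherence resolves starvation through a persistent table, with ties broken by the lowest machine ID, so findSmallest(addr) == machineID means this L1 owns the highest-priority persistent request (Own_Lock_or_Unlock) and everyone else must yield tokens to the starver. A toy arbitration model; the real table's implementation is not shown in this diff:

    #include <cassert>
    #include <map>
    #include <set>

    using MachineID = int;

    // Toy persistent table: per address, the set of requesters; priority
    // goes to the numerically smallest machine ID.
    class PersistentTable {
        std::map<long, std::set<MachineID>> m_locks;
      public:
        void request(long addr, MachineID m) { m_locks[addr].insert(m); }
        bool isLocked(long addr) const { return m_locks.count(addr) != 0; }
        MachineID findSmallest(long addr) const {
            return *m_locks.at(addr).begin();
        }
    };

    int main() {
        PersistentTable t;
        t.request(0x40, /*L1 id*/ 3);
        t.request(0x40, /*L1 id*/ 1);
        assert(t.isLocked(0x40));
        // Machine 1 proceeds; machine 3 must keep yielding tokens and
        // data to the starver until the lock is released.
        assert(t.findSmallest(0x40) == 1);
        return 0;
    }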
@@ -503,29 +525,39 @@ machine(L1Cache, "Token protocol")
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.Address);
+ TBE tbe := L1_TBEs[in_msg.Address];
+
if (in_msg.Type == CoherenceRequestType:GETX) {
if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETX, in_msg.Address);
+ trigger(Event:Transient_Local_GETX, in_msg.Address,
+ cache_entry, tbe);
}
else {
- trigger(Event:Transient_GETX, in_msg.Address);
+ trigger(Event:Transient_GETX, in_msg.Address,
+ cache_entry, tbe);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (getTokens(in_msg.Address) == 1 ||
- getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address);
+ trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address,
+ cache_entry, tbe);
}
else {
- trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
+ trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
+ cache_entry, tbe);
}
}
else {
if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETS, in_msg.Address);
+ trigger(Event:Transient_Local_GETS, in_msg.Address,
+ cache_entry, tbe);
}
else {
- trigger(Event:Transient_GETS, in_msg.Address);
+ trigger(Event:Transient_GETS, in_msg.Address,
+ cache_entry, tbe);
}
}
} else {
@@ -541,6 +573,9 @@ machine(L1Cache, "Token protocol")
peek(responseNetwork_in, ResponseMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.Address);
+ TBE tbe := L1_TBEs[in_msg.Address];
+
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
@@ -550,7 +585,7 @@ machine(L1Cache, "Token protocol")
l2_select_num_bits)) {
// came from an off-chip L2 cache
- if (L1_TBEs.isPresent(in_msg.Address)) {
+ if (is_valid(tbe)) {
// L1_TBEs[in_msg.Address].ExternalResponse := true;
// profile_offchipL2_response(in_msg.Address);
}
@@ -559,14 +594,14 @@ machine(L1Cache, "Token protocol")
// profile_onchipL2_response(in_msg.Address );
}
} else if ( machineIDToMachineType(in_msg.Sender) == MachineType:Directory ) {
- if (L1_TBEs.isPresent(in_msg.Address)) {
- L1_TBEs[in_msg.Address].ExternalResponse := true;
+ if (is_valid(tbe)) {
+ setExternalResponse(tbe);
// profile_memory_response( in_msg.Address);
}
} else if ( machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
//if (isLocalProcessor(machineID, in_msg.Sender) == false) {
- //if (L1_TBEs.isPresent(in_msg.Address)) {
- // L1_TBEs[in_msg.Address].ExternalResponse := true;
+ //if (is_valid(tbe)) {
+ // tbe.ExternalResponse := true;
// profile_offchipL1_response(in_msg.Address );
//}
//}
@@ -578,24 +613,24 @@ machine(L1Cache, "Token protocol")
}
- if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
+ if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
if (in_msg.Type == CoherenceResponseType:ACK) {
assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack, in_msg.Address);
+ trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
- trigger(Event:Data_Owner, in_msg.Address);
+ trigger(Event:Data_Owner, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Data_Shared, in_msg.Address);
+ trigger(Event:Data_Shared, in_msg.Address, cache_entry, tbe);
} else {
error("Unexpected message");
}
} else {
if (in_msg.Type == CoherenceResponseType:ACK) {
assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack_All_Tokens, in_msg.Address);
+ trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_All_Tokens, in_msg.Address);
+ trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry, tbe);
} else {
error("Unexpected message");
}
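The response port's event choice hinges on a single sum: when the tokens already held plus the tokens in the message reach max_tokens(), the *_All_Tokens events fire; otherwise the partial Ack/Data_Shared/Data_Owner events do. A compact classifier with the same shape; the function and max_tokens value are illustrative:

    #include <cassert>
    #include <string>

    // Mirrors the response in_port's decision, with getTokens(cache_entry)
    // already folded into held_tokens.
    std::string classify(int held_tokens, int msg_tokens, bool is_data,
                         int max_tokens = 8) {
        if (held_tokens + msg_tokens != max_tokens)
            return is_data ? "Data_Shared/Data_Owner" : "Ack";
        return is_data ? "Data_All_Tokens" : "Ack_All_Tokens";
    }

    int main() {
        assert(classify(2, 1, false) == "Ack");             // still partial
        assert(classify(3, 5, true)  == "Data_All_Tokens"); // sum hits max
        assert(classify(7, 1, false) == "Ack_All_Tokens");
        return 0;
    }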
@@ -610,46 +645,65 @@ machine(L1Cache, "Token protocol")
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
+ TBE tbe := L1_TBEs[in_msg.LineAddress];
+
if (in_msg.Type == CacheRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, try to write it to the L2
- trigger(Event:L1_Replacement, in_msg.LineAddress);
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Dcache_entry, tbe);
}
- if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
} else {
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
} else {
// No room in the L1, so we need to make room
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
+ trigger(Event:L1_Replacement,
+ L1IcacheMemory.cacheProbe(in_msg.LineAddress),
+ getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
// *** DATA ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+
+ if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, try to write it to the L2
- trigger(Event:L1_Replacement, in_msg.LineAddress);
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Icache_entry, tbe);
}
- if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
} else {
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
} else {
// No room in the L1, so we need to make room
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
+ trigger(Event:L1_Replacement,
+ L1DcacheMemory.cacheProbe(in_msg.LineAddress),
+ getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
}
}
}
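The mandatory-queue port applies one pattern per cache side: evict a wrong-side hit, service a right-side hit, allocate on a miss with room, and otherwise replace the victim that cacheProbe() names. That last case is why the replacement trigger now looks up the victim's own entry and TBE rather than the requesting line's. A control-flow sketch with stand-in primitives:

    #include <cstdio>
    #include <optional>
    #include <string>

    struct Entry { int Tokens = 0; };

    // Stand-ins for the SLICC primitives used above.
    bool cacheAvail(long) { return false; }            // pretend the set is full
    long cacheProbe(long addr) { return addr ^ 0x40; } // pretend victim line
    std::optional<Entry> lookup(long) { return Entry{2}; }

    void trigger(const std::string& ev, long addr, std::optional<Entry> e) {
        std::printf("%s addr=%#lx tokens=%d\n", ev.c_str(), addr,
                    e ? e->Tokens : 0);
    }

    int main() {
        long req = 0x1000;
        std::optional<Entry> hit;                   // pretend the lookup missed
        if (hit) {
            trigger("mandatory_request", req, hit);          // right-side hit
        } else if (cacheAvail(req)) {
            trigger("mandatory_request", req, std::nullopt); // miss with room
        } else {
            // No room: replace the probe's victim, passing *its* entry/TBE.
            long victim = cacheProbe(req);
            trigger("L1_Replacement", victim, lookup(victim));
        }
        return 0;
    }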
@@ -660,13 +714,14 @@ machine(L1Cache, "Token protocol")
// ACTIONS
action(a_issueReadRequest, "a", desc="Issue GETS") {
- if (L1_TBEs[address].IssueCount == 0) {
+ assert(is_valid(tbe));
+ if (tbe.IssueCount == 0) {
// Update outstanding requests
//profile_outstanding_request(outstandingRequests);
outstandingRequests := outstandingRequests + 1;
}
- if (L1_TBEs[address].IssueCount >= retry_threshold) {
+ if (tbe.IssueCount >= retry_threshold) {
// Issue a persistent request if possible
if (okToIssueStarving(address, machineID) && (starving == false)) {
enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
@@ -689,14 +744,14 @@ machine(L1Cache, "Token protocol")
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
markPersistentEntries(address);
starving := true;
- if (L1_TBEs[address].IssueCount == 0) {
- //profile_persistent_prediction(address, L1_TBEs[address].AccessType);
+ if (tbe.IssueCount == 0) {
+ //profile_persistent_prediction(address, tbe.AccessType);
}
// Update outstanding requests
@@ -704,9 +759,9 @@ machine(L1Cache, "Token protocol")
outstandingPersistentRequests := outstandingPersistentRequests + 1;
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
- L1_TBEs[address].WentPersistent := true;
+ tbe.WentPersistent := true;
// Do not schedule a wakeup, a persistent request will always complete
}
@@ -731,14 +786,14 @@ machine(L1Cache, "Token protocol")
l2_select_low_bit,
l2_select_num_bits));
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
- if (L1_TBEs[address].IssueCount == 0) {
+ out_msg.RetryNum := tbe.IssueCount;
+ if (tbe.IssueCount == 0) {
out_msg.MessageSize := MessageSizeType:Request_Control;
} else {
out_msg.MessageSize := MessageSizeType:Reissue_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// send to other local L1s, with local bit set
@@ -753,19 +808,19 @@ machine(L1Cache, "Token protocol")
out_msg.Destination.broadcast(MachineType:L1Cache);
out_msg.Destination.remove(machineID);
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
+ out_msg.RetryNum := tbe.IssueCount;
out_msg.isLocal := true;
- if (L1_TBEs[address].IssueCount == 0) {
+ if (tbe.IssueCount == 0) {
out_msg.MessageSize := MessageSizeType:Broadcast_Control;
} else {
out_msg.MessageSize := MessageSizeType:Broadcast_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
// Set a wakeup timer
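a_issueReadRequest and b_issueWriteRequest share the escalation scheme seen here: every reissue bumps tbe.IssueCount and re-arms the reissue timer, and once IssueCount reaches retry_threshold the request goes persistent, which always completes and therefore sets no timer. A skeleton of that policy; retry_threshold is a config parameter and the values below are illustrative:

    #include <cstdio>

    struct TBE { int IssueCount = 0; bool WentPersistent = false; };

    void issueRequest(TBE& tbe, int retry_threshold) {
        if (tbe.IssueCount >= retry_threshold) {
            tbe.WentPersistent = true;   // persistent GETS/GETX
            tbe.IssueCount++;
            std::puts("persistent request: no wakeup timer needed");
        } else {
            tbe.IssueCount++;            // transient GETS/GETX
            std::puts("transient request: arm reissue timer");
        }
    }

    int main() {
        TBE tbe;
        for (int i = 0; i < 3; ++i)
            issueRequest(tbe, /*retry_threshold=*/2); // 2 transient, then persistent
        return 0;
    }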
@@ -780,13 +835,14 @@ machine(L1Cache, "Token protocol")
action(b_issueWriteRequest, "b", desc="Issue GETX") {
- if (L1_TBEs[address].IssueCount == 0) {
+ assert(is_valid(tbe));
+ if (tbe.IssueCount == 0) {
// Update outstanding requests
//profile_outstanding_request(outstandingRequests);
outstandingRequests := outstandingRequests + 1;
}
- if (L1_TBEs[address].IssueCount >= retry_threshold) {
+ if (tbe.IssueCount >= retry_threshold) {
// Issue a persistent request if possible
if ( okToIssueStarving(address, machineID) && (starving == false)) {
enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
@@ -809,8 +865,8 @@ machine(L1Cache, "Token protocol")
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
markPersistentEntries(address);
starving := true;
@@ -819,14 +875,14 @@ machine(L1Cache, "Token protocol")
//profile_outstanding_persistent_request(outstandingPersistentRequests);
outstandingPersistentRequests := outstandingPersistentRequests + 1;
- if (L1_TBEs[address].IssueCount == 0) {
- //profile_persistent_prediction(address, L1_TBEs[address].AccessType);
+ if (tbe.IssueCount == 0) {
+ //profile_persistent_prediction(address, tbe.AccessType);
}
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
- L1_TBEs[address].WentPersistent := true;
+ tbe.WentPersistent := true;
// Do not schedule a wakeup, a persistent request will always complete
}
@@ -853,15 +909,15 @@ machine(L1Cache, "Token protocol")
l2_select_low_bit,
l2_select_num_bits));
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
+ out_msg.RetryNum := tbe.IssueCount;
- if (L1_TBEs[address].IssueCount == 0) {
+ if (tbe.IssueCount == 0) {
out_msg.MessageSize := MessageSizeType:Request_Control;
} else {
out_msg.MessageSize := MessageSizeType:Reissue_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// send to other local L1s too
@@ -878,21 +934,21 @@ machine(L1Cache, "Token protocol")
out_msg.Destination.broadcast(MachineType:L1Cache);
out_msg.Destination.remove(machineID);
- out_msg.RetryNum := L1_TBEs[address].IssueCount;
- if (L1_TBEs[address].IssueCount == 0) {
+ out_msg.RetryNum := tbe.IssueCount;
+ if (tbe.IssueCount == 0) {
out_msg.MessageSize := MessageSizeType:Broadcast_Control;
} else {
out_msg.MessageSize := MessageSizeType:Broadcast_Control;
}
- out_msg.Prefetch := L1_TBEs[address].Prefetch;
- out_msg.AccessMode := L1_TBEs[address].AccessMode;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
}
// Increment IssueCount
- L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
+ tbe.IssueCount := tbe.IssueCount + 1;
DPRINTF(RubySlicc, "incremented issue count to %d\n",
- L1_TBEs[address].IssueCount);
+ tbe.IssueCount);
// Set a wakeup timer
if (dynamic_timeout_enabled) {
@@ -920,6 +976,7 @@ machine(L1Cache, "Token protocol")
}
action(c_ownedReplacement, "c", desc="Issue writeback") {
+ assert(is_valid(cache_entry));
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
@@ -929,21 +986,22 @@ machine(L1Cache, "Token protocol")
l2_select_low_bit,
l2_select_num_bits));
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.Type := CoherenceResponseType:WB_OWNED;
// always send the data?
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
// don't send writeback if replacing block with no tokens
- assert (getCacheEntry(address).Tokens > 0);
+ assert(is_valid(cache_entry));
+ assert (cache_entry.Tokens > 0);
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
@@ -953,19 +1011,20 @@ machine(L1Cache, "Token protocol")
l2_select_low_bit,
l2_select_num_bits));
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // assert(getCacheEntry(address).Dirty == false);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // assert(cache_entry.Dirty == false);
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Writeback_Data;
out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
- if (getCacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
@@ -975,9 +1034,9 @@ machine(L1Cache, "Token protocol")
l2_select_low_bit,
l2_select_num_bits));
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // assert(getCacheEntry(address).Dirty == false);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // assert(cache_entry.Dirty == false);
out_msg.Dirty := false;
// always send the data?
@@ -985,11 +1044,12 @@ machine(L1Cache, "Token protocol")
out_msg.Type := CoherenceResponseType:WB_TOKENS;
}
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
+ assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
@@ -997,8 +1057,8 @@ machine(L1Cache, "Token protocol")
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := 1;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
out_msg.Dirty := false;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
@@ -1007,21 +1067,22 @@ machine(L1Cache, "Token protocol")
}
}
}
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
- assert(getCacheEntry(address).Tokens >= 1);
+ cache_entry.Tokens := cache_entry.Tokens - 1;
+ assert(cache_entry.Tokens >= 1);
}
action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and up to N tokens from cache to requestor") {
+ assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
- if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := N_tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
out_msg.Dirty := false;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
@@ -1029,17 +1090,17 @@ machine(L1Cache, "Token protocol")
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - N_tokens;
+ cache_entry.Tokens := cache_entry.Tokens - N_tokens;
}
- else if (getCacheEntry(address).Tokens > 1) {
+ else if (cache_entry.Tokens > 1) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := 1;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- // out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
out_msg.Dirty := false;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
@@ -1047,23 +1108,24 @@ machine(L1Cache, "Token protocol")
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
+ cache_entry.Tokens := cache_entry.Tokens - 1;
}
}
-// assert(getCacheEntry(address).Tokens >= 1);
+// assert(cache_entry.Tokens >= 1);
}
action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- assert(getCacheEntry(address).Tokens > (max_tokens() / 2));
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
if (in_msg.isLocal) {
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
} else {
@@ -1071,116 +1133,121 @@ machine(L1Cache, "Token protocol")
}
}
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
// assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- if (getCacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
out_msg.Type := CoherenceResponseType:DATA_OWNER;
} else {
out_msg.Type := CoherenceResponseType:ACK;
}
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getCacheEntry(address).Tokens > 0);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens > (max_tokens() / 2));
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getCacheEntry(address).Tokens > 0);
- if (getCacheEntry(address).Tokens > 1) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ if (cache_entry.Tokens > 1) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
out_msg.Type := CoherenceResponseType:DATA_OWNER;
} else {
out_msg.Type := CoherenceResponseType:ACK;
}
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
- if (getCacheEntry(address).Tokens > N_tokens) {
- out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens;
+ assert(cache_entry.Tokens >= 1);
+ if (cache_entry.Tokens > N_tokens) {
+ out_msg.Tokens := cache_entry.Tokens - N_tokens;
} else {
- out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ out_msg.Tokens := cache_entry.Tokens - 1;
}
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
- if (getCacheEntry(address).Tokens > N_tokens) {
- getCacheEntry(address).Tokens := N_tokens;
+ if (cache_entry.Tokens > N_tokens) {
+ cache_entry.Tokens := N_tokens;
} else {
- getCacheEntry(address).Tokens := 1;
+ cache_entry.Tokens := 1;
}
}
action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and our tokens but one to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getCacheEntry(address).Tokens > ((max_tokens() / 2) + 1));
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > ((max_tokens() / 2) + 1));
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
- out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens;
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ out_msg.Tokens := cache_entry.Tokens - N_tokens;
} else {
- out_msg.Tokens := getCacheEntry(address).Tokens - 1;
+ out_msg.Tokens := cache_entry.Tokens - 1;
}
assert(out_msg.Tokens > (max_tokens() / 2));
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
- getCacheEntry(address).Tokens := N_tokens;
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ cache_entry.Tokens := N_tokens;
} else {
- getCacheEntry(address).Tokens := 1;
+ cache_entry.Tokens := 1;
}
}
action(fo_sendDataWithOwnerToken, "fo", desc="Send data and owner tokens") {
- assert(getCacheEntry(address).Tokens == ((max_tokens() / 2) + 1));
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens == ((max_tokens() / 2) + 1));
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.Tokens := cache_entry.Tokens;
assert(out_msg.Tokens > (max_tokens() / 2));
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
- out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
@@ -1204,67 +1271,72 @@ machine(L1Cache, "Token protocol")
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
- address, getCacheEntry(address).DataBlk);
+ address, cache_entry.DataBlk);
sequencer.readCallback(address,
GenericMachineType:L1Cache,
- getCacheEntry(address).DataBlk);
+ cache_entry.DataBlk);
}
action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
- address, getCacheEntry(address).DataBlk);
+ address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
sequencer.readCallback(address,
getNondirectHitMachType(address, in_msg.Sender),
- getCacheEntry(address).DataBlk);
+ cache_entry.DataBlk);
}
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
- address, getCacheEntry(address).DataBlk);
+ address, cache_entry.DataBlk);
sequencer.writeCallback(address,
GenericMachineType:L1Cache,
- getCacheEntry(address).DataBlk);
+ cache_entry.DataBlk);
- getCacheEntry(address).Dirty := true;
- DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
+ cache_entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
}
action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
- address, getCacheEntry(address).DataBlk);
+ address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
sequencer.writeCallback(address,
getNondirectHitMachType(address, in_msg.Sender),
- getCacheEntry(address).DataBlk);
+ cache_entry.DataBlk);
}
- getCacheEntry(address).Dirty := true;
- DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
+ cache_entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
}
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
- L1_TBEs[address].IssueCount := 0;
+ set_tbe(L1_TBEs[address]);
+ tbe.IssueCount := 0;
peek(mandatoryQueue_in, CacheMsg) {
- L1_TBEs[address].PC := in_msg.ProgramCounter;
- L1_TBEs[address].AccessType := cache_request_type_to_access_type(in_msg.Type);
+ tbe.PC := in_msg.ProgramCounter;
+ tbe.AccessType := cache_request_type_to_access_type(in_msg.Type);
if (in_msg.Type == CacheRequestType:ATOMIC) {
- L1_TBEs[address].IsAtomic := true;
+ tbe.IsAtomic := true;
}
- L1_TBEs[address].Prefetch := in_msg.Prefetch;
- L1_TBEs[address].AccessMode := in_msg.AccessMode;
+ tbe.Prefetch := in_msg.Prefetch;
+ tbe.AccessMode := in_msg.AccessMode;
}
- L1_TBEs[address].IssueTime := get_time();
+ tbe.IssueTime := get_time();
}
@@ -1317,21 +1389,23 @@ machine(L1Cache, "Token protocol")
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
assert(in_msg.Tokens != 0);
DPRINTF(RubySlicc, "L1 received tokens for address: %s, tokens: %d\n",
in_msg.Address, in_msg.Tokens);
- getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens;
- DPRINTF(RubySlicc, "%d\n", getCacheEntry(address).Tokens);
+ cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
+ DPRINTF(RubySlicc, "%d\n", cache_entry.Tokens);
- if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
- getCacheEntry(address).Dirty := true;
+ if (cache_entry.Dirty == false && in_msg.Dirty) {
+ cache_entry.Dirty := true;
}
}
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- if (L1_TBEs[address].WentPersistent) {
+ assert(is_valid(tbe));
+ if (tbe.WentPersistent) {
// assert(starving == true);
outstandingRequests := outstandingRequests - 1;
enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
@@ -1359,74 +1433,80 @@ machine(L1Cache, "Token protocol")
}
// Update average latency
- if (L1_TBEs[address].IssueCount <= 1) {
- if (L1_TBEs[address].ExternalResponse == true) {
- updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(L1_TBEs[address].IssueTime));
+ if (tbe.IssueCount <= 1) {
+ if (tbe.ExternalResponse == true) {
+ updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(tbe.IssueTime));
}
}
// Profile
- //if (L1_TBEs[address].WentPersistent) {
- // profile_token_retry(address, L1_TBEs[address].AccessType, 2);
+ //if (tbe.WentPersistent) {
+ // profile_token_retry(address, tbe.AccessType, 2);
//}
//else {
- // profile_token_retry(address, L1_TBEs[address].AccessType, 1);
+ // profile_token_retry(address, tbe.AccessType, 1);
//}
- //profile_token_retry(address, L1_TBEs[address].AccessType, L1_TBEs[address].IssueCount);
+ //profile_token_retry(address, tbe.AccessType, tbe.IssueCount);
L1_TBEs.deallocate(address);
+ unset_tbe();
}
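s_deallocateTBE feeds the dynamic-timeout machinery: only first-issue requests (IssueCount <= 1) that were satisfied off-chip (ExternalResponse) contribute a latency sample, so retries and local hits do not skew the running estimate. A sketch combining that filter with the averageLatencyCounter update from the top of this diff; the window size is illustrative:

    #include <cstdio>

    const int kWindow = 16;          // illustrative averaging window
    int averageLatencyCounter = 0;

    int averageLatencyEstimate() { return averageLatencyCounter / kWindow; }

    void updateAverageLatencyEstimate(int latency) {
        // Subtract one old average share, add the new sample,
        // as in: counter := counter - averageLatencyEstimate() + latency.
        averageLatencyCounter += latency - averageLatencyEstimate();
    }

    void onDeallocateTBE(int issueCount, bool externalResponse, int latency) {
        if (issueCount <= 1 && externalResponse)  // clean off-chip samples only
            updateAverageLatencyEstimate(latency);
    }

    int main() {
        onDeallocateTBE(1, true, 240);   // counted
        onDeallocateTBE(3, true, 900);   // retried: ignored
        onDeallocateTBE(1, false, 20);   // on-chip: ignored
        std::printf("estimate=%d\n", averageLatencyEstimate());
        return 0;
    }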
action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
- if (getCacheEntry(address).Tokens > 0) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
out_msg.Type := CoherenceResponseType:DATA_OWNER;
} else {
out_msg.Type := CoherenceResponseType:ACK;
}
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- assert(getCacheEntry(address).Tokens >= 1);
- out_msg.Tokens := getCacheEntry(address).Tokens;
- out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
- getCacheEntry(address).Tokens := 0;
+ cache_entry.Tokens := 0;
}
action(u_writeDataToCache, "u", desc="Write data to cache") {
peek(responseNetwork_in, ResponseMsg) {
- getCacheEntry(address).DataBlk := in_msg.DataBlk;
- if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
- getCacheEntry(address).Dirty := in_msg.Dirty;
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ if (cache_entry.Dirty == false && in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
}
}
}
action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
- assert(getTokens(address) == 0);
+ assert(getTokens(cache_entry) == 0);
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
} else {
L1IcacheMemory.deallocate(address);
}
+ unset_cache_entry();
}
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (L1DcacheMemory.isTagPresent(address) == false) {
- L1DcacheMemory.allocate(address, new Entry);
+ if (is_valid(cache_entry)) {
+ } else {
+ set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
}
}
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (L1IcacheMemory.isTagPresent(address) == false) {
- L1IcacheMemory.allocate(address, new Entry);
+ if (is_valid(cache_entry)) {
+ } else {
+ set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
}
}
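These allocate actions close the handle discipline: if a valid entry is already bound, allocation is skipped; otherwise the entry returned by allocate() is bound immediately with set_cache_entry(), and gg_deallocateL1CacheBlock() unbinds symmetrically with unset_cache_entry(). The pattern in miniature:

    #include <cassert>
    #include <unordered_map>

    struct Entry { int Tokens = 0; };
    std::unordered_map<long, Entry> l1d;   // stand-in for L1DcacheMemory
    Entry* cache_entry = nullptr;          // the implicit bound handle

    void allocateBlock(long addr) {
        if (cache_entry == nullptr)                     // only if unbound
            cache_entry = &l1d.emplace(addr, Entry{}).first->second;
    }

    void deallocateBlock(long addr) {
        assert(cache_entry && cache_entry->Tokens == 0); // no tokens may leak
        l1d.erase(addr);
        cache_entry = nullptr;                           // unset_cache_entry()
    }

    int main() {
        allocateBlock(0x40);
        assert(cache_entry != nullptr);
        allocateBlock(0x40);          // second call is a no-op
        deallocateBlock(0x40);
        assert(cache_entry == nullptr);
        return 0;
    }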
@@ -1442,7 +1522,8 @@ machine(L1Cache, "Token protocol")
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
peek(responseNetwork_in, ResponseMsg) {
- assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
+ assert(is_valid(cache_entry));
+ assert(cache_entry.DataBlk == in_msg.DataBlk);
}
}