author    Brad Beckmann <Brad.Beckmann@amd.com>    2010-08-20 11:46:13 -0700
committer Brad Beckmann <Brad.Beckmann@amd.com>    2010-08-20 11:46:13 -0700
commit    45f6f31d7ac130867977994aa03ea723dafc867a (patch)
tree      b9580499b918ca5299852b14b9044331f6039918 /src/mem/protocol
parent    a2dcbde1657660cef91e8a83ab00f3752a034c64 (diff)
ruby: fixed token bugs associated with owner token counts
This patch fixes several bugs caused by inconsistent assumptions about how many tokens the Owner held. Mike Marty should have fixed these bugs years ago. :)
Diffstat (limited to 'src/mem/protocol')
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L1cache.sm | 152
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L2cache.sm |  73
2 files changed, 165 insertions(+), 60 deletions(-)
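
The changes below all follow from one encoding rule in MOESI_CMP_token: the owner token is folded into the ordinary token count with a weight of max_tokens()/2, so a holder owns the block exactly when its count exceeds half the maximum. The new assertions also imply that the owner token never moves or rests without at least one plain token, so a count of exactly max_tokens()/2 is unreachable, which is what the new assert(...Tokens != (max_tokens() / 2)) checks. A standalone C++ sketch of that arithmetic (not gem5 code; MAX_TOKENS is an arbitrary even placeholder):

    #include <cassert>

    constexpr int MAX_TOKENS   = 1024;             // placeholder; any even total
    constexpr int OWNER_WEIGHT = MAX_TOKENS / 2;   // weight of the owner token

    // A node is the owner exactly when its count exceeds half the total.
    bool holdsOwnerToken(int tokens) { return tokens > OWNER_WEIGHT; }

    // Invariant the patch asserts: a count never equals OWNER_WEIGHT,
    // because the owner token is always accompanied by at least one plain
    // token, while a non-owner can hold at most OWNER_WEIGHT - 1 of them.
    void checkTokenCount(int tokens) {
        assert(tokens >= 0 && tokens <= MAX_TOKENS);
        assert(tokens != OWNER_WEIGHT);
    }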
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 8424479bd..d3e993efa 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -112,6 +112,7 @@ machine(L1Cache, "Token protocol")
// Lock/Unlock for distributed
Persistent_GETX, desc="Another processor has priority to read/write";
Persistent_GETS, desc="Another processor has priority to read";
+ Persistent_GETS_Last_Token, desc="Another processor has priority to read, no more tokens";
Own_Lock_or_Unlock, desc="This processor now has priority";
// Triggers
@@ -208,6 +209,7 @@ machine(L1Cache, "Token protocol")
Entry getCacheEntry(Address addr), return_by_ref="yes" {
if (L1DcacheMemory.isTagPresent(addr)) {
+ assert(L1IcacheMemory.isTagPresent(addr) == false);
return static_cast(Entry, L1DcacheMemory[addr]);
} else {
return static_cast(Entry, L1IcacheMemory[addr]);
@@ -216,6 +218,7 @@ machine(L1Cache, "Token protocol")
int getTokens(Address addr) {
if (L1DcacheMemory.isTagPresent(addr)) {
+ assert(L1IcacheMemory.isTagPresent(addr) == false);
return static_cast(Entry, L1DcacheMemory[addr]).Tokens;
} else if (L1IcacheMemory.isTagPresent(addr)) {
return static_cast(Entry, L1IcacheMemory[addr]).Tokens;
@@ -269,6 +272,7 @@ machine(L1Cache, "Token protocol")
// Make sure the token count is in range
assert(getCacheEntry(addr).Tokens >= 0);
assert(getCacheEntry(addr).Tokens <= max_tokens());
+ assert(getCacheEntry(addr).Tokens != (max_tokens() / 2));
if ((state == State:I_L) ||
(state == State:IM_L) ||
@@ -287,6 +291,7 @@ machine(L1Cache, "Token protocol")
} else if ((state == State:S_L) ||
(state == State:SM_L)) {
assert(getCacheEntry(addr).Tokens >= 1);
+ assert(getCacheEntry(addr).Tokens < (max_tokens() / 2));
// Make sure the line is locked...
// assert(persistentTable.isLocked(addr));
@@ -327,8 +332,7 @@ machine(L1Cache, "Token protocol")
// You have at least half the tokens in O-like states
if (state == State:O || state == State:OM) {
- assert(getCacheEntry(addr).Tokens >= 1); // Must have at least one token
- assert(getCacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+ assert(getCacheEntry(addr).Tokens > (max_tokens() / 2));
}
getCacheEntry(addr).CacheState := state;
@@ -462,7 +466,12 @@ machine(L1Cache, "Token protocol")
trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
} else {
if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
- trigger(Event:Persistent_GETS, in_msg.Address);
+ if (getTokens(in_msg.Address) == 1 ||
+ getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.Address);
+ } else {
+ trigger(Event:Persistent_GETS, in_msg.Address);
+ }
} else {
trigger(Event:Persistent_GETX, in_msg.Address);
}
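
The persistent-request trigger above now recognizes two "last token" situations, not just one: a plain holder down to its single token, and the owner down to the owner token plus one plain token. Continuing the sketch (same placeholder MAX_TOKENS):

    constexpr int MAX_TOKENS = 1024;               // placeholder, as above

    bool isLastToken(int tokens) {
        return tokens == 1                         // last plain token
            || tokens == MAX_TOKENS / 2 + 1;       // owner token + last plain token
    }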
@@ -489,7 +498,8 @@ machine(L1Cache, "Token protocol")
trigger(Event:Transient_GETX, in_msg.Address);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- if ( (L1DcacheMemory.isTagPresent(in_msg.Address) || L1IcacheMemory.isTagPresent(in_msg.Address)) && getCacheEntry(in_msg.Address).Tokens == 1) {
+ if (getTokens(in_msg.Address) == 1 ||
+ getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
if (in_msg.isLocal) {
trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address);
}
@@ -557,16 +567,19 @@ machine(L1Cache, "Token protocol")
if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
trigger(Event:Ack, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
trigger(Event:Data_Shared, in_msg.Address);
} else {
error("Unexpected message");
}
} else {
if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
trigger(Event:Ack_All_Tokens, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
trigger(Event:Data_All_Tokens, in_msg.Address);
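
The new assertions on ACK messages encode the rule that the owner token, with its max_tokens()/2 weight, may only ride on a DATA_OWNER response; token-only and shared-data messages must stay below the ownership threshold. A sketch of that check:

    #include <cassert>
    constexpr int MAX_TOKENS = 1024;               // placeholder, as above

    void checkNonOwnerResponse(int msgTokens) {
        assert(msgTokens < MAX_TOKENS / 2);        // no owner token on an ACK
    }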
@@ -914,10 +927,32 @@ machine(L1Cache, "Token protocol")
getCacheEntry(address).Tokens := 0;
}
- action(cc_sharedReplacement, "\c", desc="Issue dirty writeback") {
+ action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
// don't send writeback if replacing block with no tokens
- if (getCacheEntry(address).Tokens != 0) {
+ assert (getCacheEntry(address).Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache,
+ l2_select_low_bit,
+ l2_select_num_bits));
+
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ // assert(getCacheEntry(address).Dirty == false);
+ out_msg.Dirty := false;
+
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
+ action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
+ if (getCacheEntry(address).Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
@@ -933,16 +968,11 @@ machine(L1Cache, "Token protocol")
out_msg.Dirty := false;
// always send the data?
- if (getCacheEntry(address).Tokens > 1) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.Type := CoherenceResponseType:WB_TOKENS;
- }
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.Type := CoherenceResponseType:WB_TOKENS;
}
- getCacheEntry(address).Tokens := 0;
}
+ getCacheEntry(address).Tokens := 0;
}
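
The single replacement action is split in two: cc_sharedReplacement now always carries data (and asserts it has tokens to return), while the new tr_tokenReplacement returns stray tokens from an invalid block with a control-sized WB_TOKENS message and no data. A sketch of the resulting decision (enum names mirror the protocol's message types; the function itself is illustrative):

    enum class Wb { SHARED_DATA, TOKENS, NONE };

    Wb replacementWriteback(bool sharedState, int tokens) {
        if (sharedState)                   // cc_sharedReplacement
            return Wb::SHARED_DATA;        // data + tokens, always
        return tokens > 0 ? Wb::TOKENS     // tr_tokenReplacement: tokens only
                          : Wb::NONE;      // nothing to write back
    }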
@@ -970,7 +1000,7 @@ machine(L1Cache, "Token protocol")
action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and a token from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
- if (getCacheEntry(address).Tokens > N_tokens) {
+ if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
@@ -1017,7 +1047,7 @@ machine(L1Cache, "Token protocol")
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- assert(getCacheEntry(address).Tokens >= 1);
+ assert(getCacheEntry(address).Tokens > (max_tokens() / 2));
out_msg.Tokens := getCacheEntry(address).Tokens;
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
@@ -1036,11 +1066,16 @@ machine(L1Cache, "Token protocol")
if (getCacheEntry(address).Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:ACK;
+ if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
assert(getCacheEntry(address).Tokens >= 1);
out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
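
e_sendAckWithCollectedTokens can now find itself holding the owner token (for instance after collecting tokens in I), so the response is upgraded from ACK to DATA_OWNER, with the data block attached, whenever the count crosses the ownership threshold. Sketch:

    constexpr int MAX_TOKENS = 1024;               // placeholder, as above

    bool mustSendAsDataOwner(int tokens) {
        return tokens > MAX_TOKENS / 2;            // owner token among the spoils
    }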
@@ -1055,7 +1090,7 @@ machine(L1Cache, "Token protocol")
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
+ assert(getCacheEntry(address).Tokens > (max_tokens() / 2));
out_msg.Tokens := getCacheEntry(address).Tokens;
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
@@ -1070,7 +1105,11 @@ machine(L1Cache, "Token protocol")
if (getCacheEntry(address).Tokens > 1) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:ACK;
+ if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
assert(getCacheEntry(address).Tokens >= 1);
@@ -1079,6 +1118,7 @@ machine(L1Cache, "Token protocol")
} else {
out_msg.Tokens := getCacheEntry(address).Tokens - 1;
}
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
@@ -1091,31 +1131,45 @@ machine(L1Cache, "Token protocol")
action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and all tokens but N or one to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getCacheEntry(address).Tokens > 0);
- if (getCacheEntry(address).Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
+ assert(getCacheEntry(address).Tokens > ((max_tokens() / 2) + 1));
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getCacheEntry(address).Tokens >= 1);
- if (getCacheEntry(address).Tokens > N_tokens) {
+ if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens;
} else {
out_msg.Tokens := getCacheEntry(address).Tokens - 1;
}
+ assert(out_msg.Tokens > (max_tokens() / 2));
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- if (getCacheEntry(address).Tokens > N_tokens) {
- getCacheEntry(address).Tokens := N_tokens;
- } else {
- getCacheEntry(address).Tokens := 1;
- }
+ }
+ if (getCacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
+ getCacheEntry(address).Tokens := N_tokens;
+ } else {
+ getCacheEntry(address).Tokens := 1;
}
}
+ action(fo_sendDataWithOwnerToken, "fo", desc="Send data and owner tokens") {
+ assert(getCacheEntry(address).Tokens == ((max_tokens() / 2) + 1));
+ enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := getCacheEntry(address).Tokens;
+ assert(out_msg.Tokens > (max_tokens() / 2));
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
+ out_msg.Dirty := getCacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ getCacheEntry(address).Tokens := 0;
+ }
+
action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
// assert(persistentTable.isLocked(address));
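
ff now assumes the caller holds strictly more than the owner token plus one plain token; it keeps N_tokens (or one) plain tokens for itself and forwards the rest together with the owner token. The boundary case, where only the owner token and a single plain token remain, moves to the new fo_sendDataWithOwnerToken, which surrenders everything. The arithmetic, sketched with placeholder constants:

    constexpr int MAX_TOKENS = 1024, N_TOKENS = 4; // placeholders

    // How many tokens travel to the starver; the remainder stays local.
    int tokensToStarver(int held) {
        if (held == MAX_TOKENS / 2 + 1)            // fo: owner + last plain token
            return held;                           // send all, keep zero
        if (held > N_TOKENS + MAX_TOKENS / 2)      // ff: plenty of plain tokens
            return held - N_TOKENS;                // keep N_TOKENS of them
        return held - 1;                           // ff: keep a single plain token
    }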
@@ -1313,11 +1367,16 @@ machine(L1Cache, "Token protocol")
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:ACK;
+ if (getCacheEntry(address).Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
assert(getCacheEntry(address).Tokens >= 1);
out_msg.Tokens := getCacheEntry(address).Tokens;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
@@ -1336,6 +1395,7 @@ machine(L1Cache, "Token protocol")
}
action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ assert(getTokens(address) == 0);
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
} else {
@@ -1432,7 +1492,7 @@ machine(L1Cache, "Token protocol")
m_popRequestQueue;
}
- transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(NP, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
l_popPersistentQueue;
}
@@ -1459,7 +1519,7 @@ machine(L1Cache, "Token protocol")
}
transition(I, L1_Replacement) {
- cc_sharedReplacement;
+ tr_tokenReplacement;
gg_deallocateL1CacheBlock;
}
@@ -1472,12 +1532,12 @@ machine(L1Cache, "Token protocol")
m_popRequestQueue;
}
- transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(I, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
- transition(I_L, {Persistent_GETX, Persistent_GETS}) {
+ transition(I_L, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}
@@ -1548,12 +1608,12 @@ machine(L1Cache, "Token protocol")
l_popPersistentQueue;
}
- transition(S, Persistent_GETS, S_L) {
+ transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
f_sendAckWithAllButNorOneTokens;
l_popPersistentQueue;
}
- transition(S_L, Persistent_GETS) {
+ transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}
@@ -1615,6 +1675,11 @@ machine(L1Cache, "Token protocol")
l_popPersistentQueue;
}
+ transition(O, Persistent_GETS_Last_Token, I_L) {
+ fo_sendDataWithOwnerToken;
+ l_popPersistentQueue;
+ }
+
transition(O, Transient_GETS) {
d_sendDataWithToken;
m_popRequestQueue;
@@ -1798,7 +1863,7 @@ machine(L1Cache, "Token protocol")
m_popRequestQueue;
}
- transition(IS, {Persistent_GETX, Persistent_GETS}, IS_L) {
+ transition(IS, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IS_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
@@ -1807,7 +1872,7 @@ machine(L1Cache, "Token protocol")
l_popPersistentQueue;
}
- transition(IM, {Persistent_GETX, Persistent_GETS}, IM_L) {
+ transition(IM, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IM_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
@@ -1821,12 +1886,12 @@ machine(L1Cache, "Token protocol")
l_popPersistentQueue;
}
- transition(SM, Persistent_GETS, SM_L) {
+ transition(SM, {Persistent_GETS, Persistent_GETS_Last_Token}, SM_L) {
f_sendAckWithAllButNorOneTokens;
l_popPersistentQueue;
}
- transition(SM_L, Persistent_GETS) {
+ transition(SM_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}
@@ -1840,6 +1905,11 @@ machine(L1Cache, "Token protocol")
l_popPersistentQueue;
}
+ transition(OM, Persistent_GETS_Last_Token, IM_L) {
+ fo_sendDataWithOwnerToken;
+ l_popPersistentQueue;
+ }
+
// Transitions from IM/SM
transition({IM, SM}, Ack) {
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
index a90b24800..ae239e3ef 100644
--- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -108,6 +108,7 @@ machine(L2Cache, "Token protocol")
// Lock/Unlock
Persistent_GETX, desc="Another processor has priority to read/write";
Persistent_GETS, desc="Another processor has priority to read";
+ Persistent_GETS_Last_Token, desc="Another processor has priority to read, no more tokens";
Own_Lock_or_Unlock, desc="This processor now has priority";
}
@@ -194,6 +195,7 @@ machine(L2Cache, "Token protocol")
// Make sure the token count is in range
assert(getL2CacheEntry(addr).Tokens >= 0);
assert(getL2CacheEntry(addr).Tokens <= max_tokens());
+ assert(getL2CacheEntry(addr).Tokens != (max_tokens() / 2));
// Make sure we have no tokens in L
if ((state == State:I_L) ) {
@@ -219,8 +221,7 @@ machine(L2Cache, "Token protocol")
// You have at least half the tokens in O-like states
if (state == State:O ) {
- assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
- // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+ assert(getL2CacheEntry(addr).Tokens > (max_tokens() / 2));
}
getL2CacheEntry(addr).CacheState := state;
@@ -344,7 +345,12 @@ machine(L2Cache, "Token protocol")
if (persistentTable.isLocked(in_msg.Address)) {
if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
- trigger(Event:Persistent_GETS, in_msg.Address);
+ if (getTokens(in_msg.Address) == 1 ||
+ getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.Address);
+ } else {
+ trigger(Event:Persistent_GETS, in_msg.Address);
+ }
} else {
trigger(Event:Persistent_GETX, in_msg.Address);
}
@@ -386,7 +392,8 @@ machine(L2Cache, "Token protocol")
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:L1_GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
+ if (getTokens(in_msg.Address) == 1 ||
+ getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
trigger(Event:L1_GETS_Last_Token, in_msg.Address);
}
else {
@@ -407,6 +414,7 @@ machine(L2Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
trigger(Event:Ack, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.Address);
@@ -440,6 +448,7 @@ machine(L2Cache, "Token protocol")
}
} else {
if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
trigger(Event:Ack_All_Tokens, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
trigger(Event:Data_All_Tokens, in_msg.Address);
@@ -562,7 +571,7 @@ machine(L2Cache, "Token protocol")
action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
- if (getL2CacheEntry(address).Tokens > N_tokens) {
+ if (getL2CacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
@@ -657,21 +666,34 @@ machine(L2Cache, "Token protocol")
action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all tokens but one to starver") {
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(getL2CacheEntry(address).Tokens > 0);
- if (getL2CacheEntry(address).Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
+ assert(getL2CacheEntry(address).Tokens > (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getL2CacheEntry(address).Tokens >= 1);
out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
out_msg.Dirty := getL2CacheEntry(address).Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- getL2CacheEntry(address).Tokens := 1;
}
+ getL2CacheEntry(address).Tokens := 1;
+ }
+
+ action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := getL2CacheEntry(address).Tokens;
+ out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
+ out_msg.Dirty := getL2CacheEntry(address).Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ getL2CacheEntry(address).Tokens := 0;
}
@@ -702,6 +724,7 @@ machine(L2Cache, "Token protocol")
if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
out_msg.Type := CoherenceResponseType:DATA_SHARED;
} else {
+ assert(in_msg.Tokens < (max_tokens() / 2));
out_msg.Type := CoherenceResponseType:ACK;
}
out_msg.Sender := machineID;
@@ -789,7 +812,7 @@ machine(L2Cache, "Token protocol")
action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- assert(getL2CacheEntry(address).Tokens > 0);
+ assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
@@ -798,9 +821,9 @@ machine(L2Cache, "Token protocol")
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
out_msg.Dirty := getL2CacheEntry(address).Dirty;
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- out_msg.Tokens := 1;
+ out_msg.Tokens := getL2CacheEntry(address).Tokens;
}
- getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
+ getL2CacheEntry(address).Tokens := 0;
}
}
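
Previously k_dataOwnerFromL2CacheToL1Requestor peeled off a single token for the L1 requestor, which is inconsistent once a DATA_OWNER response implies the full owner weight; it now fires only in the last-token case and hands over its entire count. Sketch:

    #include <cassert>
    constexpr int MAX_TOKENS = 1024;               // placeholder, as above

    int dataOwnerPayload(int held) {
        assert(held == MAX_TOKENS / 2 + 1);        // owner token + last plain token
        return held;                               // transfer everything, keep zero
    }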
@@ -940,7 +963,10 @@ machine(L2Cache, "Token protocol")
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
peek(responseNetwork_in, ResponseMsg) {
- assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
+ if (in_msg.Type != CoherenceResponseType:ACK &&
+ in_msg.Type != CoherenceResponseType:WB_TOKENS) {
+ assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
+ }
}
}
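
w_assertIncomingDataAndCacheDataMatch must now tolerate messages that carry tokens but no data block, so the comparison is skipped for ACK and WB_TOKENS. A sketch of the guard:

    // Token-only message types carry no valid DataBlk, so skip the check.
    bool shouldCompareData(bool isAck, bool isWbTokens) {
        return !isAck && !isWbTokens;
    }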
@@ -1014,7 +1040,9 @@ machine(L2Cache, "Token protocol")
}
- transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(NP,
+ {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
+ I_L) {
l_popPersistentQueue;
}
@@ -1048,7 +1076,9 @@ machine(L2Cache, "Token protocol")
m_popRequestQueue;
}
- transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
+ transition(I,
+ {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
+ I_L) {
e_sendAckWithCollectedTokens;
l_popPersistentQueue;
}
@@ -1131,7 +1161,7 @@ machine(L2Cache, "Token protocol")
}
- transition(S, Persistent_GETS, S_L) {
+ transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
f_sendAckWithAllButOneTokens;
l_popPersistentQueue;
}
@@ -1237,6 +1267,11 @@ machine(L2Cache, "Token protocol")
l_popPersistentQueue;
}
+ transition(O, Persistent_GETS_Last_Token, I_L) {
+ fa_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
transition(O, Transient_GETS) {
// send multiple tokens
r_clearExclusive;
@@ -1426,7 +1461,7 @@ machine(L2Cache, "Token protocol")
l_popPersistentQueue;
}
- transition(S_L, Persistent_GETS) {
+ transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
l_popPersistentQueue;
}