path: root/src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm
author     Nathan Binkert <nate@binkert.org>  2009-05-11 10:38:43 -0700
committer  Nathan Binkert <nate@binkert.org>  2009-05-11 10:38:43 -0700
commit     2f30950143cc70bc42a3c8a4111d7cf8198ec881 (patch)
tree       708f6c22edb3c6feb31dd82866c26623a5329580 /src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm
parent     c70241810d4e4f523f173c1646b008dc40faad8e (diff)
download   gem5-2f30950143cc70bc42a3c8a4111d7cf8198ec881.tar.xz
ruby: Import ruby and slicc from GEMS
We eventually plan to replace the m5 cache hierarchy with the GEMS hierarchy, but for now we will make both live alongside each other.
Diffstat (limited to 'src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm')
-rw-r--r--  src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm  921
1 file changed, 921 insertions, 0 deletions
diff --git a/src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm b/src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm
new file mode 100644
index 000000000..b44e502c0
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_bcast_1level-cache.sm
@@ -0,0 +1,921 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+machine(L1Cache, "MOSI Broadcast Optimized") {
+
+ MessageBuffer addressFromCache, network="To", virtual_network="0", ordered="true";
+ MessageBuffer dataFromCache, network="To", virtual_network="1", ordered="false";
+
+ MessageBuffer addressToCache, network="From", virtual_network="0", ordered="true";
+ MessageBuffer dataToCache, network="From", virtual_network="1", ordered="false";
+
+ // STATES
+
+ enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+ NP, desc="Not Present";
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified", format="!b";
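+
+    // Transient states. The first two letters name the starting and ending
+    // stable states (e.g., IS = Idle to Shared, SM = Shared to Modified); the
+    // suffix records what is still outstanding: A = our own request has not
+    // yet been observed on the address network, D = the data has not yet
+    // arrived, AD = both. A trailing _O or _I means another processor's GETS
+    // or GETX was observed in the meantime, so the block must be forwarded,
+    // downgraded, or invalidated once our own transaction completes.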
+ IS_AD, "IS^AD", desc="idle, issued GETS, have not seen GETS or data yet";
+ IM_AD, "IM^AD", desc="idle, issued GETX, have not seen GETX or data yet";
+    SM_AD, "SM^AD", desc="shared, issued GETX, have not seen GETX or data yet";
+    OM_A, "OM^A", desc="owned, issued GETX, have not seen GETX yet", format="!b";
+
+    IS_A, "IS^A", desc="idle, issued GETS, have not seen GETS, have seen data";
+    IM_A, "IM^A", desc="idle, issued GETX, have not seen GETX, have seen data";
+    SM_A, "SM^A", desc="shared, issued GETX, have not seen GETX, have seen data", format="!b";
+
+ MI_A, "MI^A", desc="modified, issued PUTX, have not seen PUTX yet";
+ OI_A, "OI^A", desc="owned, issued PUTX, have not seen PUTX yet";
+    II_A, "II^A", desc="modified or owned, issued PUTX, have not seen PUTX, then saw other GETX", format="!b";
+
+ IS_D, "IS^D", desc="idle, issued GETS, have seen GETS, have not seen data yet";
+ IS_D_I, "IS^D^I", desc="idle, issued GETS, have seen GETS, have not seen data, then saw other GETX";
+ IM_D, "IM^D", desc="idle, issued GETX, have seen GETX, have not seen data yet";
+ IM_D_O, "IM^D^O", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
+ IM_D_I, "IM^D^I", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETX";
+ IM_D_OI, "IM^D^OI", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS, then saw other GETX";
+ SM_D, "SM^D", desc="shared, issued GETX, have seen GETX, have not seen data yet";
+ SM_D_O, "SM^D^O", desc="shared, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
+ }
+
+ // ** EVENTS **
+
+ enumeration(Event, desc="Cache events") {
+ // From processor
+ Load, desc="Load request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store, desc="Store request from the processor";
+ Replacement, desc="Replacement";
+ Load_prefetch, desc="Read only prefetch";
+ Store_prefetch, desc="Read write prefetch", format="!r";
+
+ // From Address network
+ Own_GETS, desc="Occurs when we observe our own GETS request in the global order";
+ Own_GET_INSTR, desc="Occurs when we observe our own GETInstr request in the global order";
+ Own_GETX, desc="Occurs when we observe our own GETX request in the global order";
+ Own_PUTX, desc="Occurs when we observe our own PUTX request in the global order", format="!r";
+ Other_GETS, desc="Occurs when we observe a GETS request from another processor";
+ Other_GET_INSTR, desc="Occurs when we observe a GETInstr request from another processor";
+ Other_GETX, desc="Occurs when we observe a GETX request from another processor";
+ Other_PUTX, desc="Occurs when we observe a PUTX request from another processor", format="!r";
+
+ // From Data network
+ Data, desc="Data for this block from the data network";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // TBE fields
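+  // A TBE (transaction buffer entry) tracks an outstanding request from the
+  // time it is issued until our own request has been observed on the address
+  // network and any required data has arrived. It buffers the data block and
+  // records the requestors that the block must be forwarded to.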
+ structure(TBE, desc="...") {
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ NetDest ForwardIDs, desc="IDs of the processors to forward the block";
+ Address ForwardAddress, desc="Address of request for forwarding";
+ bool isPrefetch, desc="Set if this request is a prefetch";
+ }
+
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+ MessageBuffer optionalQueue, ordered="true", abstract_chip_ptr="true";
+ Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+ StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
+
+
+ TBETable TBEs, template_hack="<L1Cache_TBE>";
+ CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified"', abstract_chip_ptr="true";
+
+ int cache_state_to_int(State state);
+
+ State getState(Address addr) {
+ if(TBEs.isPresent(addr)) {
+ return TBEs[addr].TBEState;
+ } else if (cacheMemory.isTagPresent(addr)) {
+ return cacheMemory[addr].CacheState;
+ }
+ return State:NP;
+ }
+
+ void setState(Address addr, State state) {
+ if (TBEs.isPresent(addr)) {
+ TBEs[addr].TBEState := state;
+ }
+ if (cacheMemory.isTagPresent(addr)) {
+ cacheMemory[addr].CacheState := state;
+
+ // Set permission
+ if ((state == State:I) || (state == State:MI_A) || (state == State:II_A)) {
+ cacheMemory.changePermission(addr, AccessPermission:Invalid);
+ } else if (state == State:S || state == State:O) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Only);
+ } else if (state == State:M) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Write);
+ } else {
+ cacheMemory.changePermission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(dataNetwork_out, DataMsg, dataFromCache);
+ out_port(addressNetwork_out, AddressMsg, addressFromCache);
+
+ // ** IN_PORTS **
+
+ // Data Network
+ in_port(dataNetwork_in, DataMsg, dataToCache) {
+ if (dataNetwork_in.isReady()) {
+ peek(dataNetwork_in, DataMsg) {
+ trigger(Event:Data, in_msg.Address);
+ }
+ }
+ }
+
+ // Address Network
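+  // All requests, including our own, are snooped on the ordered address
+  // network; comparing in_msg.Requestor against machineID decides whether an
+  // Own_* or Other_* event is triggered, which is how this broadcast protocol
+  // serializes requests into a single global order.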
+ in_port(addressNetwork_in, AddressMsg, addressToCache) {
+ if (addressNetwork_in.isReady()) {
+ peek(addressNetwork_in, AddressMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_GETS, in_msg.Address);
+ } else {
+ trigger(Event:Other_GETS, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_GETX, in_msg.Address);
+ } else {
+ trigger(Event:Other_GETX, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_GET_INSTR, in_msg.Address);
+ } else {
+ trigger(Event:Other_GET_INSTR, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_PUTX, in_msg.Address);
+ } else {
+ trigger(Event:Other_PUTX, in_msg.Address);
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if (in_msg.Type == CacheRequestType:LD) {
+ trigger(Event:Load, in_msg.Address);
+ } else if (in_msg.Type == CacheRequestType:IFETCH) {
+ trigger(Event:Ifetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // Optional Queue
+ in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
+ if (optionalQueue_in.isReady()) {
+ peek(optionalQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if ((in_msg.Type == CacheRequestType:LD) || (in_msg.Type == CacheRequestType:IFETCH)) {
+ trigger(Event:Load_prefetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store_prefetch, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+  action(a_allocateTBE, "a", desc="Allocate TBE, clear ForwardIDs and the prefetch bit, and copy the current cache state.") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ TBEs[address].isPrefetch := false;
+ TBEs[address].ForwardIDs.clear();
+
+ // Keep the TBE state consistent with the cache state
+ if (cacheMemory.isTagPresent(address)) {
+ TBEs[address].TBEState := cacheMemory[address].CacheState;
+ }
+ }
+
+
+ action(b_setPrefetchBit, "b", desc="Set prefetch bit in TBE.") {
+ TBEs[address].isPrefetch := true;
+ }
+
+ action(c_allocateCacheBlock, "c", desc="Set cache tag equal to tag of block B.") {
+ if (cacheMemory.isTagPresent(address) == false) {
+ cacheMemory.allocate(address);
+ }
+ }
+
+ action(d_deallocateTBE, "d", desc="Deallocate TBE.") {
+ TBEs.deallocate(address);
+ }
+
+ action(e_recordForwardingInfo, "e", desc="Record ID of other processor in ForwardID.") {
+ peek(addressNetwork_in, AddressMsg){
+ TBEs[address].ForwardIDs.add(in_msg.Requestor);
+ TBEs[address].ForwardAddress := in_msg.Address;
+ }
+ }
+
+ action(f_issueGETS, "f", desc="Issue GETS.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(g_issueGETX, "g", desc="Issue GETX.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.readCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.writeCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ action(i_popAddressQueue, "i", desc="Pop incoming address queue.") {
+ addressNetwork_in.dequeue();
+ }
+
+ action(j_popDataQueue, "j", desc="Pop incoming data queue.") {
+ dataNetwork_in.dequeue();
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popOptionalQueue, "l", desc="Pop optional queue.") {
+ optionalQueue_in.dequeue();
+ }
+
+
+ action(o_cacheToForward, "o", desc="Send data from the cache to the processor indicated by ForwardIDs.") {
+ peek(dataNetwork_in, DataMsg){
+ // This has a CACHE_RESPONSE_LATENCY latency because we want to avoid the
+ // timing strangeness that can occur if requests that source the
+ // data from the TBE are faster than data sourced from the cache
+ enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY"){
+ out_msg.Address := TBEs[address].ForwardAddress;
+ out_msg.Sender := machineID;
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.Destination := TBEs[address].ForwardIDs;
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(p_issuePUTX, "p", desc="Issue PUTX.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.Destination.add(machineID); // Back to us
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(q_writeDataFromCacheToTBE, "q", desc="Write data from the cache into the TBE.") {
+ TBEs[address].DataBlk := cacheMemory[address].DataBlk;
+ DEBUG_EXPR(TBEs[address].DataBlk);
+ }
+
+ action(r_cacheToRequestor, "r", desc="Send data from the cache to the requestor") {
+ peek(addressNetwork_in, AddressMsg) {
+ enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ }
+ }
+
+
+ action(s_saveDataInTBE, "s", desc="Save data in data field of TBE.") {
+ peek(dataNetwork_in, DataMsg) {
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(TBEs[address].DataBlk);
+ }
+ }
+
+ action(t_issueGET_INSTR, "t", desc="Issue GETInstr.") {
+ enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GET_INSTR;
+ out_msg.CacheState := cache_state_to_int(getState(address));
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(w_writeDataFromTBEToCache, "w", desc="Write data from the TBE into the cache.") {
+ cacheMemory[address].DataBlk := TBEs[address].DataBlk;
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ }
+
+ action(y_tbeToReq, "y", desc="Send data from the TBE to the requestor.") {
+ peek(addressNetwork_in, AddressMsg) {
+ enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") { // Either this or the PutX should have a real latency
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ cacheMemory.deallocate(address);
+ }
+
+ action(z_stall, "z", desc="Cannot be handled right now.") {
+ // Special name recognized as do nothing case
+ }
+
+ // TRANSITIONS
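+
+  // Each transition lists the current state(s), the triggering event, an
+  // optional next state, and the actions (defined above) that are executed
+  // in order when the transition fires.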
+
+ // Transitions from Idle
+ transition({NP, I}, Load, IS_AD) {
+ f_issueGETS;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP, I}, Ifetch, IS_AD) {
+ t_issueGET_INSTR;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP, I}, Load_prefetch, IS_AD) {
+ f_issueGETS;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ b_setPrefetchBit;
+ l_popOptionalQueue;
+ }
+
+ transition({NP, I}, Store, IM_AD) {
+ g_issueGETX;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP, I}, Store_prefetch, IM_AD) {
+ g_issueGETX;
+ c_allocateCacheBlock;
+ a_allocateTBE;
+ b_setPrefetchBit;
+ l_popOptionalQueue;
+ }
+
+ transition(I, Replacement) {
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition({NP, I}, { Other_GETS, Other_GET_INSTR, Other_GETX } ) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Load_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ transition(S, Store, SM_AD) {
+ g_issueGETX;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store_prefetch, IM_AD) {
+ g_issueGETX;
+ a_allocateTBE;
+ b_setPrefetchBit; // Must be after allocate TBE
+ l_popOptionalQueue;
+ }
+
+ transition(S, Replacement, I) {
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition(S, {Other_GETS, Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+
+ transition(S, Other_GETX, I) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from Owned
+ transition(O, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Store, OM_A){
+ g_issueGETX;
+ a_allocateTBE;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Load_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ transition(O, Store_prefetch, OM_A) {
+ g_issueGETX;
+ a_allocateTBE;
+ b_setPrefetchBit;
+ l_popOptionalQueue;
+ }
+
+ transition(O, Replacement, OI_A) {
+ p_issuePUTX;
+ a_allocateTBE;
+    q_writeDataFromCacheToTBE; // copy the data into the TBE before the block is deallocated
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition(O, {Other_GETS,Other_GET_INSTR}) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ transition(O, Other_GETX, I) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, {Load_prefetch,Store_prefetch}) {
+ l_popOptionalQueue;
+ }
+
+ transition(M, Replacement, MI_A) {
+ p_issuePUTX;
+ a_allocateTBE;
+    q_writeDataFromCacheToTBE; // copy the data into the TBE before the block is deallocated
+ ff_deallocateCacheBlock; // the cache line is now in NotPresent
+ }
+
+ transition(M, {Other_GETS,Other_GET_INSTR}, O) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ transition(M, Other_GETX, I) {
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+
+ // Transitions for Load/Store/Replacement from transient states
+
+ transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O}, {Load, Ifetch, Store, Replacement}) {
+ z_stall;
+ }
+
+ transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IM_D, IM_D_O, SM_D, SM_D_O}, Load_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ transition({IS_D_I, IM_D_I, IM_D_OI}, Load_prefetch) {
+ z_stall;
+ }
+
+ transition({IM_AD, SM_AD, OM_A, IM_A, SM_A, IM_D, SM_D}, Store_prefetch) {
+ l_popOptionalQueue;
+ }
+
+ transition({IS_AD, IS_A, IS_D, IS_D_I, IM_D_O, IM_D_I, IM_D_OI, SM_D_O}, Store_prefetch) {
+ z_stall;
+ }
+
+ transition({MI_A, OI_A, II_A}, {Load, Ifetch, Store, Load_prefetch, Store_prefetch, Replacement}) {
+ z_stall;
+ }
+
+ // Always ignore PUTXs which we are not the owner of
+ transition({NP, I, S, O, M, IS_AD, IM_AD, SM_AD, OM_A, IS_A, IM_A, SM_A, MI_A, OI_A, II_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O }, Other_PUTX) {
+ i_popAddressQueue;
+ }
+
+ // transitions from IS_AD
+
+ transition(IS_AD, {Own_GETS,Own_GET_INSTR}, IS_D) {
+ i_popAddressQueue;
+ }
+ transition(IS_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+ transition(IS_AD, Data, IS_A) {
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+
+ // Transitions from IM_AD
+
+ transition(IM_AD, Own_GETX, IM_D) {
+ i_popAddressQueue;
+ }
+ transition(IM_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+ transition(IM_AD, Data, IM_A) {
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions from OM_A
+
+ transition(OM_A, Own_GETX, M){
+ hh_store_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ transition(OM_A, {Other_GETS, Other_GET_INSTR}){
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ transition(OM_A, Other_GETX, IM_AD){
+ r_cacheToRequestor;
+ i_popAddressQueue;
+ }
+
+ transition(OM_A, Data, IM_A) { // if we get data, we know we're going to lose block before we see own GETX
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions from SM_AD
+
+ transition(SM_AD, Own_GETX, SM_D) {
+ i_popAddressQueue;
+ }
+ transition(SM_AD, {Other_GETS,Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+ transition(SM_AD, Other_GETX, IM_AD) {
+ i_popAddressQueue;
+ }
+ transition(SM_AD, Data, SM_A) {
+ s_saveDataInTBE;
+ j_popDataQueue;
+ }
+
+
+ // Transitions from IS_A
+
+ transition(IS_A, {Own_GETS,Own_GET_INSTR}, S) {
+ w_writeDataFromTBEToCache;
+ h_load_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+ transition(IS_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from IM_A
+
+ transition(IM_A, Own_GETX, M) {
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+ transition(IM_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from SM_A
+
+ transition(SM_A, Own_GETX, M) {
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+ transition(SM_A, {Other_GETS,Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+ transition(SM_A, Other_GETX, IM_A) {
+ i_popAddressQueue;
+ }
+
+
+ // Transitions from MI_A
+
+ transition(MI_A, Own_PUTX, I) {
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ transition(MI_A, {Other_GETS, Other_GET_INSTR}) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+ transition(MI_A, Other_GETX, II_A) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+ // Transitions from OI_A
+
+ transition(OI_A, Own_PUTX, I) {
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ transition(OI_A, {Other_GETS, Other_GET_INSTR}) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+ transition(OI_A, Other_GETX, II_A) {
+ y_tbeToReq;
+ i_popAddressQueue;
+ }
+
+
+ // Transitions from II_A
+
+ transition(II_A, Own_PUTX, I) {
+ d_deallocateTBE;
+ i_popAddressQueue;
+ }
+
+ transition(II_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
+ i_popAddressQueue;
+ }
+
+ // Transitions from IS_D, IS_D_I
+
+ transition({IS_D, IS_D_I}, {Other_GETS,Other_GET_INSTR}) {
+ i_popAddressQueue;
+ }
+ transition(IS_D, Other_GETX, IS_D_I) {
+ i_popAddressQueue;
+ }
+ transition(IS_D_I, Other_GETX) {
+ i_popAddressQueue;
+ }
+ transition(IS_D, Data, S) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ h_load_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ transition(IS_D_I, Data, I) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ h_load_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions from IM_D, IM_D_O, IM_D_I, IM_D_OI
+
+ transition( IM_D, {Other_GETS,Other_GET_INSTR}, IM_D_O ) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition( IM_D, Other_GETX, IM_D_I ) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(IM_D_O, {Other_GETS,Other_GET_INSTR} ) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(IM_D_O, Other_GETX, IM_D_OI) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition( {IM_D_I, IM_D_OI}, {Other_GETS, Other_GET_INSTR, Other_GETX} ) {
+ i_popAddressQueue;
+ }
+
+ transition(IM_D, Data, M) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ transition(IM_D_O, Data, O) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ transition(IM_D_I, Data, I) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ transition(IM_D_OI, Data, I) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ // Transitions for SM_D, SM_D_O
+
+ transition(SM_D, {Other_GETS,Other_GET_INSTR}, SM_D_O) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(SM_D, Other_GETX, IM_D_I) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(SM_D_O, {Other_GETS,Other_GET_INSTR}) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(SM_D_O, Other_GETX, IM_D_OI) {
+ e_recordForwardingInfo;
+ i_popAddressQueue;
+ }
+
+ transition(SM_D, Data, M) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+ transition(SM_D_O, Data, O) {
+ s_saveDataInTBE;
+ w_writeDataFromTBEToCache;
+ hh_store_hit;
+ o_cacheToForward;
+ d_deallocateTBE;
+ j_popDataQueue;
+ }
+
+}