author    Nathan Binkert <nate@binkert.org>  2009-05-11 10:38:43 -0700
committer Nathan Binkert <nate@binkert.org>  2009-05-11 10:38:43 -0700
commit    2f30950143cc70bc42a3c8a4111d7cf8198ec881 (patch)
tree      708f6c22edb3c6feb31dd82866c26623a5329580 /src/mem/protocol/MOSI_SMP_directory_1level-cache.sm
parent    c70241810d4e4f523f173c1646b008dc40faad8e (diff)
ruby: Import ruby and slicc from GEMS
We eventually plan to replace the m5 cache hierarchy with the GEMS hierarchy, but for now the two will live alongside each other.
Diffstat (limited to 'src/mem/protocol/MOSI_SMP_directory_1level-cache.sm')
-rw-r--r--  src/mem/protocol/MOSI_SMP_directory_1level-cache.sm | 838
1 file changed, 838 insertions, 0 deletions
diff --git a/src/mem/protocol/MOSI_SMP_directory_1level-cache.sm b/src/mem/protocol/MOSI_SMP_directory_1level-cache.sm
new file mode 100644
index 000000000..f780bb473
--- /dev/null
+++ b/src/mem/protocol/MOSI_SMP_directory_1level-cache.sm
@@ -0,0 +1,838 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOSI_directory_1level-cache.sm 1.18 04/09/07 13:52:52-05:00 mikem@maya.cs.wisc.edu $
+ *
+ */
+
+machine(L1Cache, "MOSI Directory Optimized") {
+
+ MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
+ MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
+
+ MessageBuffer forwardedRequestToCache, network="From", virtual_network="1", ordered="true";
+ MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
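+
+ // Virtual network usage (from the declarations above): cache requests go
+ // out on virtual network 0, the directory forwards requests on virtual
+ // network 1, and data/ack responses travel on virtual network 2.  Only
+ // the forwarded-request network is ordered.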
+
+ // STATES
+ enumeration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified", format="!b";
+
+ // Transient States
+ MI, desc="modified, issued PUTX, have not seen response yet";
+ OI, desc="owned, issued PUTX, have not seen response yet";
+
+ IS, desc="idle, issued GETS, have not seen response yet";
+ ISI, desc="idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
+
+ IM, desc="idle, issued GETX, have not seen response yet";
+ IMI, desc="idle, issued GETX, saw forwarded GETX";
+ IMO, desc="idle, issued GETX, saw forwarded GETS";
+ IMOI, desc="idle, issued GETX, saw forwarded GETS, saw forwarded GETX";
+
+ // Note: OM is a strange state, because it is waiting for the line
+ // to be stolen away, or to look like it has been stolen away.  The
+ // common case is that we see a forward from the directory that is
+ // really from us: we forwarded the data to our own data queue, and
+ // everything works fine.
+
+ OM, desc="owned, issued GETX, have not seen response yet";
+ }
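+
+ // Example walkthrough of the transient states (one possible sequence,
+ // matching the transitions below): a store to an Idle line moves I -> IM
+ // and issues a GETX.  If a forwarded GETS arrives before the data, we move
+ // to IMO and record the requestor; once the data and all acks arrive, the
+ // store completes, the recorded requestors are answered from the cache,
+ // and the line settles in O.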
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ Load, desc="Load request from the processor";
+ Load_prefetch, desc="Load prefetch request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store_prefetch, desc="Store prefetch request from the processor";
+ Store, desc="Store request from the processor";
+ Replacement, desc="Replacement", format="!r";
+
+ Forwarded_GETS, "Forwarded GETS", desc="Directory forwards GETS to us";
+ Forwarded_GETX, "Forwarded GETX", desc="Directory forwards GETX to us";
+ INV, "INV", desc="Invalidation", format="!r";
+
+ Proc_ack, "Proc ack", desc="Ack from proc";
+ Proc_last_ack, "Proc last ack", desc="Last ack", format="!r";
+
+ Data_ack_0, "Data ack 0", desc="Data with ack count = 0";
+ Data_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but we have not yet seen all acks)";
+ Data_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";
+
+ Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
+ }
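+
+ // Ack-count example, consistent with the response-port logic below:
+ // suppose our GETX finds two sharers.  The directory sends data with an
+ // ack count of 2 and each sharer sends an ack.  If the data arrives first
+ // it triggers Data_ack_not_0 and the final ack triggers Proc_last_ack; if
+ // both acks beat the data, the data message itself is the last response
+ // and triggers Data_ack_not_0_last.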
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Address Address, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ int NumPendingAcks, desc="Number of acks that this processor is waiting for";
+ NetDest ForwardGetS_IDs, desc="Set of processors to forward the block to";
+ MachineID ForwardGetX_ID, desc="ID of the processor to forward the block to";
+ int ForwardGetX_AckCount, desc="Number of acks needed by the GetX that was forwarded to us";
+ bool isPrefetch, desc="Set if this was caused by a prefetch";
+ }
+
+ external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ Entry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
+ MessageBuffer optionalQueue, ordered="false", abstract_chip_ptr="true";
+ Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+ StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
+
+ TBETable TBEs, template_hack="<L1Cache_TBE>";
+ CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified L1"', abstract_chip_ptr="true";
+
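+ // State lookup: an entry in the TBE table, if present, takes precedence
+ // over the cache tag state; an address tracked by neither is Idle.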
+ State getState(Address addr) {
+ if(TBEs.isPresent(addr)) {
+ return TBEs[addr].TBEState;
+ } else if (cacheMemory.isTagPresent(addr)) {
+ return cacheMemory[addr].CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(Address addr, State state) {
+ if (TBEs.isPresent(addr)) {
+ TBEs[addr].TBEState := state;
+ }
+ if (cacheMemory.isTagPresent(addr)) {
+ cacheMemory[addr].CacheState := state;
+
+ // Set permission
+ if ((state == State:I) || (state == State:MI) || (state == State:OI)) {
+ cacheMemory.changePermission(addr, AccessPermission:Invalid);
+ } else if (state == State:S || state == State:O) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Only);
+ } else if (state == State:M) {
+ cacheMemory.changePermission(addr, AccessPermission:Read_Write);
+ } else {
+ cacheMemory.changePermission(addr, AccessPermission:Busy);
+ }
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+
+ // ** IN_PORTS **
+
+ // Response Network
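+ // Acks can arrive before the data they belong to, driving the TBE's
+ // NumPendingAcks negative; the sum test below (ack count in the message
+ // plus the TBE's running count) therefore fires Data_ack_not_0_last
+ // exactly when the data message is the last outstanding response.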
+ in_port(responseNetwork_in, ResponseMsg, responseToCache) {
+ if (responseNetwork_in.isReady()) {
+ peek(responseNetwork_in, ResponseMsg) {
+ if(in_msg.Type == CoherenceResponseType:DATA) {
+ if(in_msg.NumPendingAcks == 0) {
+ trigger(Event:Data_ack_0, in_msg.Address);
+ } else {
+ if(in_msg.NumPendingAcks + TBEs[in_msg.Address].NumPendingAcks != 0) {
+ trigger(Event:Data_ack_not_0, in_msg.Address);
+ } else {
+ trigger(Event:Data_ack_not_0_last, in_msg.Address);
+ }
+ }
+ } else if(in_msg.Type == CoherenceResponseType:ACK) {
+ if(TBEs[in_msg.Address].NumPendingAcks != 1){
+ trigger(Event:Proc_ack, in_msg.Address);
+ } else {
+ trigger(Event:Proc_last_ack, in_msg.Address);
+ }
+ }
+ }
+ }
+ }
+
+ // Forwarded Request network
+ in_port(forwardedRequestNetwork_in, RequestMsg, forwardedRequestToCache) {
+ if(forwardedRequestNetwork_in.isReady()) {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ if(in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Forwarded_GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Forwarded_GETX, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:INV, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+ trigger(Event:Dir_WB_ack, in_msg.Address);
+ } else {
+ error("Invalid forwarded request type");
+ }
+ }
+ }
+ }
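+
+ // All four forwarded message types share ordered virtual network 1, so a
+ // forwarded GETS/GETX that the directory sent before its WB_ACK is seen
+ // first; the MI and OI transitions below appear to rely on this when
+ // forwarding data out of the TBE.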
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady()) {
+ peek(mandatoryQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if (in_msg.Type == CacheRequestType:LD) {
+ trigger(Event:Load, in_msg.Address);
+ } else if (in_msg.Type == CacheRequestType:IFETCH) {
+ trigger(Event:Ifetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // Optional Queue
+ in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
+ if (optionalQueue_in.isReady()) {
+ peek(optionalQueue_in, CacheMsg) {
+ if (cacheMemory.cacheAvail(in_msg.Address) == false) {
+ trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
+ } else {
+ if (in_msg.Type == CacheRequestType:LD) {
+ trigger(Event:Load_prefetch, in_msg.Address);
+ } else if (in_msg.Type == CacheRequestType:IFETCH) {
+ trigger(Event:Load_prefetch, in_msg.Address);
+ } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
+ trigger(Event:Store_prefetch, in_msg.Address);
+ } else {
+ error("Invalid CacheRequestType");
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(d_issuePUTX, "d", desc="Issue PUTX") {
+ enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(e_dataFromCacheToRequestor, "e", desc="Send data from cache to requestor") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state O and we see a GetX
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(g_allocateCacheBlock, "g", desc="Allocate cache block") {
+ if (cacheMemory.isTagPresent(address) == false) {
+ cacheMemory.allocate(address);
+ }
+ }
+
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.readCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+ DEBUG_EXPR(cacheMemory[address].DataBlk);
+ if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
+ // Non-prefetch
+ sequencer.writeCallback(address, cacheMemory[address].DataBlk);
+ } else {
+ // Prefetch - don't call back
+ }
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ TBEs[address].NumPendingAcks := 0; // default value
+ TBEs[address].isPrefetch := false;
+ TBEs[address].ForwardGetS_IDs.clear();
+ }
+
+ action(j_setPrefetchBit, "j", desc="Set prefetch bit") {
+ TBEs[address].isPrefetch := true;
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue();
+ }
+
+ action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
+ forwardedRequestNetwork_in.dequeue();
+ }
+
+ action(m_popOptionalQueue, "m", desc="Pop optional queue") {
+ optionalQueue_in.dequeue();
+ }
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
+ responseNetwork_in.dequeue();
+ }
+
+ action(p_addNumberOfPendingAcks, "p", desc="Add number of pending acks to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks + in_msg.NumPendingAcks;
+ DEBUG_EXPR(in_msg.NumPendingAcks);
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ }
+ }
+
+ action(q_decrementNumberOfPendingAcks, "q", desc="Decrement number of pending invalidations by one") {
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks - 1;
+ DEBUG_EXPR(TBEs[address].NumPendingAcks);
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ }
+
+ action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.NumPendingAcks := 0;
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ cacheMemory[address].DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
+ TBEs[address].DataBlk := cacheMemory[address].DataBlk;
+ }
+
+ action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state MI or OI and we see a GetX
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := TBEs[address].DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ action(z_stall, "z", desc="Stall") {
+ }
+
+ action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ TBEs[address].ForwardGetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+ action(ee_dataFromCacheToGetSForwardIDs, "\e", desc="Send data from cache to GetS ForwardIDs") {
+ // FIXME - In some cases this should be from the TBE, not the cache.
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination := TBEs[address].ForwardGetS_IDs;
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.NumPendingAcks := 0;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ cacheMemory.deallocate(address);
+ }
+
+ action(gg_dataFromCacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
+ // FIXME - In some cases this should be from the TBE, not the cache.
+ enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(TBEs[address].ForwardGetX_ID);
+ out_msg.DestMachine := MachineType:L1Cache;
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.NumPendingAcks := TBEs[address].ForwardGetX_AckCount;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
+ peek(forwardedRequestNetwork_in, RequestMsg) {
+ TBEs[address].ForwardGetX_ID := in_msg.Requestor;
+ TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingAcks;
+ }
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/Prefetch/Replacement from transient states
+ transition({OM, OI, IS, ISI, IM, IMO, IMOI, IMI, MI}, {Load, Load_prefetch, Ifetch, Store, Store_prefetch, Replacement}) {
+ z_stall;
+ }
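+
+ // z_stall does not pop any queue, so the stalled request stays at the
+ // head of its message buffer and re-fires once the transient state
+ // resolves.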
+
+ // Transitions from Idle
+ transition(I, {Load,Ifetch}, IS) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, {Load_prefetch}, IS) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ j_setPrefetchBit;
+ a_issueGETS;
+ m_popOptionalQueue;
+ }
+
+ transition(I, Store, IM) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Store_prefetch, IM) {
+ g_allocateCacheBlock;
+ i_allocateTBE;
+ j_setPrefetchBit;
+ b_issueGETX;
+ m_popOptionalQueue;
+ }
+
+ transition(I, Replacement) {
+ ff_deallocateCacheBlock;
+ }
+
+ transition(I, INV) {
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition({S, O}, {Load,Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, O, M}, Load_prefetch) {
+ m_popOptionalQueue;
+ }
+
+ transition(S, Store, IM) {
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store_prefetch, IM) {
+ i_allocateTBE;
+ j_setPrefetchBit;
+ b_issueGETX;
+ m_popOptionalQueue;
+ }
+
+ transition(S, Replacement, I) {
+ ff_deallocateCacheBlock;
+ }
+
+ transition(S, INV, I) {
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store_prefetch) {
+ m_popOptionalQueue;
+ }
+
+ transition(M, Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUTX;
+ x_copyDataFromCacheToTBE;
+ ff_deallocateCacheBlock;
+ }
+
+ transition(M, Forwarded_GETS, O) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(M, Forwarded_GETX, I) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from O
+ transition(O, Store, OM) {
+ i_allocateTBE;
+ b_issueGETX;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, Store_prefetch, OM) {
+ i_allocateTBE;
+ j_setPrefetchBit;
+ b_issueGETX;
+ m_popOptionalQueue;
+ }
+
+ transition(O, Replacement, OI){
+ i_allocateTBE;
+ d_issuePUTX;
+ x_copyDataFromCacheToTBE;
+ ff_deallocateCacheBlock;
+ }
+
+ transition(O, Forwarded_GETS) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(O, Forwarded_GETX, I) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // transitions from OI
+
+ transition(OI, Forwarded_GETS) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(OI, Forwarded_GETX) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(OI, Dir_WB_ack, I) {
+ s_deallocateTBE;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from IS
+
+ transition(IS, INV, ISI) {
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IS, Data_ack_0, S) {
+ u_writeDataToCache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from ISI
+
+ // In ISI, we could get data from the processor whose GETX caused the INV
+ // that moved us from IS to ISI; or from the directory, if the directory's
+ // data lost the race with its own INV; or from the directory, if our GETS
+ // took so long to arrive that the GETX processor had already written the
+ // line back.
+
+ transition(ISI, Data_ack_0, I) {
+ u_writeDataToCache;
+ h_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(ISI, INV) {
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from IM
+
+ transition(IM, INV) { // do not need to go to IMI, since INV is for earlier epoch
+ t_sendAckToInvalidator;
+ l_popForwardedRequestQueue;
+ }
+
+ transition({IM, IMO}, Forwarded_GETS, IMO) {
+ dd_recordGetSForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IM, Forwarded_GETX, IMI) {
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IM, {Data_ack_0, Data_ack_not_0_last}, M) {
+ u_writeDataToCache;
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Data_ack_not_0) {
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Proc_last_ack, M) {
+ hh_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from IMO
+
+ transition(IMO, Forwarded_GETX, IMOI) {
+ ii_recordGetXForwardID;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(IMO, {Data_ack_0, Data_ack_not_0_last}, O) {
+ u_writeDataToCache;
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMO, Data_ack_not_0) {
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMO, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMO, Proc_last_ack, O) {
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from IMI
+
+ transition(IMI, {Data_ack_0, Data_ack_not_0_last}, I) {
+ u_writeDataToCache;
+ hh_store_hit;
+ gg_dataFromCacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMI, Data_ack_not_0) {
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMI, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMI, Proc_last_ack, I) {
+ hh_store_hit;
+ gg_dataFromCacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // transitions from IMOI
+
+ transition(IMOI, {Data_ack_0, Data_ack_not_0_last}, I) {
+ u_writeDataToCache;
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs;
+ gg_dataFromCacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMOI, Data_ack_not_0) {
+ u_writeDataToCache;
+ p_addNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMOI, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IMOI, Proc_last_ack, I) {
+ hh_store_hit;
+ ee_dataFromCacheToGetSForwardIDs;
+ gg_dataFromCacheToGetXForwardID;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ // Transitions from OM
+ transition(OM, Proc_ack) {
+ q_decrementNumberOfPendingAcks;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(OM, Forwarded_GETS) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(OM, Forwarded_GETX, IM) {
+ e_dataFromCacheToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ // Transitions from MI
+
+ transition(MI, Forwarded_GETS) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(MI, Forwarded_GETX) {
+ y_dataFromTBEToRequestor;
+ l_popForwardedRequestQueue;
+ }
+
+ transition(MI, Dir_WB_ack, I) {
+ s_deallocateTBE;
+ l_popForwardedRequestQueue;
+ }
+}