path: root/src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm
author     Nathan Binkert <nate@binkert.org>   2009-05-11 10:38:43 -0700
committer  Nathan Binkert <nate@binkert.org>   2009-05-11 10:38:43 -0700
commit     2f30950143cc70bc42a3c8a4111d7cf8198ec881 (patch)
tree       708f6c22edb3c6feb31dd82866c26623a5329580 /src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm
parent     c70241810d4e4f523f173c1646b008dc40faad8e (diff)
download   gem5-2f30950143cc70bc42a3c8a4111d7cf8198ec881.tar.xz
ruby: Import ruby and slicc from GEMS
We eventually plan to replace the m5 cache hierarchy with the GEMS hierarchy, but for now we will make both live alongside each other.
Diffstat (limited to 'src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm')
-rw-r--r--  src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm  |  497
1 file changed, 497 insertions, 0 deletions
diff --git a/src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm b/src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm
new file mode 100644
index 000000000..435bf0eff
--- /dev/null
+++ b/src/mem/protocol/MSI_MOSI_CMP_directory-dir.sm
@@ -0,0 +1,497 @@
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+machine(Directory, "MOSI Directory Optimized") {
+
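+ // This directory tracks, per block, a set of L2 sharers plus a single owner
+ // (either the directory itself or one L2 cache bank). Requests are forwarded
+ // to the current owner, and dirty data is written back to memory on PUTX_Owner.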
+ // ** OUT QUEUES **
+ MessageBuffer dummyFrom0, network="To", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer dummyFrom1, network="To", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
+ // Dir -> mod-L2 bank. Ordering must remain "true" for the 'opt' and 'GS' protocols -- be careful here!
+ MessageBuffer forwardedRequestFromDir, network="To", virtual_network="2", ordered="true";
+ MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false"; // Dir -> mod-L2 bank
+ MessageBuffer dummyFrom4, network="To", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
+
+ // ** IN QUEUES **
+ MessageBuffer dummyTo0, network="From", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
+ MessageBuffer dummyTo2, network="From", virtual_network="2", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer dummyTo3, network="From", virtual_network="3", ordered="false"; // dummy buffer that shouldn't be used
+ MessageBuffer finalAckToDir, network="From", virtual_network="4", ordered="false"; // a mod-L2 bank -> this Dir
+
+ // STATES
+ enumeration(State, desc="Directory states", default="Directory_State_NP") {
+ // Base states
+ NP, desc="Not present";
+ I, desc="Idle";
+ S, desc="Shared";
+ O, desc="Owned";
+ M, desc="Modified", format="!b";
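+ // Transient states: a request has been forwarded to the current owner and the
+ // directory is waiting for the requestor's FinalAck before settling into O or M.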
+ OO, desc="transient state of O->GetS/GetInstr->O";
+ OM, desc="transient state of O->GetX->M";
+ MO, desc="transient state of M->GetS/GetInstr->O";
+ MM, desc="transient state of M->GetX->M";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETS, desc="A GETS arrives";
+ GET_INSTR, desc="";
+ GETX_Owner, desc="A GETX arrives, requestor is owner";
+ GETX_NotOwner, desc="A GETX arrives, requestor is not owner";
+ PUTX_Owner, "PUTX (requestor is owner)", desc="A PUTX arrives, requestor is owner";
+ PUTX_NotOwner, "PUTX (requestor not owner)", desc="A PUTX arrives, requestor is not owner";
+ FinalAck, desc="";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...") {
+ State DirectoryState, desc="Directory state";
+ Set Sharers, desc="Set of sharers - must be L2 caches"; // Note this is a Set and not a NetDest for space concerns
+ bool DirOwner, default="true", desc="Is dir owner?";
+ NodeID ProcOwner, default="0", desc="Processor owner"; // Note this is an int for space concerns
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ external_type(DirectoryMemory) {
+ Entry lookup(Address);
+ bool isPresent(Address);
+ }
+
+ // ** OBJECTS **
+
+ DirectoryMemory directory, constructor_hack="i";
+
+ State getState(Address addr) {
+ if (directory.isPresent(addr)) {
+ return directory[addr].DirectoryState;
+ }
+ return State:NP;
+ }
+
+ string getDirStateStr(Address addr) {
+ return Directory_State_to_string(getState(addr));
+ }
+
+ string getRequestTypeStr(CoherenceRequestType type) {
+ return CoherenceRequestType_to_string(type);
+ }
+
+ void setState(Address addr, State state) {
+ if (directory.isPresent(addr)) {
+ DEBUG_EXPR(addr);
+ DEBUG_EXPR(directory[addr].DirectoryState);
+ directory[addr].DirectoryState := state;
+ DEBUG_EXPR(directory[addr].DirectoryState);
+ DEBUG_EXPR(state);
+ }
+ }
+
+ // ** OUT_PORTS **
+ out_port(forwardedRequestNetwork_out, RequestMsg, forwardedRequestFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(ownRequestQueue_out, RequestMsg, requestToDir);
+
+ // ** IN_PORTS **
+ in_port(dummyTo0_in, RequestMsg, dummyTo0) {
+ if (dummyTo0_in.isReady()) {
+ peek(dummyTo0_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo0 port should not be used");
+ }
+ }
+ in_port(dummyTo2_in, RequestMsg, dummyTo2) {
+ if (dummyTo2_in.isReady()) {
+ peek(dummyTo2_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo2 port should not be used");
+ }
+ }
+
+ in_port(dummyTo3_in, RequestMsg, dummyTo3) {
+ if (dummyTo3_in.isReady()) {
+ peek(dummyTo3_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(id);
+ DEBUG_EXPR(in_msg.Type);
+ DEBUG_EXPR(getState(in_msg.Address));
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ }
+ error("dummyTo3 port should not be used");
+ }
+ }
+
+ in_port(finalAckNetwork_in, ResponseMsg, finalAckToDir){
+ if(finalAckNetwork_in.isReady()){
+ peek(finalAckNetwork_in, ResponseMsg){
+ assert(in_msg.Destination.isElement(machineID));
+ if(in_msg.Type == CoherenceResponseType:FINALACK){
+ trigger(Event:FinalAck, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady()) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ trigger(Event:GET_INSTR, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
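+ // The requestor owns the block only if the directory is not the owner and
+ // the recorded owner chip matches the requestor's chip.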
+ if(directory[in_msg.Address].DirOwner == false &&
+ L2CacheMachIDToChipID(in_msg.RequestorMachId) == directory[in_msg.Address].ProcOwner) {
+ trigger(Event:GETX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:GETX_NotOwner, in_msg.Address);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ if (directory[in_msg.Address].DirOwner == false &&
+ L2CacheMachIDToChipID(in_msg.RequestorMachId) == directory[in_msg.Address].ProcOwner) {
+ trigger(Event:PUTX_Owner, in_msg.Address);
+ } else {
+ trigger(Event:PUTX_NotOwner, in_msg.Address);
+ }
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+ // Actions
+
+ // a_addRequestorToSharers
+
+ action(a_addRequestorToSharers, "a", desc="Add requestor to list of sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[address].Sharers.add(L2CacheMachIDToChipID(in_msg.RequestorMachId));
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+ }
+
+ // b_dataToRequestor
+
+ action(b_dataToRequestor, "b", desc="Send data to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.SenderMachId := machineID;
+ if(in_msg.Type == CoherenceRequestType:GETX) {
+ DEBUG_EXPR(directory[address].Sharers);
+ DEBUG_EXPR(directory[address].Sharers.count());
+ out_msg.NumPendingExtAcks := directory[address].Sharers.count();
+ } else {
+ out_msg.NumPendingExtAcks := 0; // don't need to send pending ack count to GETS requestor
+ }
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.DataBlk := directory[address].DataBlk;
+ DEBUG_EXPR(out_msg.Address);
+ DEBUG_EXPR(out_msg.DataBlk);
+ DEBUG_EXPR(out_msg.NumPendingExtAcks);
+ DEBUG_EXPR(out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+ }
+
+ // d_forwardRequestToOwner
+
+ action(d_forwardRequestToOwner, "d", desc="Forward request to owner") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.RequestorMachId := in_msg.RequestorMachId;
+ out_msg.Destination.add(map_L2ChipId_to_L2Cache(out_msg.Address, directory[address].ProcOwner));
+ DEBUG_EXPR(out_msg.Destination);
+
+ if(in_msg.Type == CoherenceRequestType:GETX) {
+ out_msg.NumPendingExtAcks := directory[address].Sharers.count();
+ } else {
+ out_msg.NumPendingExtAcks := 0; // don't need to send pending ack count to GETS requestor
+ }
+ out_msg.MessageSize := MessageSizeType:Control;
+ DEBUG_EXPR(out_msg.Address);
+ DEBUG_EXPR(out_msg.NumPendingExtAcks);
+ DEBUG_EXPR(out_msg.Destination);
+ }
+ }
+ }
+
+ action(f_setOwnerToRequestor, "f", desc="Set owner equal to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[address].ProcOwner := L2CacheMachIDToChipID(in_msg.RequestorMachId);
+ directory[address].DirOwner := false;
+ }
+ DEBUG_EXPR(directory[address].ProcOwner);
+ }
+
+ action(g_clearSharers, "g", desc="Clear list of sharers") {
+ directory[address].Sharers.clear();
+ }
+
+ // currently done via multicast message
+
+ action(h_invToSharers, "h", desc="Send INVs to all sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ DEBUG_EXPR(directory[address].Sharers.count());
+ if(directory[address].Sharers.count() != 0){
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.RequestorMachId := in_msg.RequestorMachId;
+ DEBUG_EXPR(directory[address].Sharers);
+ out_msg.Destination := getMultiStaticL2BankNetDest(address, directory[address].Sharers);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ profileMsgDelay(1, requestNetwork_in.dequeue_getDelayCycles());
+ }
+
+ action(l_writeRequestDataToMemory, "l", desc="Write PUTX/DWN data to memory") {
+ peek(requestNetwork_in, RequestMsg) {
+ directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+ DEBUG_EXPR(in_msg.Address);
+ DEBUG_EXPR(in_msg.DataBlk);
+ }
+ }
+
+ action(n_writebackAckToRequestor, "n", desc="Send WB_ack to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ // This needs to be DIRECTORY_LATENCY to keep the queue fifo
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(m_forwardExclusiveRequestToOwner, "m", desc="Send EXE_ack to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ // This needs to be DIRECTORY_LATENCY to keep the queue fifo
+ enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:EXE_ACK;
+ out_msg.RequestorMachId := machineID;
+ out_msg.Destination.add(in_msg.RequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(uu_profile, "u/", desc="Profile this transition.") {
+ peek(requestNetwork_in, RequestMsg) {
+ profile_request(in_msg.L1CacheStateStr, in_msg.L2CacheStateStr, getDirStateStr(address), getRequestTypeStr(in_msg.Type));
+ }
+ }
+
+ action(p_clearOwner, "p", desc="Clear owner") {
+ directory[address].DirOwner := true; // set owner equal to dir
+ }
+
+ action(r_addOwnerToSharers, "r", desc="Add owner to list of sharers") {
+ DEBUG_EXPR(directory[address].ProcOwner);
+ directory[address].Sharers.add(directory[address].ProcOwner);
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+
+ action(t_removeOwnerFromSharers, "t", desc="Remove owner from list of sharers") {
+ DEBUG_EXPR(directory[address].ProcOwner);
+ directory[address].Sharers.remove(directory[address].ProcOwner);
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+
+ action(u_removeRequestorFromSharers, "u", desc="Remove requestor from list of sharers") {
+ peek(requestNetwork_in, RequestMsg) {
+ DEBUG_EXPR(in_msg.RequestorMachId);
+ directory[address].Sharers.remove(L2CacheMachIDToChipID(in_msg.RequestorMachId));
+ DEBUG_EXPR(directory[address].Sharers);
+ }
+ }
+
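+ // Recycle a request that cannot be handled in the current (transient) state by
+ // re-enqueuing it on the directory's own request queue to be retried later.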
+ action(x_recycleRequest, "x", desc=""){
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(ownRequestQueue_out, RequestMsg, latency="RECYCLE_LATENCY"){
+ out_msg := in_msg;
+ }
+ }
+ }
+
+ action(hh_popFinalAckQueue, "\h", desc=""){
+ profileMsgDelay(4, finalAckNetwork_in.dequeue_getDelayCycles());
+ }
+
+ //action(z_stall, "z", desc=""){
+ //}
+
+ // TRANSITIONS
+
+ transition({OM,MM}, FinalAck, M){
+ hh_popFinalAckQueue;
+ }
+ transition({OO,MO}, FinalAck, O){
+ hh_popFinalAckQueue;
+ }
+
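+ // In transient states, new requests for the block are recycled rather than
+ // serviced until the outstanding FinalAck arrives.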
+ transition({OO, OM, MO, MM}, {GETS, GET_INSTR, GETX_Owner, GETX_NotOwner, PUTX_Owner}){
+ x_recycleRequest;
+ j_popIncomingRequestQueue;
+ // z_stall;
+ }
+
+ // ---------------------------
+
+ transition({NP, I, S, M, O, OO, OM, MO, MM}, PUTX_NotOwner) {
+ uu_profile;
+ n_writebackAckToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Idle
+ transition({NP,I}, {GETS,GET_INSTR}, S) {
+ uu_profile;
+ a_addRequestorToSharers;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ transition({NP,I}, GETX_NotOwner, M) {
+ uu_profile;
+ f_setOwnerToRequestor;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, {GETS,GET_INSTR}) {
+ uu_profile;
+ a_addRequestorToSharers;
+ b_dataToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(S, GETX_NotOwner, M) {
+ uu_profile;
+ u_removeRequestorFromSharers;
+ b_dataToRequestor;
+ f_setOwnerToRequestor;
+ h_invToSharers;
+ g_clearSharers;
+ j_popIncomingRequestQueue;
+ }
+
+ // Transitions from Owned
+ transition(O, {GETS,GET_INSTR}, OO) {
+ uu_profile;
+ a_addRequestorToSharers;
+ d_forwardRequestToOwner;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, {GETX_NotOwner, GETX_Owner}, OM) {
+ uu_profile;
+ u_removeRequestorFromSharers;
+ t_removeOwnerFromSharers;
+ d_forwardRequestToOwner;
+ f_setOwnerToRequestor;
+ h_invToSharers;
+ g_clearSharers;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, PUTX_Owner, S) {
+ uu_profile;
+ u_removeRequestorFromSharers;
+ l_writeRequestDataToMemory;
+ n_writebackAckToRequestor;
+ p_clearOwner;
+ j_popIncomingRequestQueue;
+ }
+
+
+ // Transitions from Modified
+ transition(M, {GETS,GET_INSTR}, MO) {
+ uu_profile;
+ a_addRequestorToSharers;
+ r_addOwnerToSharers;
+ d_forwardRequestToOwner;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, GETX_NotOwner, MM) {
+ uu_profile;
+ d_forwardRequestToOwner;
+ f_setOwnerToRequestor;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, GETX_Owner) {
+ uu_profile;
+ m_forwardExclusiveRequestToOwner;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTX_Owner, I) {
+ uu_profile;
+ l_writeRequestDataToMemory;
+ n_writebackAckToRequestor;
+ p_clearOwner;
+ j_popIncomingRequestQueue;
+ }
+}