-rw-r--r--  src/learning_gem5/part3/MSI-cache.sm  | 853
-rw-r--r--  src/learning_gem5/part3/MSI-dir.sm    | 548
-rw-r--r--  src/learning_gem5/part3/MSI-msg.sm    | 108
-rw-r--r--  src/learning_gem5/part3/MSI.slicc     |   5
-rw-r--r--  src/learning_gem5/part3/SConsopts     |  11
5 files changed, 1525 insertions(+), 0 deletions(-)
diff --git a/src/learning_gem5/part3/MSI-cache.sm b/src/learning_gem5/part3/MSI-cache.sm
new file mode 100644
index 000000000..3847b53a8
--- /dev/null
+++ b/src/learning_gem5/part3/MSI-cache.sm
@@ -0,0 +1,853 @@
+/*
+ * Copyright (c) 2017 Jason Lowe-Power
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains a simple example MSI protocol.
+ *
+ * The protocol in this file is based on the MSI protocol found in
+ * A Primer on Memory Consistency and Cache Coherence
+ * Daniel J. Sorin, Mark D. Hill, and David A. Wood
+ * Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
+ *
+ * Table 8.1 contains the transitions and actions found in this file and
+ * section 8.2.4 explains the protocol in detail.
+ *
+ * See Learning gem5 Part 3: Ruby for more details.
+ *
+ * Authors: Jason Lowe-Power
+ */
+
+/// Declare a machine with type L1Cache.
+machine(MachineType:L1Cache, "MSI cache")
+    : Sequencer *sequencer;     // Incoming requests from the CPU come here
+      CacheMemory *cacheMemory; // This stores the data and cache states
+      bool send_evictions;      // Needed to support O3 CPU and mwait
+
+    // Other declarations
+    // Message buffers are required to send and receive data from the Ruby
+    // network. The from/to and request/response can be confusing!
+    // Virtual networks are needed to prevent deadlock (e.g., it is bad if a
+    // response gets stuck behind a stalled request). In this protocol, we
+    // use three virtual networks. The highest priority is responses,
+    // followed by forwarded requests; requests have the lowest priority.
+
+    // Requests *to* the directory
+    MessageBuffer * requestToDir, network="To", virtual_network="0",
+        vnet_type="request";
+    // Responses *to* the directory or other caches
+    MessageBuffer * responseToDirOrSibling, network="To", virtual_network="2",
+        vnet_type="response";
+
+    // Requests *from* the directory for fwds, invs, and put acks.
+    MessageBuffer * forwardFromDir, network="From", virtual_network="1",
+        vnet_type="forward";
+    // Responses *from* the directory and other caches for this cache's reqs.
+    MessageBuffer * responseFromDirOrSibling, network="From",
+        virtual_network="2", vnet_type="response";
+
+    // This is all of the incoming requests from the core via the sequencer.
+    MessageBuffer * mandatoryQueue;
+{
+    // Declare the states that this cache will use. These are both stable
+    // states (no underscore) and transient states (with underscore). Letters
+    // after the underscores are superscript in Sorin et al.
+    // Underscores and "desc" are used when generating HTML tables.
+    // Access permissions are used for functional accesses. For reads, the
+    // functional access reads *all* of the blocks with a matching address
+    // that have read-only or read-write permission. For functional writes,
+    // all blocks are updated with new data if they have busy, read-only, or
+    // read-write permission.
+    state_declaration(State, desc="Cache states") {
+        I, AccessPermission:Invalid,
+            desc="Not present/Invalid";
+
+        // States moving out of I
+        IS_D, AccessPermission:Invalid,
+            desc="Invalid, moving to S, waiting for data";
+        IM_AD, AccessPermission:Invalid,
+            desc="Invalid, moving to M, waiting for acks and data";
+        IM_A, AccessPermission:Busy,
+            desc="Invalid, moving to M, waiting for acks";
+
+        S, AccessPermission:Read_Only,
+            desc="Shared. Read-only, other caches may have the block";
+
+        // States moving out of S
+        SM_AD, AccessPermission:Read_Only,
+            desc="Shared, moving to M, waiting for acks and 'data'";
+        SM_A, AccessPermission:Read_Only,
+            desc="Shared, moving to M, waiting for acks";
+
+        M, AccessPermission:Read_Write,
+            desc="Modified. Read & write permissions. Owner of block";
+
+        // States moving to Invalid
+        MI_A, AccessPermission:Busy,
+            desc="Was modified, moving to I, waiting for put ack";
+        SI_A, AccessPermission:Busy,
+            desc="Was shared, moving to I, waiting for put ack";
+        II_A, AccessPermission:Invalid,
+            desc="Sent valid data before receiving put ack. ";
+            //"Waiting for put ack.";
+    }
+
+    // Events that can be triggered on incoming messages. These are the
+    // events that will trigger transitions.
+    enumeration(Event, desc="Cache events") {
+        // From the processor/sequencer/mandatory queue
+        Load,          desc="Load from processor";
+        Store,         desc="Store from processor";
+
+        // Internal event (only triggered from processor requests)
+        Replacement,   desc="Triggered when block is chosen as victim";
+
+        // Forwarded request from another cache via the dir on the fwd network
+        FwdGetS,       desc="Directory sent us a request to satisfy GetS. ";
+                       //"We must have the block in M to respond to this.";
+        FwdGetM,       desc="Directory sent us a request to satisfy GetM. ";
+                       //"We must have the block in M to respond to this.";
+        Inv,           desc="Invalidate from the directory.";
+        PutAck,        desc="Response from directory after we issue a put. ";
+                       //"This must be on the fwd network to avoid";
+                       //"deadlock.";
+
+        // Responses from directory
+        DataDirNoAcks, desc="Data from directory (acks = 0)";
+        DataDirAcks,   desc="Data from directory (acks > 0)";
+
+        // Responses from other caches
+        DataOwner,     desc="Data from owner";
+        InvAck,        desc="Invalidation ack from other cache after Inv";
+
+        // Special internally triggered event to simplify implementation
+        LastInvAck,    desc="Triggered after the last ack is received";
+    }
+
+    // A structure for the cache entry. This stores the cache data and state
+    // as defined above. You can put any other information here you like.
+    // The AbstractCacheEntry is defined in
+    // src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
+    // If you want to use any of the functions in the abstract entry, declare
+    // them here.
+    structure(Entry, desc="Cache entry", interface="AbstractCacheEntry") {
+        State CacheState,  desc="cache state";
+        DataBlock DataBlk, desc="Data in the block";
+    }
+
+    // TBE is the "transaction buffer entry". This stores information needed
+    // during transient states. This is *like* an MSHR. It functions as an
+    // MSHR in this protocol, but the entry is also allocated for other uses.
+    structure(TBE, desc="Entry for transient requests") {
+        State TBEState,    desc="State of block";
+        DataBlock DataBlk, desc="Data for the block. Needed for MI_A";
+        int AcksOutstanding, default=0, desc="Number of acks left to receive.";
+    }
+
+    // Table of TBE entries. This is defined externally in
+    // src/mem/ruby/structures/TBETable.hh. It is templatized on the TBE
+    // structure defined above.
+    structure(TBETable, external="yes") {
+        TBE lookup(Addr);
+        void allocate(Addr);
+        void deallocate(Addr);
+        bool isPresent(Addr);
+    }
+
+    /*************************************************************************/
+    // Some declarations of member functions and member variables.
+
+    // The TBE table for this machine. It is templatized under the covers.
+    // NOTE: SLICC mangles names with the machine type. Thus, the TBE declared
+    // above will be L1Cache_TBE in C++.
+    // We also have to pass a parameter through from the machine to the
+    // TBETable.
+    TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+
+    // Declare all of the functions of the AbstractController that we may use
+    // in this file.
+    // Functions from clocked object
+    Tick clockEdge();
+
+    // Functions we must use to set things up for the transitions to execute
+    // correctly.
+    // These next set/unset functions are used to populate the implicit
+    // variables used in actions. This is required when a transition has
+    // multiple actions.
+    void set_cache_entry(AbstractCacheEntry a);
+    void unset_cache_entry();
+    void set_tbe(TBE b);
+    void unset_tbe();
+
+    // Given an address and machine type, this queries the network to check
+    // where the message should be sent. In a real implementation, this might
+    // be fixed at design time, but this function gives us flexibility at
+    // runtime. For example, if you have multiple memory channels, this
+    // function will tell you which addresses to send to which memory
+    // controller.
+    MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+    // Convenience function to look up the cache entry.
+    // Needs a pointer so it will be a reference and can be updated in actions.
+    Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+        return static_cast(Entry, "pointer", cacheMemory.lookup(address));
+    }
+
+    /*************************************************************************/
+    // Functions that we need to define/override to use our specific
+    // structures in this implementation.
+
+    // Required function for getting the current state of the block.
+    // This is called from the transition to know which transition to execute.
+    State getState(TBE tbe, Entry cache_entry, Addr addr) {
+        // The TBE state will override the state in cache memory, if valid.
+        if (is_valid(tbe)) { return tbe.TBEState; }
+        // Next, if the cache entry is valid, it holds the state.
+        else if (is_valid(cache_entry)) { return cache_entry.CacheState; }
+        // If the block isn't present, then its state must be I.
+        else { return State:I; }
+    }
+
+    // Required function for setting the current state of the block.
+    // This is called from the transition to set the ending state.
+    // Needs to set both the TBE and the cache entry state.
+    // This is also called when transitioning to I, so it's possible the TBE
+    // and/or the cache_entry is invalid.
+    void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+        if (is_valid(tbe)) { tbe.TBEState := state; }
+        if (is_valid(cache_entry)) { cache_entry.CacheState := state; }
+    }
+
+    // Required function to override. Used for functional access to know where
+    // the valid data is. NOTE: L1Cache_State_to_permission is automatically
+    // created based on the access permissions in the state_declaration.
+    // This is mangled by both the MachineType and the name of the state
+    // declaration ("State" in this case).
+    AccessPermission getAccessPermission(Addr addr) {
+        TBE tbe := TBEs[addr];
+        if (is_valid(tbe)) {
+            return L1Cache_State_to_permission(tbe.TBEState);
+        }
+
+        Entry cache_entry := getCacheEntry(addr);
+        if (is_valid(cache_entry)) {
+            return L1Cache_State_to_permission(cache_entry.CacheState);
+        }
+
+        return AccessPermission:NotPresent;
+    }
+
+    // Required function to override. Like the function above, but sets the
+    // state.
+    void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+        if (is_valid(cache_entry)) {
+            cache_entry.changePermission(L1Cache_State_to_permission(state));
+        }
+    }
+
+    // Required functions to override for functionally reading/writing data.
+    // NOTE: testAndRead/Write defined in src/mem/ruby/slicc_interface/Util.hh
+    void functionalRead(Addr addr, Packet *pkt) {
+        TBE tbe := TBEs[addr];
+        if (is_valid(tbe)) {
+            testAndRead(addr, tbe.DataBlk, pkt);
+        } else {
+            testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+        }
+    }
+
+    int functionalWrite(Addr addr, Packet *pkt) {
+        TBE tbe := TBEs[addr];
+        if (is_valid(tbe)) {
+            if (testAndWrite(addr, tbe.DataBlk, pkt)) {
+                return 1;
+            } else {
+                return 0;
+            }
+        } else {
+            if (testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt)) {
+                return 1;
+            } else {
+                return 0;
+            }
+        }
+    }
+
+    /*************************************************************************/
+    // Input/output network definitions
+
+    // Output ports. These define the message types that will flow across the
+    // output buffers as defined above. These must be "to" networks.
+    // "request_out" is the name we'll use later to send requests.
+    // "RequestMsg" is the message type we will send (see MSI-msg.sm).
+    // "requestToDir" is the name of the MessageBuffer declared above that
+    // we are sending these requests out of.
+    out_port(request_out, RequestMsg, requestToDir);
+    out_port(response_out, ResponseMsg, responseToDirOrSibling);
+
+    // Input ports. The order here is/(can be) important. The code in each
+    // in_port is executed in the order specified in this file (or by the rank
+    // parameter). Thus, we must sort these based on the network priority.
+    // In this cache, the order is responses from other caches, forwards, then
+    // requests from the CPU.
+
+    // Like the out_port above:
+    // "response_in" is the name we'll use later when we refer to this port.
+    // "ResponseMsg" is the type of message we expect on this port.
+    // "responseFromDirOrSibling" is the name of the buffer this in_port is
+    // connected to for responses from other caches and the directory.
+    in_port(response_in, ResponseMsg, responseFromDirOrSibling) {
+        // NOTE: You have to check to make sure the message buffer has a valid
+        // message at the head. The code in in_port is executed either way.
+        if (response_in.isReady(clockEdge())) {
+            // Peek is a special function. Any code inside a peek statement
+            // has a special variable declared and populated: in_msg. This
+            // contains the message (of type ResponseMsg in this case) at the
+            // head.
+            // "response_in" is the port we want to peek into.
+            // "ResponseMsg" is the type of message we expect.
+            peek(response_in, ResponseMsg) {
+                // Grab the entry and tbe if they exist.
+                Entry cache_entry := getCacheEntry(in_msg.addr);
+                TBE tbe := TBEs[in_msg.addr];
+                // The TBE better exist since this is a response and we need
+                // to be able to check the remaining acks.
+                assert(is_valid(tbe));
+
+                // If it's from the directory...
+                if (machineIDToMachineType(in_msg.Sender) ==
+                        MachineType:Directory) {
+                    if (in_msg.Type != CoherenceResponseType:Data) {
+                        error("Directory should only reply with data");
+                    }
+                    // Take the in_msg acks and add (sub) the acks we've seen.
+                    // The InvAck will decrement the acks we're waiting for in
+                    // tbe.AcksOutstanding to below 0 if we haven't gotten the
+                    // dir resp yet. So, if this is 0 we don't need to wait.
+                    assert(in_msg.Acks + tbe.AcksOutstanding >= 0);
+                    if (in_msg.Acks + tbe.AcksOutstanding == 0) {
+                        trigger(Event:DataDirNoAcks, in_msg.addr, cache_entry,
+                                tbe);
+                    } else {
+                        // If it's not 0, then we need to wait for more acks
+                        // and we'll trigger LastInvAck later.
+                        trigger(Event:DataDirAcks, in_msg.addr, cache_entry,
+                                tbe);
+                    }
+                } else {
+                    // This is from another cache.
+                    if (in_msg.Type == CoherenceResponseType:Data) {
+                        trigger(Event:DataOwner, in_msg.addr, cache_entry,
+                                tbe);
+                    } else if (in_msg.Type == CoherenceResponseType:InvAck) {
+                        DPRINTF(RubySlicc, "Got inv ack. %d left\n",
+                                tbe.AcksOutstanding);
+                        if (tbe.AcksOutstanding == 1) {
+                            // If there is exactly one ack remaining, then we
+                            // know it is the last ack.
+                            trigger(Event:LastInvAck, in_msg.addr,
+                                    cache_entry, tbe);
+                        } else {
+                            trigger(Event:InvAck, in_msg.addr, cache_entry,
+                                    tbe);
+                        }
+                    } else {
+                        error("Unexpected response from other cache");
+                    }
+                }
+            }
+        }
+    }
+
+    // Forwarded requests for other caches.
+    in_port(forward_in, RequestMsg, forwardFromDir) {
+        if (forward_in.isReady(clockEdge())) {
+            peek(forward_in, RequestMsg) {
+                // Grab the entry and tbe if they exist.
+                Entry cache_entry := getCacheEntry(in_msg.addr);
+                TBE tbe := TBEs[in_msg.addr];
+
+                if (in_msg.Type == CoherenceRequestType:GetS) {
+                    // This is a special function that will trigger a
+                    // transition (as defined below). It *must* have these
+                    // parameters.
+                    trigger(Event:FwdGetS, in_msg.addr, cache_entry, tbe);
+                } else if (in_msg.Type == CoherenceRequestType:GetM) {
+                    trigger(Event:FwdGetM, in_msg.addr, cache_entry, tbe);
+                } else if (in_msg.Type == CoherenceRequestType:Inv) {
+                    trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
+                } else if (in_msg.Type == CoherenceRequestType:PutAck) {
+                    trigger(Event:PutAck, in_msg.addr, cache_entry, tbe);
+                } else {
+                    error("Unexpected forward message!");
+                }
+            }
+        }
+    }
+
+    // The "mandatory queue" is the port/queue from the CPU or other
+    // processor. This is *always* a RubyRequest.
+    in_port(mandatory_in, RubyRequest, mandatoryQueue) {
+        if (mandatory_in.isReady(clockEdge())) {
+            // Block all requests if there is already an outstanding request
+            // that has the same line address. This is unblocked when we
+            // finally respond to the request.
+            peek(mandatory_in, RubyRequest, block_on="LineAddress") {
+                // NOTE: Using LineAddress here to promote smaller requests to
+                // full cache block requests.
+                Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+                TBE tbe := TBEs[in_msg.LineAddress];
+                // If there isn't a matching entry and no room in the cache,
+                // then we need to find a victim.
+                if (is_invalid(cache_entry) &&
+                        cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
+                    // Make room for the block.
+                    // The "cacheProbe" function looks at the cache set for
+                    // the address and queries the replacement policy for the
+                    // entry to replace. It returns the address to replace.
+                    Addr addr := cacheMemory.cacheProbe(in_msg.LineAddress);
+                    Entry victim_entry := getCacheEntry(addr);
+                    TBE victim_tbe := TBEs[addr];
+                    trigger(Event:Replacement, addr, victim_entry,
+                            victim_tbe);
+                } else {
+                    if (in_msg.Type == RubyRequestType:LD ||
+                            in_msg.Type == RubyRequestType:IFETCH) {
+                        trigger(Event:Load, in_msg.LineAddress, cache_entry,
+                                tbe);
+                    } else if (in_msg.Type == RubyRequestType:ST) {
+                        trigger(Event:Store, in_msg.LineAddress, cache_entry,
+                                tbe);
+                    } else {
+                        error("Unexpected type from processor");
+                    }
+                }
+            }
+        }
+    }
+
+
+    /*************************************************************************/
+    // Below are all of the actions that might be taken on a transition.
+
+    // Each action has a name, a shorthand, and a description.
+    // The shorthand is used when generating the HTML tables for the protocol.
+    // A "\" in the shorthand causes that letter to be bold. Underscores
+    // insert a space, and ^ makes the rest of the letters superscript.
+    // The description is also shown in the HTML table when clicked.
+
+    // The first set of actions are things we will do to interact with the
+    // rest of the system. Things like sending requests/responses.
+
+    // Action blocks define a number of implicit variables that are useful.
+    // These variables come straight from the trigger() call in the in_port
+    // blocks.
+    // address: The address passed in the trigger (usually the in_msg.addr,
+    //          though it can be different. E.g., on a replacement it is the
+    //          victim address).
+    // cache_entry: The cache entry passed in the trigger call.
+    // tbe: The TBE passed in the trigger call.
+    action(sendGetS, "gS", desc="Send GetS to the directory") {
+        // The syntax for enqueue is a lot like peek. Instead of populating
+        // in_msg, enqueue has an out_msg reference. Whatever you set on
+        // out_msg is sent through the out port specified.
+        // "request_out" is the port we're sending the message out of.
+        // "RequestMsg" is the type of message we're sending.
+        // "1" is the latency (in cycles) the port waits before sending the
+        // message.
+        enqueue(request_out, RequestMsg, 1) {
+            out_msg.addr := address;
+            // This type is defined in MSI-msg.sm for this protocol.
+            out_msg.Type := CoherenceRequestType:GetS;
+            // The destination may change depending on the address striping
+            // across different directories, so query the network.
+            out_msg.Destination.add(mapAddressToMachine(address,
+                                    MachineType:Directory));
+            // See mem/protocol/RubySlicc_Exports.sm for possible sizes.
+            out_msg.MessageSize := MessageSizeType:Control;
+            // Set that the requestor is this machine so we get the response.
+            out_msg.Requestor := machineID;
+        }
+    }
+
+    action(sendGetM, "gM", desc="Send GetM to the directory") {
+        enqueue(request_out, RequestMsg, 1) {
+            out_msg.addr := address;
+            out_msg.Type := CoherenceRequestType:GetM;
+            out_msg.Destination.add(mapAddressToMachine(address,
+                                    MachineType:Directory));
+            out_msg.MessageSize := MessageSizeType:Control;
+            out_msg.Requestor := machineID;
+        }
+    }
+
+    // NOTE: Clean evict. Required to keep the directory state up-to-date.
+    action(sendPutS, "pS", desc="Send PutS to the directory") {
+        enqueue(request_out, RequestMsg, 1) {
+            out_msg.addr := address;
+            out_msg.Type := CoherenceRequestType:PutS;
+            out_msg.Destination.add(mapAddressToMachine(address,
+                                    MachineType:Directory));
+            out_msg.MessageSize := MessageSizeType:Control;
+            out_msg.Requestor := machineID;
+        }
+    }
+
+    action(sendPutM, "pM", desc="Send PutM+data to the directory") {
+        enqueue(request_out, RequestMsg, 1) {
+            out_msg.addr := address;
+            out_msg.Type := CoherenceRequestType:PutM;
+            out_msg.Destination.add(mapAddressToMachine(address,
+                                    MachineType:Directory));
+            out_msg.DataBlk := cache_entry.DataBlk;
+            out_msg.MessageSize := MessageSizeType:Data;
+            out_msg.Requestor := machineID;
+        }
+    }
+
+    action(sendCacheDataToReq, "cdR", desc="Send cache data to requestor") {
+        // We have to peek into the request to see who to send to.
+        // If we are in both the peek and the enqueue blocks, then we have
+        // access to both in_msg and out_msg.
+        assert(is_valid(cache_entry));
+        peek(forward_in, RequestMsg) {
+            enqueue(response_out, ResponseMsg, 1) {
+                out_msg.addr := address;
+                out_msg.Type := CoherenceResponseType:Data;
+                out_msg.Destination.add(in_msg.Requestor);
+                out_msg.DataBlk := cache_entry.DataBlk;
+                out_msg.MessageSize := MessageSizeType:Data;
+                out_msg.Sender := machineID;
+            }
+        }
+    }
+
+    action(sendCacheDataToDir, "cdD", desc="Send the cache data to the dir") {
+        enqueue(response_out, ResponseMsg, 1) {
+            out_msg.addr := address;
+            out_msg.Type := CoherenceResponseType:Data;
+            out_msg.Destination.add(mapAddressToMachine(address,
+                                    MachineType:Directory));
+            out_msg.DataBlk := cache_entry.DataBlk;
+            out_msg.MessageSize := MessageSizeType:Data;
+            out_msg.Sender := machineID;
+        }
+    }
+
+    action(sendInvAcktoReq, "iaR", desc="Send inv-ack to requestor") {
+        peek(forward_in, RequestMsg) {
+            enqueue(response_out, ResponseMsg, 1) {
+                out_msg.addr := address;
+                out_msg.Type := CoherenceResponseType:InvAck;
+                out_msg.Destination.add(in_msg.Requestor);
+                out_msg.DataBlk := cache_entry.DataBlk;
+                out_msg.MessageSize := MessageSizeType:Control;
+                out_msg.Sender := machineID;
+            }
+        }
+    }
+
+    action(decrAcks, "da", desc="Decrement the number of acks") {
+        assert(is_valid(tbe));
+        tbe.AcksOutstanding := tbe.AcksOutstanding - 1;
+        // This annotates the protocol trace.
+        APPEND_TRANSITION_COMMENT("Acks: ");
+        APPEND_TRANSITION_COMMENT(tbe.AcksOutstanding);
+    }
+
+    action(storeAcks, "sa", desc="Store the needed acks to the TBE") {
+        assert(is_valid(tbe));
+        peek(response_in, ResponseMsg) {
+            tbe.AcksOutstanding := in_msg.Acks + tbe.AcksOutstanding;
+        }
+        assert(tbe.AcksOutstanding > 0);
+    }
+
+    // Responses to CPU requests (e.g., hits and store acks)
+
+    action(loadHit, "Lh", desc="Load hit") {
+        assert(is_valid(cache_entry));
+        // Set this entry as the most recently used for the replacement policy
+        cacheMemory.setMRU(cache_entry);
+        // Send the data back to the sequencer/CPU. NOTE: False means it was
+        // not an "external hit", but hit in this local cache.
+        sequencer.readCallback(address, cache_entry.DataBlk, false);
+    }
+
+    action(externalLoadHit, "xLh", desc="External load hit (was a miss)") {
+        assert(is_valid(cache_entry));
+        peek(response_in, ResponseMsg) {
+            cacheMemory.setMRU(cache_entry);
+            // Forward the type of machine that responded to this request,
+            // e.g., another cache or the directory. This is used for
+            // tracking statistics.
+            sequencer.readCallback(address, cache_entry.DataBlk, true,
+                                   machineIDToMachineType(in_msg.Sender));
+        }
+    }
+
+    action(storeHit, "Sh", desc="Store hit") {
+        assert(is_valid(cache_entry));
+        cacheMemory.setMRU(cache_entry);
+        // The same as the read callback above.
+        sequencer.writeCallback(address, cache_entry.DataBlk, false);
+    }
+
+    action(externalStoreHit, "xSh", desc="External store hit (was a miss)") {
+        assert(is_valid(cache_entry));
+        peek(response_in, ResponseMsg) {
+            cacheMemory.setMRU(cache_entry);
+            // Note: this could be the last ack.
+            sequencer.writeCallback(address, cache_entry.DataBlk, true,
+                                    machineIDToMachineType(in_msg.Sender));
+        }
+    }
+
+    action(forwardEviction, "e", desc="Sends eviction notification to CPU") {
+        if (send_evictions) {
+            sequencer.evictionCallback(address);
+        }
+    }
+
+    // Cache management actions
+
+    action(allocateCacheBlock, "a", desc="Allocate a cache block") {
+        assert(is_invalid(cache_entry));
+        assert(cacheMemory.cacheAvail(address));
+        // Create a new entry and update cache_entry to the new entry.
+        set_cache_entry(cacheMemory.allocate(address, new Entry));
+    }
+
+    action(deallocateCacheBlock, "d", desc="Deallocate a cache block") {
+        assert(is_valid(cache_entry));
+        cacheMemory.deallocate(address);
+        // Clear the cache_entry variable (now it's invalid).
+        unset_cache_entry();
+    }
+
+    action(writeDataToCache, "wd", desc="Write data to the cache") {
+        peek(response_in, ResponseMsg) {
+            assert(is_valid(cache_entry));
+            cache_entry.DataBlk := in_msg.DataBlk;
+        }
+    }
+
+    action(allocateTBE, "aT", desc="Allocate TBE") {
+        assert(is_invalid(tbe));
+        TBEs.allocate(address);
+        // This updates the tbe variable for other actions.
+        set_tbe(TBEs[address]);
+    }
+
+    action(deallocateTBE, "dT", desc="Deallocate TBE") {
+        assert(is_valid(tbe));
+        TBEs.deallocate(address);
+        // This makes the tbe variable invalid.
+        unset_tbe();
+    }
+
+    // Queue management actions
+
+    action(popMandatoryQueue, "pQ", desc="Pop the mandatory queue") {
+        mandatory_in.dequeue(clockEdge());
+    }
+
+    action(popResponseQueue, "pR", desc="Pop the response queue") {
+        response_in.dequeue(clockEdge());
+    }
+
+    action(popForwardQueue, "pF", desc="Pop the forward queue") {
+        forward_in.dequeue(clockEdge());
+    }
+
+    // Stalling actions
+
+    action(stall, "z", desc="Stall the incoming request") {
+        // Do nothing. However, the transition must have some action to be
+        // valid, which is why this is needed.
+        // NOTE: There are other, more complicated but higher performing,
+        // stalls in Ruby like recycle() or stall_and_wait. This z_stall
+        // stalls everything in the queue behind this request.
+    }
+
+
+    /*************************************************************************/
+    // These are the transition definitions. These are simply each cell in
+    // the table from Sorin et al. They are mostly in upper-left to
+    // bottom-right order.
+
+    // Each transition has (up to) 3 parameters: the current state, the
+    // triggering event, and the final state. Thus, the transition below
+    // reads "Move from state I on a Load event to state IS_D". Further down
+    // are other examples of transition statements.
+    // Within the transition statement is a set of actions to take during the
+    // transition. These actions are executed atomically (i.e., all or
+    // nothing).
+    transition(I, Load, IS_D) {
+        // Make sure there is room in the cache to put the block whenever the
+        // miss returns. Otherwise we could deadlock.
+        allocateCacheBlock;
+        // We may need to track acks for this block and only the TBE holds an
+        // ack count. Thus, we need to allocate both a TBE and cache block.
+        allocateTBE;
+        // Actually send the request to the directory.
+        sendGetS;
+        // Since we have handled this request on the mandatory queue, we can
+        // pop it.
+        popMandatoryQueue;
+    }
+
+    transition(I, Store, IM_AD) {
+        allocateCacheBlock;
+        allocateTBE;
+        sendGetM;
+        popMandatoryQueue;
+    }
+
+    // You can use {} to specify multiple states or events for which a
+    // transition applies. For instance, below:
+    // If we are in IS_D, then on any of the following events (Load, Store,
+    // Replacement, Inv) we should stall.
+    // When there is no third parameter to transition, it means that we want
+    // to stay in the initial state.
+    transition(IS_D, {Load, Store, Replacement, Inv}) {
+        stall;
+    }
+
+    // Similarly, on either DataDirNoAcks or DataOwner we should go to S.
+    transition(IS_D, {DataDirNoAcks, DataOwner}, S) {
+        writeDataToCache;
+        deallocateTBE;
+        externalLoadHit;
+        popResponseQueue;
+    }
+
+    transition({IM_AD, IM_A}, {Load, Store, Replacement, FwdGetS, FwdGetM}) {
+        stall;
+    }
+
+    transition({IM_AD, SM_AD}, {DataDirNoAcks, DataOwner}, M) {
+        writeDataToCache;
+        deallocateTBE;
+        externalStoreHit;
+        popResponseQueue;
+    }
+
+    transition(IM_AD, DataDirAcks, IM_A) {
+        writeDataToCache;
+        storeAcks;
+        popResponseQueue;
+    }
+
+    transition({IM_AD, IM_A, SM_AD, SM_A}, InvAck) {
+        decrAcks;
+        popResponseQueue;
+    }
+
+    transition({IM_A, SM_A}, LastInvAck, M) {
+        deallocateTBE;
+        externalStoreHit;
+        popResponseQueue;
+    }
+
+    transition({S, SM_AD, SM_A, M}, Load) {
+        loadHit;
+        popMandatoryQueue;
+    }
+
+    transition(S, Store, SM_AD) {
+        allocateTBE;
+        sendGetM;
+        popMandatoryQueue;
+    }
+
+    transition(S, Replacement, SI_A) {
+        sendPutS;
+    }
+
+    transition(S, Inv, I) {
+        sendInvAcktoReq;
+        forwardEviction;
+        deallocateCacheBlock;
+        popForwardQueue;
+    }
+
+    transition({SM_AD, SM_A}, {Store, Replacement, FwdGetS, FwdGetM}) {
+        stall;
+    }
+
+    transition(SM_AD, Inv, IM_AD) {
+        sendInvAcktoReq;
+        popForwardQueue;
+    }
+
+    transition(SM_AD, DataDirAcks, SM_A) {
+        writeDataToCache;
+        storeAcks;
+        popResponseQueue;
+    }
+
+    transition(M, Store) {
+        storeHit;
+        forwardEviction;
+        popMandatoryQueue;
+    }
+
+    transition(M, Replacement, MI_A) {
+        sendPutM;
+    }
+
+    transition(M, FwdGetS, S) {
+        sendCacheDataToReq;
+        sendCacheDataToDir;
+        popForwardQueue;
+    }
+
+    transition(M, FwdGetM, I) {
+        sendCacheDataToReq;
+        deallocateCacheBlock;
+        popForwardQueue;
+    }
+
+    transition({MI_A, SI_A, II_A}, {Load, Store, Replacement}) {
+        stall;
+    }
+
+    transition(MI_A, FwdGetS, SI_A) {
+        sendCacheDataToReq;
+        sendCacheDataToDir;
+        popForwardQueue;
+    }
+
+    transition(MI_A, FwdGetM, II_A) {
+        sendCacheDataToReq;
+        popForwardQueue;
+    }
+
+    transition({MI_A, SI_A, II_A}, PutAck, I) {
+        deallocateCacheBlock;
+        popForwardQueue;
+    }
+
+    transition(SI_A, Inv, II_A) {
+        sendInvAcktoReq;
+        popForwardQueue;
+    }
+
+}
diff --git a/src/learning_gem5/part3/MSI-dir.sm b/src/learning_gem5/part3/MSI-dir.sm
new file mode 100644
index 000000000..7bd7aae83
--- /dev/null
+++ b/src/learning_gem5/part3/MSI-dir.sm
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2017 Jason Lowe-Power
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the directory controller of a simple example MSI
+ * protocol.
+ *
+ * In Ruby the directory controller both contains the directory coherence
+ * state and functions as the memory controller in many ways. There are
+ * states in the directory that are both memory-centric and cache-centric.
+ * Be careful!
+ *
+ * The protocol in this file is based on the MSI protocol found in
+ * A Primer on Memory Consistency and Cache Coherence
+ * Daniel J. Sorin, Mark D. Hill, and David A. Wood
+ * Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
+ *
+ * Table 8.2 contains the transitions and actions found in this file and
+ * section 8.2.4 explains the protocol in detail.
+ *
+ * See Learning gem5 Part 3: Ruby for more details.
+ *
+ * Authors: Jason Lowe-Power
+ */
+
+machine(MachineType:Directory, "Directory protocol")
+    :
+      // This "DirectoryMemory" is a little weird. It is initially allocated
+      // so that it *can* cover all of memory (i.e., there are pointers for
+      // every 64-byte block in memory). However, the entries are lazily
+      // created in getDirectoryEntry().
+      DirectoryMemory * directory;
+      // You can put any parameters you want here. They will be exported as
+      // normal SimObject parameters (like in the SimObject description file)
+      // and you can set these parameters at runtime via the python config
+      // file. If there is no default here (like directory), it is mandatory
+      // to set the parameter in the python config. Otherwise, it uses the
+      // default value set here.
+      Cycles toMemLatency := 1;
+
+    // Forwarded requests from the directory *to* the caches.
+    MessageBuffer *forwardToCache, network="To", virtual_network="1",
+        vnet_type="forward";
+    // Responses from the directory *to* the cache.
+    MessageBuffer *responseToCache, network="To", virtual_network="2",
+        vnet_type="response";
+
+    // Requests *from* the cache to the directory.
+    MessageBuffer *requestFromCache, network="From", virtual_network="0",
+        vnet_type="request";
+
+    // Responses *from* the cache to the directory.
+    MessageBuffer *responseFromCache, network="From", virtual_network="2",
+        vnet_type="response";
+
+    // Special buffer for memory responses. Kind of like the mandatory queue.
+    MessageBuffer *responseFromMemory;
+
+{
+    // For many things in SLICC you can specify a default. However, this
+    // default must use the C++ name (mangled SLICC name). For the state
+    // below, you have to use the controller name and the name we use for
+    // states.
+    state_declaration(State, desc="Directory states",
+                      default="Directory_State_I") {
+        // Stable states.
+        // NOTE: These are "cache-centric" states like in Sorin et al.
+        // However, the access permissions are memory-centric.
+        I, AccessPermission:Read_Write, desc="Invalid in the caches.";
+        S, AccessPermission:Read_Only,  desc="At least one cache has the blk";
+        M, AccessPermission:Invalid,    desc="A cache has the block in M";
+
+        // Transient states
+        S_D, AccessPermission:Busy,     desc="Moving to S, but need data";
+
+        // Waiting for data from memory
+        S_m, AccessPermission:Read_Write, desc="In S waiting for mem";
+        M_m, AccessPermission:Read_Write, desc="Moving to M waiting for mem";
+
+        // Waiting for write-ack from memory
+        MI_m, AccessPermission:Busy,    desc="Moving to I waiting for ack";
+        SS_m, AccessPermission:Busy,    desc="Moving to S waiting for ack";
+    }
+
+    enumeration(Event, desc="Directory events") {
+        // Data requests from the cache
+        GetS,         desc="Request for read-only data from cache";
+        GetM,         desc="Request for read-write data from cache";
+
+        // Writeback requests from the cache
+        PutSNotLast,  desc="PutS and the block has other sharers";
+        PutSLast,     desc="PutS and the block has no other sharers";
+        PutMOwner,    desc="Dirty data writeback from the owner";
+        PutMNonOwner, desc="Dirty data writeback from non-owner";
+
+        // Cache responses
+        Data,         desc="Response to fwd request with data";
+
+        // From memory
+        MemData,      desc="Data from memory";
+        MemAck,       desc="Ack from memory that write is complete";
+    }
+
+    // NOTE: We use a NetDest for the sharers and the owner so we can simply
+    // copy the structure into the message we send as a response.
+    structure(Entry, desc="...", interface="AbstractEntry") {
+        State DirState,  desc="Directory state";
+        NetDest Sharers, desc="Sharers for this block";
+        NetDest Owner,   desc="Owner of this block";
+    }
+
+    Tick clockEdge();
+
+    // This either returns the valid directory entry or, if it hasn't been
+    // allocated yet, allocates the entry. This may save some host memory
+    // since the directory is lazily populated.
+    Entry getDirectoryEntry(Addr addr), return_by_pointer = "yes" {
+        Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+        if (is_invalid(dir_entry)) {
+            // The first time we see this address, allocate an entry for it.
+            dir_entry := static_cast(Entry, "pointer",
+                                     directory.allocate(addr, new Entry));
+        }
+        return dir_entry;
+    }
+
+    /*************************************************************************/
+    // Functions that we need to define/override to use our specific
+    // structures in this implementation.
+    // NOTE: We don't have a TBE in this machine, so we don't need to pass it
+    // to these overridden functions.
+
+    State getState(Addr addr) {
+        if (directory.isPresent(addr)) {
+            return getDirectoryEntry(addr).DirState;
+        } else {
+            return State:I;
+        }
+    }
+
+    void setState(Addr addr, State state) {
+        if (directory.isPresent(addr)) {
+            if (state == State:M) {
+                DPRINTF(RubySlicc, "Owner %s\n", getDirectoryEntry(addr).Owner);
+                assert(getDirectoryEntry(addr).Owner.count() == 1);
+                assert(getDirectoryEntry(addr).Sharers.count() == 0);
+            }
+            getDirectoryEntry(addr).DirState := state;
+            if (state == State:I) {
+                assert(getDirectoryEntry(addr).Owner.count() == 0);
+                assert(getDirectoryEntry(addr).Sharers.count() == 0);
+            }
+        }
+    }
+
+    // This is really the access permissions of memory.
+    // TODO: I don't understand this at the directory.
+    AccessPermission getAccessPermission(Addr addr) {
+        if (directory.isPresent(addr)) {
+            Entry e := getDirectoryEntry(addr);
+            return Directory_State_to_permission(e.DirState);
+        } else {
+            return AccessPermission:NotPresent;
+        }
+    }
+
+    void setAccessPermission(Addr addr, State state) {
+        if (directory.isPresent(addr)) {
+            Entry e := getDirectoryEntry(addr);
+            e.changePermission(Directory_State_to_permission(state));
+        }
+    }
+
+    void functionalRead(Addr addr, Packet *pkt) {
+        functionalMemoryRead(pkt);
+    }
+
+    // This returns the number of writes. So, if we write, return 1.
+    int functionalWrite(Addr addr, Packet *pkt) {
+        if (functionalMemoryWrite(pkt)) {
+            return 1;
+        } else {
+            return 0;
+        }
+    }
+
+
+    /*************************************************************************/
+    // Network ports
+
+    out_port(forward_out, RequestMsg, forwardToCache);
+    out_port(response_out, ResponseMsg, responseToCache);
+
+    in_port(memQueue_in, MemoryMsg, responseFromMemory) {
+        if (memQueue_in.isReady(clockEdge())) {
+            peek(memQueue_in, MemoryMsg) {
+                if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+                    trigger(Event:MemData, in_msg.addr);
+                } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+                    trigger(Event:MemAck, in_msg.addr);
+                } else {
+                    error("Invalid message");
+                }
+            }
+        }
+    }
+
+    in_port(response_in, ResponseMsg, responseFromCache) {
+        if (response_in.isReady(clockEdge())) {
+            peek(response_in, ResponseMsg) {
+                if (in_msg.Type == CoherenceResponseType:Data) {
+                    trigger(Event:Data, in_msg.addr);
+                } else {
+                    error("Unexpected message type.");
+                }
+            }
+        }
+    }
+
+    in_port(request_in, RequestMsg, requestFromCache) {
+        if (request_in.isReady(clockEdge())) {
+            peek(request_in, RequestMsg) {
+                Entry entry := getDirectoryEntry(in_msg.addr);
+                if (in_msg.Type == CoherenceRequestType:GetS) {
+                    // NOTE: Since we don't have a TBE in this machine, there
+                    // is no need to pass a TBE into trigger. Also, for the
+                    // directory there is no cache entry.
+                    trigger(Event:GetS, in_msg.addr);
+                } else if (in_msg.Type == CoherenceRequestType:GetM) {
+                    trigger(Event:GetM, in_msg.addr);
+                } else if (in_msg.Type == CoherenceRequestType:PutS) {
+                    assert(is_valid(entry));
+                    // If there is only a single sharer (i.e., the requestor)
+                    if (entry.Sharers.count() == 1) {
+                        assert(entry.Sharers.isElement(in_msg.Requestor));
+                        trigger(Event:PutSLast, in_msg.addr);
+                    } else {
+                        trigger(Event:PutSNotLast, in_msg.addr);
+                    }
+                } else if (in_msg.Type == CoherenceRequestType:PutM) {
+                    assert(is_valid(entry));
+                    if (entry.Owner.isElement(in_msg.Requestor)) {
+                        trigger(Event:PutMOwner, in_msg.addr);
+                    } else {
+                        trigger(Event:PutMNonOwner, in_msg.addr);
+                    }
+                } else {
+                    error("Unexpected message type.");
+                }
+            }
+        }
+    }
+
+
+    /*************************************************************************/
+    // Actions
+
+    // Memory actions
+
+    action(sendMemRead, "r", desc="Send a memory read request") {
+        peek(request_in, RequestMsg) {
+            // Special function from AbstractController that will send a new
+            // packet out of the "Ruby" black box to the memory side. At some
+            // point the response will be on the memory queue.
+            // Like enqueue, this takes a latency for the request.
+            queueMemoryRead(in_msg.Requestor, address, toMemLatency);
+        }
+    }
+
+    action(sendDataToMem, "w", desc="Write data to memory") {
+        peek(request_in, RequestMsg) {
+            DPRINTF(RubySlicc, "Writing memory for %#x\n", address);
+            DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk);
+            queueMemoryWrite(in_msg.Requestor, address, toMemLatency,
+                             in_msg.DataBlk);
+        }
+    }
+
+    action(sendRespDataToMem, "rw", desc="Write data to memory from resp") {
+        peek(response_in, ResponseMsg) {
+            DPRINTF(RubySlicc, "Writing memory for %#x\n", address);
+            DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk);
+            queueMemoryWrite(in_msg.Sender, address, toMemLatency,
+                             in_msg.DataBlk);
+        }
+    }
+
+    // Sharer/owner actions
+
+    action(addReqToSharers, "aS", desc="Add requestor to sharer list") {
+        peek(request_in, RequestMsg) {
+            getDirectoryEntry(address).Sharers.add(in_msg.Requestor);
+        }
+    }
+
+    action(setOwner, "sO", desc="Set the owner") {
+        peek(request_in, RequestMsg) {
+            getDirectoryEntry(address).Owner.add(in_msg.Requestor);
+        }
+    }
+
+    action(addOwnerToSharers, "oS", desc="Add the owner to sharers") {
+        Entry e := getDirectoryEntry(address);
+        assert(e.Owner.count() == 1);
+        e.Sharers.addNetDest(e.Owner);
+    }
+
+    action(removeReqFromSharers, "rS", desc="Remove requestor from sharers") {
+        peek(request_in, RequestMsg) {
+            getDirectoryEntry(address).Sharers.remove(in_msg.Requestor);
+        }
+    }
+
+    action(clearSharers, "cS", desc="Clear the sharer list") {
+        getDirectoryEntry(address).Sharers.clear();
+    }
+
+    action(clearOwner, "cO", desc="Clear the owner") {
+        getDirectoryEntry(address).Owner.clear();
+    }
+
+    // Invalidates and forwards
+
+    action(sendInvToSharers, "i", desc="Send invalidate to all sharers") {
+        peek(request_in, RequestMsg) {
+            enqueue(forward_out, RequestMsg, 1) {
+                out_msg.addr := address;
+                out_msg.Type := CoherenceRequestType:Inv;
+                out_msg.Requestor := in_msg.Requestor;
+                out_msg.Destination := getDirectoryEntry(address).Sharers;
+                out_msg.MessageSize := MessageSizeType:Control;
+            }
+        }
+    }
+
+    action(sendFwdGetS, "fS", desc="Send forward getS to owner") {
+        assert(getDirectoryEntry(address).Owner.count() == 1);
+        peek(request_in, RequestMsg) {
+            enqueue(forward_out, RequestMsg, 1) {
+                out_msg.addr := address;
+                out_msg.Type := CoherenceRequestType:GetS;
+                out_msg.Requestor := in_msg.Requestor;
+                out_msg.Destination := getDirectoryEntry(address).Owner;
+                out_msg.MessageSize := MessageSizeType:Control;
+            }
+        }
+    }
+
+    action(sendFwdGetM, "fM", desc="Send forward getM to owner") {
+        assert(getDirectoryEntry(address).Owner.count() == 1);
+        peek(request_in, RequestMsg) {
+            enqueue(forward_out, RequestMsg, 1) {
+                out_msg.addr := address;
+                out_msg.Type := CoherenceRequestType:GetM;
+                out_msg.Requestor := in_msg.Requestor;
+                out_msg.Destination := getDirectoryEntry(address).Owner;
+                out_msg.MessageSize := MessageSizeType:Control;
+            }
+        }
+    }
+
+    // Responses to requests
+
+    // This also needs to send along the number of sharers!!!!
+    action(sendDataToReq, "d", desc="Send data from memory to requestor. ") {
+        //"May need to send sharer number, too") {
+        peek(memQueue_in, MemoryMsg) {
+            enqueue(response_out, ResponseMsg, 1) {
+                out_msg.addr := address;
+                out_msg.Type := CoherenceResponseType:Data;
+                out_msg.Sender := machineID;
+                out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+                out_msg.DataBlk := in_msg.DataBlk;
+                out_msg.MessageSize := MessageSizeType:Data;
+                Entry e := getDirectoryEntry(address);
+                // Only need to include acks if the requestor is the owner
+                // (i.e., this is a GetM response).
+                if (e.Owner.isElement(in_msg.OriginalRequestorMachId)) {
+                    out_msg.Acks := e.Sharers.count();
+                } else {
+                    out_msg.Acks := 0;
+                }
+                assert(out_msg.Acks >= 0);
+            }
+        }
+    }
+
+    action(sendPutAck, "a", desc="Send the put ack") {
+        peek(request_in, RequestMsg) {
+            enqueue(forward_out, RequestMsg, 1) {
+                out_msg.addr := address;
+                out_msg.Type := CoherenceRequestType:PutAck;
+                out_msg.Requestor := machineID;
+                out_msg.Destination.add(in_msg.Requestor);
+                out_msg.MessageSize := MessageSizeType:Control;
+            }
+        }
+    }
+
+    // Queue management
+
+    action(popResponseQueue, "pR", desc="Pop the response queue") {
+        response_in.dequeue(clockEdge());
+    }
+
+    action(popRequestQueue, "pQ", desc="Pop the request queue") {
+        request_in.dequeue(clockEdge());
+    }
+
+    action(popMemQueue, "pM", desc="Pop the memory queue") {
+        memQueue_in.dequeue(clockEdge());
+    }
+
+    // Stalling actions
+    action(stall, "z", desc="Stall the incoming request") {
+        // Do nothing.
+    }
+
+
+    /*************************************************************************/
+    // Transitions
+
+    transition({I, S}, GetS, S_m) {
+        sendMemRead;
+        addReqToSharers;
+        popRequestQueue;
+    }
+
+    transition(I, {PutSNotLast, PutSLast, PutMNonOwner}) {
+        sendPutAck;
+        popRequestQueue;
+    }
+
+    transition(S_m, MemData, S) {
+        sendDataToReq;
+        popMemQueue;
+    }
+
+    transition(I, GetM, M_m) {
+        sendMemRead;
+        setOwner;
+        popRequestQueue;
+    }
+
+    transition(M_m, MemData, M) {
+        sendDataToReq;
+        clearSharers; // NOTE: This isn't *required* in some cases.
+        popMemQueue;
+    }
+
+    transition(S, GetM, M_m) {
+        sendMemRead;
+        removeReqFromSharers;
+        sendInvToSharers;
+        setOwner;
+        popRequestQueue;
+    }
+
+    transition({S, S_D, SS_m, S_m}, {PutSNotLast, PutMNonOwner}) {
+        removeReqFromSharers;
+        sendPutAck;
+        popRequestQueue;
+    }
+
+    transition(S, PutSLast, I) {
+        removeReqFromSharers;
+        sendPutAck;
+        popRequestQueue;
+    }
+
+    transition(M, GetS, S_D) {
+        sendFwdGetS;
+        addReqToSharers;
+        addOwnerToSharers;
+        clearOwner;
+        popRequestQueue;
+    }
+
+    transition(M, GetM) {
+        sendFwdGetM;
+        clearOwner;
+        setOwner;
+        popRequestQueue;
+    }
+
+    transition({M, M_m, MI_m}, {PutSNotLast, PutSLast, PutMNonOwner}) {
+        sendPutAck;
+        popRequestQueue;
+    }
+
+    transition(M, PutMOwner, MI_m) {
+        sendDataToMem;
+        clearOwner;
+        sendPutAck;
+        popRequestQueue;
+    }
+
+    transition(MI_m, MemAck, I) {
+        popMemQueue;
+    }
+
+    transition(S_D, {GetS, GetM}) {
+        stall;
+    }
+
+    transition(S_D, PutSLast) {
+        removeReqFromSharers;
+        sendPutAck;
+        popRequestQueue;
+    }
+
+    transition(S_D, Data, SS_m) {
+        sendRespDataToMem;
+        popResponseQueue;
+    }
+
+    transition(SS_m, MemAck, S) {
+        popMemQueue;
+    }
+
+    // If we get another request for a block that's waiting on memory,
+    // stall that request.
+    transition({MI_m, SS_m, S_m, M_m}, {GetS, GetM}) {
+        stall;
+    }
+
+}
diff --git a/src/learning_gem5/part3/MSI-msg.sm b/src/learning_gem5/part3/MSI-msg.sm
new file mode 100644
index 000000000..6ef409e87
--- /dev/null
+++ b/src/learning_gem5/part3/MSI-msg.sm
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017 Jason Lowe-Power
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the messages and other types for a simple MSI protocol.
+ *
+ * The protocol in this file is based on the MSI protocol found in
+ * A Primer on Memory Consistency and Cache Coherence
+ * Daniel J. Sorin, Mark D. Hill, and David A. Wood
+ * Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
+ *
+ * See Learning gem5 Part 3: Ruby for more details.
+ *
+ * Authors: Jason Lowe-Power
+ */
+
+enumeration(CoherenceRequestType, desc="Types of request messages") {
+    GetS,   desc="Request from cache for a block with read permission";
+    GetM,   desc="Request from cache for a block with write permission";
+    PutS,   desc="Sent to directory when evicting a block in S (clean WB)";
+    PutM,   desc="Sent to directory when evicting a block in M";
+
+    // "Requests" from the directory to the caches on the fwd network
+    Inv,    desc="Probe the cache and invalidate any matching blocks";
+    PutAck, desc="The put request has been processed.";
+}
+
+enumeration(CoherenceResponseType, desc="Types of response messages") {
+    Data,   desc="Contains the most up-to-date data";
+    InvAck, desc="Message from another cache that it has inv. the blk";
+}
+
+structure(RequestMsg, desc="Used for Cache->Dir and Fwd messages",
+          interface="Message") {
+    // NOTE: You can't name addr "Addr" because it would conflict with the
+    // Addr *type*.
+    Addr addr,                   desc="Physical address for this request";
+    CoherenceRequestType Type,   desc="Type of request";
+    MachineID Requestor,         desc="Node who initiated the request";
+    NetDest Destination,         desc="Multicast destination mask";
+    DataBlock DataBlk,           desc="data for the cache line";
+    // NOTE: You *must* use MessageSize as the name of this variable, and you
+    // must have a MessageSize for each type of message. Otherwise, you will
+    // get the error "panic: MessageSizeType() called on wrong message!"
+    MessageSizeType MessageSize, desc="size category of the message";
+
+    // This must be overridden here to support functional accesses.
+    bool functionalRead(Packet *pkt) {
+        // Requests should never have the only copy of the most up-to-date
+        // data.
+        return false;
+    }
+
+    bool functionalWrite(Packet *pkt) {
+        // No check on message type required since the protocol should read
+        // the data block from only those messages that contain valid data.
+        return testAndWrite(addr, DataBlk, pkt);
+    }
+}
+
+structure(ResponseMsg, desc="Used for responses (Dir->Cache, Cache->Cache)",
+          interface="Message") {
+    Addr addr,                   desc="Physical address for this response";
+    CoherenceResponseType Type,  desc="Type of response";
+    MachineID Sender,            desc="Node who is responding to the request";
+    NetDest Destination,         desc="Multicast destination mask";
+    DataBlock DataBlk,           desc="data for the cache line";
+    MessageSizeType MessageSize, desc="size category of the message";
+    int Acks,                    desc="Number of acks required from others";
+
+    // This must be overridden here to support functional accesses.
+    bool functionalRead(Packet *pkt) {
+        if (Type == CoherenceResponseType:Data) {
+            return testAndRead(addr, DataBlk, pkt);
+        }
+        return false;
+    }
+
+    bool functionalWrite(Packet *pkt) {
+        // No check on message type required since the protocol should read
+        // the data block from only those messages that contain valid data.
+        return testAndWrite(addr, DataBlk, pkt);
+    }
+}
diff --git a/src/learning_gem5/part3/MSI.slicc b/src/learning_gem5/part3/MSI.slicc
new file mode 100644
index 000000000..c6fdc5075
--- /dev/null
+++ b/src/learning_gem5/part3/MSI.slicc
@@ -0,0 +1,5 @@
+protocol "MSI";
+include "RubySlicc_interfaces.slicc";
+include "MSI-msg.sm";
+include "MSI-cache.sm";
+include "MSI-dir.sm";
diff --git a/src/learning_gem5/part3/SConsopts b/src/learning_gem5/part3/SConsopts
new file mode 100644
index 000000000..c8573d3ac
--- /dev/null
+++ b/src/learning_gem5/part3/SConsopts
@@ -0,0 +1,11 @@
+Import('*')
+
+# NOTE: All SLICC setup code is found in src/mem/protocol/SConscript.
+
+# Register this protocol with gem5/SCons
+all_protocols.extend([
+    'MSI',
+])
+
+# Add this directory to the search path for SLICC
+protocol_dirs.append(str(Dir('.').abspath))
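
As the comments in MSI-cache.sm and MSI-dir.sm note, every parameter declared
after the machine's colon (sequencer, cacheMemory, send_evictions, directory,
toMemLatency) and every MessageBuffer becomes a SimObject parameter that must
be set from a Python runtime configuration. The fragment below is a minimal
sketch of how one instance of the L1Cache machine might be wired up; it is
not part of this commit. It assumes gem5 was built with this protocol (e.g.,
scons build/X86_MSI/gem5.opt PROTOCOL=MSI), that SLICC generated the
L1Cache_Controller class from MSI-cache.sm, and the RubyCache/RubySequencer
argument values (size, assoc, start_index_bit) are illustrative choices, not
anything these files require.

    # Hypothetical gem5 Python config fragment (sketch only).
    from m5.objects import (L1Cache_Controller, MessageBuffer, RubyCache,
                            RubySequencer)

    class MSIL1Cache(L1Cache_Controller):
        """One instance of the L1Cache machine declared in MSI-cache.sm."""

        def __init__(self, ruby_system, version):
            super(MSIL1Cache, self).__init__()
            self.version = version
            self.ruby_system = ruby_system
            # The three parameters declared after the colon in MSI-cache.sm:
            self.cacheMemory = RubyCache(size='16kB', assoc=8,
                                         start_index_bit=6)  # 64B blocks
            self.sequencer = RubySequencer(version=version,
                                           icache=self.cacheMemory,
                                           dcache=self.cacheMemory)
            self.send_evictions = False  # must be True for the O3 CPU/mwait
            # One MessageBuffer per buffer declared in the .sm file. Each
            # must also be connected to the Ruby network's ports, which is
            # elided here; the virtual-network choice (0: request,
            # 1: forward, 2: response) lives in the .sm file itself.
            self.requestToDir = MessageBuffer(ordered=True)
            self.responseToDirOrSibling = MessageBuffer(ordered=True)
            self.forwardFromDir = MessageBuffer(ordered=True)
            self.responseFromDirOrSibling = MessageBuffer(ordered=True)
            self.mandatoryQueue = MessageBuffer()

The directory controller would be configured the same way: set its directory
parameter to a RubyDirectoryMemory and leave toMemLatency at the default of 1
cycle that MSI-dir.sm provides, since only parameters without a default are
mandatory in the Python config.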