path: root/src/mem/protocol
author     Dan Gibson <gibson@cs.wisc.edu>    2009-05-11 10:38:45 -0700
committer  Dan Gibson <gibson@cs.wisc.edu>    2009-05-11 10:38:45 -0700
commit     d8c592a05d884560b3cbbe04d9e1ed9cf6575eaa (patch)
tree       6902f66ea067a5f2a63a6f149c6be0ddc6777337 /src/mem/protocol
parent     6ceaffd7240993761785c0d2f5e4f92bd94fbf32 (diff)
download   gem5-d8c592a05d884560b3cbbe04d9e1ed9cf6575eaa.tar.xz
ruby: remove unnecessary code.
1) Removing files from the ruby build left some unresolved symbols. Those have been fixed.
2) Most of the dependencies on Simics data types and the Simics interface files have been removed.
3) Almost all mention of Opal is gone.
4) Huge chunks of LogTM are now gone.
5) Handling 1-4 left hundreds of unresolved references, which were fixed, yielding a snowball effect (and the massive size of this delta).
Diffstat (limited to 'src/mem/protocol')
-rw-r--r--  src/mem/protocol/LogTM.sm                                 83
-rw-r--r--  src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm   1800
-rw-r--r--  src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm   2123
-rw-r--r--  src/mem/protocol/MESI_CMP_filter_directory-mem.sm        166
-rw-r--r--  src/mem/protocol/MESI_CMP_filter_directory-msg.sm        153
-rw-r--r--  src/mem/protocol/MESI_CMP_filter_directory.slicc           7
-rw-r--r--  src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm       250
-rw-r--r--  src/mem/protocol/MESI_CMP_filter_directory_m.slicc         7
8 files changed, 0 insertions, 4589 deletions
diff --git a/src/mem/protocol/LogTM.sm b/src/mem/protocol/LogTM.sm
deleted file mode 100644
index 02c6656ac..000000000
--- a/src/mem/protocol/LogTM.sm
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- This file has been modified by Kevin Moore and Dan Nussbaum of the
- Scalable Systems Research Group at Sun Microsystems Laboratories
- (http://research.sun.com/scalable/) to support the Adaptive
- Transactional Memory Test Platform (ATMTP).
-
- Please send email to atmtp-interest@sun.com with feedback, questions, or
- to request future announcements about ATMTP.
-
- ----------------------------------------------------------------------
-
- File modification date: 2008-02-23
-
- ----------------------------------------------------------------------
-*/
-
-external_type(PartialAddressFilter, desc="Bloom filter for tracking transaction locks."){
- bool isRead(Address);
- bool isWrite(Address);
-
- void addEntry(Address, bool);
- void clear();
-}
-
-external_type(TransactionInterfaceManager) {
- bool shouldNackLoad(Address, uint64, MachineID);
- bool shouldNackStore(Address, uint64, MachineID);
- bool checkReadWriteSignatures(Address);
- bool checkWriteSignatures(Address);
-
- void notifySendNack(Address, uint64, MachineID);
- void notifyReceiveNack(int, Address, uint64, uint64, MachineID);
- void notifyReceiveNackFinal(int, Address);
-
- uint64 getTimestamp(int);
- uint64 getOldestTimestamp();
-
- bool existGlobalLoadConflict(int, Address);
- bool existGlobalStoreConflict(int, Address);
-
- void profileTransactionMiss(int, bool);
-
- void xactReplacement(Address);
-
- /* DEPRECATED */
- bool existLoadConflict(Address);
- bool existStoreConflict(Address);
- bool isInReadFilterSummary(Address);
- bool isInWriteFilterSummary(Address);
- bool isTokenOwner(int);
- void setAbortFlag(int, Address);
- void setEnemyProcessor(int, MachineID);
- bool isRemoteOlder(uint64);
-
-}
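
The external_type blocks above only declare interfaces: SLICC generates calls against them, while the implementations are ordinary C++ classes supplied by the Ruby runtime. As a rough sketch of what the removed TransactionInterfaceManager declaration corresponds to on the C++ side (the class layout, parameter names, and types below are illustrative assumptions matched to the method list in LogTM.sm, not the deleted gem5 sources):

    // Illustrative C++ counterpart of the SLICC external_type above.
    // Address and MachineID are Ruby types declared elsewhere; only the
    // declarations are shown, mirroring the methods listed in LogTM.sm.
    #include <cstdint>

    class Address;     // Ruby line/physical address type (assumed)
    class MachineID;   // Ruby machine identifier (assumed)

    class TransactionInterfaceManager
    {
      public:
        // Conflict detection against requests from remote processors
        bool shouldNackLoad(Address addr, uint64_t remoteTimestamp, MachineID remoteID);
        bool shouldNackStore(Address addr, uint64_t remoteTimestamp, MachineID remoteID);

        // Queries against this processor's read/write signatures
        bool checkReadWriteSignatures(Address addr);
        bool checkWriteSignatures(Address addr);

        // NACK bookkeeping reported back to the driver
        void notifySendNack(Address addr, uint64_t remoteTimestamp, MachineID remoteID);
        void notifyReceiveNack(int threadID, Address addr, uint64_t localTimestamp,
                               uint64_t remoteTimestamp, MachineID remoteID);
        void notifyReceiveNackFinal(int threadID, Address addr);

        // Timestamps used for conflict resolution
        uint64_t getTimestamp(int threadID);
        uint64_t getOldestTimestamp();

        // Profiling and replacement of transactional blocks
        void profileTransactionMiss(int threadID, bool isLoad);
        void xactReplacement(Address addr);
    };

The deprecated methods at the end of the external_type are omitted from the sketch; the L1 controller in the next file calls only the non-deprecated subset (shouldNackLoad, shouldNackStore, the signature checks, the notify* hooks, getOldestTimestamp, profileTransactionMiss, and xactReplacement).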
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm
deleted file mode 100644
index 468cf3c1c..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm
+++ /dev/null
@@ -1,1800 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- This file has been modified by Kevin Moore and Dan Nussbaum of the
- Scalable Systems Research Group at Sun Microsystems Laboratories
- (http://research.sun.com/scalable/) to support the Adaptive
- Transactional Memory Test Platform (ATMTP).
-
- Please send email to atmtp-interest@sun.com with feedback, questions, or
- to request future announcements about ATMTP.
-
- ----------------------------------------------------------------------
-
- File modification date: 2008-02-23
-
- ----------------------------------------------------------------------
-*/
-
-/*
- * $Id$
- *
- */
-
-
-machine(L1Cache, "MESI Directory L1 Cache CMP") {
-
- // NODE L1 CACHE
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
- MessageBuffer unblockFromL1Cache, network="To", virtual_network="4", ordered="false";
-
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
-
- // STATES
- enumeration(State, desc="Cache states", default="L1Cache_State_I") {
- // Base states
- NP, desc="Not present in either cache";
- I, desc="a L1 cache entry Idle";
- S, desc="a L1 cache entry Shared";
- E, desc="a L1 cache entry Exclusive";
- M, desc="a L1 cache entry Modified", format="!b";
-
- // Transient States
- IS, desc="L1 idle, issued GETS, have not seen response yet";
- IM, desc="L1 idle, issued GETX, have not seen response yet";
- SM, desc="L1 idle, issued GETX, have not seen response yet";
- IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
- IS_S, desc="L1 idle, issued GETS, L2 sent us data but responses from filters have not arrived";
- IS_E, desc="L1 idle, issued GETS, L2 sent us exclusive data, but responses from filters have not arrived";
- IM_M, desc="L1 idle, issued GETX, L2 sent us data, but responses from filters have not arrived";
-
- M_I, desc="L1 replacing, waiting for ACK";
- E_I, desc="L1 replacing, waiting for ACK";
-
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // L1 events
- Load, desc="Load request from the home processor";
- Ifetch, desc="I-fetch request from the home processor";
- Store, desc="Store request from the home processor";
-
- Replace, desc="lower level cache replaced this line, also need to invalidate to maintain inclusion";
- Inv, desc="Invalidate request from L2 bank";
- Inv_X, desc="Invalidate request from L2 bank, trans CONFLICT";
-
- // internal generated request
- L1_Replacement, desc="L1 Replacement", format="!r";
- L1_Replacement_XACT, desc="L1 Replacement of trans. data", format="!r";
-
- // other requests
- Fwd_GETX, desc="GETX from other processor";
- Fwd_GETS, desc="GETS from other processor";
- Fwd_GET_INSTR, desc="GET_INSTR from other processor";
-
- //Data, desc="Data for processor";
- L2_Data, desc="Data for processor, from L2";
- L2_Data_all_Acks, desc="Data for processor, from L2, all acks";
-    L2_Exclusive_Data, desc="Exclusive Data for processor, from L2";
-    L2_Exclusive_Data_all_Acks, desc="Exclusive Data for processor, from L2, all acks";
- DataS_fromL1, desc="data for GETS request, need to unblock directory";
- Data_all_Acks, desc="Data for processor, all acks";
-
- Ack, desc="Ack for processor";
- Ack_all, desc="Last ack for processor";
-
- WB_Ack, desc="Ack for replacement";
-
- // Transactional responses/requests
- Nack, desc="Nack for processor";
- Nack_all, desc="Last Nack for processor";
- Check_Write_Filter, desc="Check the write filter";
- Check_Read_Write_Filter, desc="Check the read and write filters";
-
- //Fwd_GETS_T, desc="A GetS from another processor, part of a trans, but not a conflict";
- Fwd_GETS_X, desc="A GetS from another processor, trans CONFLICT";
-    Fwd_GETX_X, desc="A GetX from another processor, trans CONFLICT";
- Fwd_GET_INSTR_X, desc="A GetInstr from another processor, trans CONFLICT";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
- State CacheState, desc="cache state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Address Address, desc="Line address for this TBE";
- Address PhysicalAddress, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="data is dirty";
- bool isPrefetch, desc="Set if this was caused by a prefetch";
- int pendingAcks, default="0", desc="number of pending acks";
- int ThreadID, default="0", desc="SMT thread issuing the request";
-
- bool RemoveLastOwnerFromDir, default="false", desc="The forwarded data was being replaced";
- MachineID LastOwnerID, desc="What component forwarded (last owned) the data"; // For debugging
-
- // for Transactional Memory
- uint64 Timestamp, default="0", desc="Timestamp of request";
- bool nack, default="false", desc="has this request been nacked?";
- NetDest Nackers, desc="The nodes which sent a NACK to us";
- }
-
- external_type(CacheMemory) {
- bool cacheAvail(Address);
- Address cacheProbe(Address);
- void allocate(Address);
- void deallocate(Address);
- Entry lookup(Address);
- void changePermission(Address, AccessPermission);
- bool isTagPresent(Address);
- }
-
- external_type(TBETable) {
- TBE lookup(Address);
- void allocate(Address);
- void deallocate(Address);
- bool isPresent(Address);
- }
-
- TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
-
- CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
- CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
-
-
- MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
-
- Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
- TransactionInterfaceManager xact_mgr, abstract_chip_ptr="true", constructor_hack="i";
-
- // triggerQueue used to indicate when all acks/nacks have been received
- MessageBuffer triggerQueue, ordered="false";
-
- int cache_state_to_int(State state);
-
- // inclusive cache returns L1 entries only
- Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory[addr];
- } else {
- return L1IcacheMemory[addr];
- }
- }
-
- void changeL1Permission(Address addr, AccessPermission permission) {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory.changePermission(addr, permission);
- } else if(L1IcacheMemory.isTagPresent(addr)) {
- return L1IcacheMemory.changePermission(addr, permission);
- } else {
- error("cannot change permission, L1 block not present");
- }
- }
-
- bool isL1CacheTagPresent(Address addr) {
- return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
- }
-
- State getState(Address addr) {
- if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
- DEBUG_EXPR(id);
- DEBUG_EXPR(addr);
- }
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
-
- if(L1_TBEs.isPresent(addr)) {
- return L1_TBEs[addr].TBEState;
- } else if (isL1CacheTagPresent(addr)) {
- return getL1CacheEntry(addr).CacheState;
- }
- return State:NP;
- }
-
-
- // For detecting read/write conflicts on requests from remote processors
- bool shouldNackLoad(Address addr, uint64 remote_timestamp, MachineID remote_id){
- return xact_mgr.shouldNackLoad(addr, remote_timestamp, remote_id);
- }
-
- bool shouldNackStore(Address addr, uint64 remote_timestamp, MachineID remote_id){
- return xact_mgr.shouldNackStore(addr, remote_timestamp, remote_id);
- }
-
- // For querying read/write signatures on current processor
- bool checkReadWriteSignatures(Address addr){
- return xact_mgr.checkReadWriteSignatures(addr);
- }
-
- bool checkWriteSignatures(Address addr){
- return xact_mgr.checkWriteSignatures(addr);
- }
-
- void setState(Address addr, State state) {
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
-
- // MUST CHANGE
- if(L1_TBEs.isPresent(addr)) {
- L1_TBEs[addr].TBEState := state;
- }
-
- if (isL1CacheTagPresent(addr)) {
- getL1CacheEntry(addr).CacheState := state;
-
- // Set permission
- if (state == State:I) {
- changeL1Permission(addr, AccessPermission:Invalid);
- } else if (state == State:S || state == State:E) {
- changeL1Permission(addr, AccessPermission:Read_Only);
- } else if (state == State:M) {
- changeL1Permission(addr, AccessPermission:Read_Write);
- } else {
- changeL1Permission(addr, AccessPermission:Busy);
- }
- }
- }
-
- Event mandatory_request_type_to_event(CacheRequestType type) {
- if (type == CacheRequestType:LD) {
- return Event:Load;
- } else if (type == CacheRequestType:LD_XACT) {
- return Event:Load;
- } else if (type == CacheRequestType:IFETCH) {
- return Event:Ifetch;
- } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
- return Event:Store;
- } else if((type == CacheRequestType:ST_XACT) || (type == CacheRequestType:LDX_XACT) ) {
- return Event:Store;
- } else {
- error("Invalid CacheRequestType");
- }
- }
-
-
- void printRequest(CacheMsg in_msg){
-    DEBUG_EXPR("Request msg: ");
- DEBUG_EXPR(machineID);
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.PhysicalAddress);
- DEBUG_EXPR(in_msg.Type);
- DEBUG_EXPR(in_msg.ProgramCounter);
- DEBUG_EXPR(in_msg.AccessMode);
- DEBUG_EXPR(in_msg.Size);
- DEBUG_EXPR(in_msg.Prefetch);
- DEBUG_EXPR(in_msg.Version);
- DEBUG_EXPR(in_msg.LogicalAddress);
- DEBUG_EXPR(in_msg.ThreadID);
- DEBUG_EXPR(in_msg.Timestamp);
- DEBUG_EXPR(in_msg.ExposedAction);
- }
-
- out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
- out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
- out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
- peek(triggerQueue_in, TriggerMsg) {
- if (in_msg.Type == TriggerType:ALL_ACKS) {
- if (L1_TBEs[in_msg.Address].nack == true){
- trigger(Event:Nack_all, in_msg.Address);
- } else {
- trigger(Event:Ack_all, in_msg.Address);
- }
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Response IntraChip L1 Network - response msg to this L1 cache
- in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
- if (responseIntraChipL1Network_in.isReady()) {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if(in_msg.Type == CoherenceResponseType:L2_DATA_EXCLUSIVE) {
- if( in_msg.AckCount == 0 ){
- trigger(Event:L2_Exclusive_Data_all_Acks, in_msg.Address);
- }
- else{
- trigger(Event:L2_Exclusive_Data, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceResponseType:L2_DATA) {
- if( in_msg.AckCount == 0 ){
- trigger(Event:L2_Data_all_Acks, in_msg.Address);
- }
- else{
- trigger(Event:L2_Data, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceResponseType:DATA) {
- if ( (getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
- machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache ) {
-
- trigger(Event:DataS_fromL1, in_msg.Address);
- } else if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
- trigger(Event:Data_all_Acks, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:NACK) {
- trigger(Event:Nack, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.Address);
- } else {
- error("Invalid L1 response type");
- }
- }
- }
- }
-
- // Request InterChip network - request from this L1 cache to the shared L2
- in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
- if(requestIntraChipL1Network_in.isReady()) {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:INV) {
-        // check whether we have an inter-proc conflict
- if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == false){
- trigger(Event:Inv, in_msg.Address);
- }
- else{
- // there's a conflict
- trigger(Event:Inv_X, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceRequestType:INV_ESCAPE) {
- // we cannot NACK this
- trigger(Event:Inv, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
- // check whether we have a conflict
- if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- trigger(Event:Fwd_GETX_X, in_msg.Address);
- }
- else{
- // else no conflict
- // upgrade transforms to GETX due to race
- trigger(Event:Fwd_GETX, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceRequestType:GETX_ESCAPE) {
- // no need for filter checks
- trigger(Event:Fwd_GETX, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- // check whether we have a conflict
- if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- trigger(Event:Fwd_GETS_X, in_msg.Address);
- }
- else{
- // else no conflict
- trigger(Event:Fwd_GETS, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceRequestType:GETS_ESCAPE) {
- // no need for filter checks
- trigger(Event:Fwd_GETS, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
- if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- trigger(Event:Fwd_GET_INSTR_X, in_msg.Address);
- }
- else{
- // else no conflict
- trigger(Event:Fwd_GET_INSTR, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceRequestType:GET_INSTR_ESCAPE) {
- // no need for filter checks
- trigger(Event:Fwd_GET_INSTR, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:REPLACE) {
- trigger(Event:Replace, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:CHECK_WRITE_FILTER) {
- trigger(Event:Check_Write_Filter, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:CHECK_READ_WRITE_FILTER) {
- trigger(Event:Check_Read_Write_Filter, in_msg.Address);
- } else {
- error("Invalid forwarded request type");
- }
- }
- }
- }
-
-  // Mandatory Queue between the node's CPU and its L1 caches
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg) {
-
- // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
-
- if (in_msg.Type == CacheRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
-
- // Check to see if it is in the OTHER L1
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
- // check whether block is transactional
- if (checkReadWriteSignatures(in_msg.Address) == true){
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement_XACT, in_msg.Address);
- }
- else{
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
- }
- }
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
- // The tag matches for the L1, so the L1 asks the L2 for it.
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
-          // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- // check whether block is transactional
- if(checkReadWriteSignatures( L1IcacheMemory.cacheProbe(in_msg.Address) ) == true){
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement_XACT, L1IcacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
- }
- }
- }
- } else {
- // *** DATA ACCESS ***
-
- // Check to see if it is in the OTHER L1
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
- // check whether block is transactional
- if(checkReadWriteSignatures(in_msg.Address) == true){
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement_XACT, in_msg.Address);
- }
- else{
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
- }
- }
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
-        // The tag matches for the L1, so the L1 asks the L2 for it.
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
-          // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- // check whether block is transactional
- if(checkReadWriteSignatures( L1DcacheMemory.cacheProbe(in_msg.Address) ) == true){
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement_XACT, L1DcacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
- }
- }
- }
- }
- }
- }
- }
-
- // ACTIONS
- action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- if(in_msg.ExposedAction){
- out_msg.Type := CoherenceRequestType:GETS_ESCAPE;
- }
- else{
- out_msg.Type := CoherenceRequestType:GETS;
- }
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
- action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- if(in_msg.ExposedAction){
- out_msg.Type := CoherenceRequestType:GET_INSTR_ESCAPE;
- }
- else{
- out_msg.Type := CoherenceRequestType:GET_INSTR;
- }
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
-
- action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- if(in_msg.ExposedAction){
- out_msg.Type := CoherenceRequestType:GETX_ESCAPE;
- }
- else{
- out_msg.Type := CoherenceRequestType:GETX;
- }
- out_msg.Requestor := machineID;
- DEBUG_EXPR(machineID);
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
-  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:UPGRADE;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
- /****************************BEGIN Transactional Actions*************************/
- // send a NACK to requestor - the equivalent of a NACKed data response
- // Note we don't have to track the ackCount here because we only send data NACKs when
- // we are exclusive with the data. Otherwise the L2 will source the data (and set the ackCount
- // appropriately)
- action(e_sendNackToRequestor, "en", desc="send nack to requestor (could be L2 or L1)") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- // ackCount is by default 0
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- }
-
- // send a NACK when L2 wants us to invalidate ourselves
-  action(fi_sendInvNack, "fin", desc="send an invalidation NACK to the requestor") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- }
-
- // for when we want to check our Write filters
- action(a_checkWriteFilter, "awf", desc="Check our write filter for conflicts") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- // For correct conflict detection, should call shouldNackLoad() NOT
- // checkWriteSignatures()
- if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- // conflict - send a NACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- else{
- // no conflict - send ACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
- }
-
- // for when we want to check our Read + Write filters
-  action(a_checkReadWriteFilter, "arwf", desc="Check our read and write filters for conflicts") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- // For correct conflict detection, we should call shouldNackStore() NOT
- // checkReadWriteSignatures()
- if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp,in_msg.Requestor ) == true){
- // conflict - send a NACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- else{
- // no conflict - send ACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
- }
-
- action(r_notifyReceiveNack, "nrn", desc="Notify the driver when a nack is received"){
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- xact_mgr.notifyReceiveNack(L1_TBEs[address].ThreadID, in_msg.PhysicalAddress, L1_TBEs[address].Timestamp, in_msg.Timestamp, in_msg.Sender);
- }
- }
-
-  // Used by the driver to take an abort or retry action
- action(r_notifyReceiveNackFinal, "nrnf", desc="Notify the driver when the final nack is received"){
- xact_mgr.notifyReceiveNackFinal(L1_TBEs[address].ThreadID, L1_TBEs[address].PhysicalAddress);
- }
-
- // this version uses physical address stored in TBE
-
- action(x_tbeSetPrefetch, "xp", desc="Set the prefetch bit in the TBE."){
- peek(mandatoryQueue_in, CacheMsg) {
- if(in_msg.Prefetch == PrefetchBit:No){
- L1_TBEs[address].isPrefetch := false;
- }
- else{
- assert(in_msg.Prefetch == PrefetchBit:Yes);
- L1_TBEs[address].isPrefetch := true;
- }
- }
- }
-
- action(x_tbeSetPhysicalAddress, "ia", desc="Sets the physical address field of the TBE"){
- peek(mandatoryQueue_in, CacheMsg) {
- L1_TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
- L1_TBEs[address].ThreadID := in_msg.ThreadID;
- L1_TBEs[address].Timestamp := in_msg.Timestamp;
- }
- }
-
- // Send unblock cancel to L2 (for nacked requests that blocked directory)
- action(jj_sendUnblockCancel, "\jc", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK_CANCEL;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along list of NACKers
- out_msg.Nackers := L1_TBEs[address].Nackers;
- }
- }
-
- //same as ACK case, but sets the NACK flag for TBE entry
- action(q_updateNackCount, "qn", desc="Update ack count") {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- // mark this request as having been NACKed
- L1_TBEs[address].nack := true;
- APPEND_TRANSITION_COMMENT(" before pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
- L1_TBEs[address].Nackers.add(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
-
- APPEND_TRANSITION_COMMENT(" after pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- APPEND_TRANSITION_COMMENT(" sender: ");
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- if (L1_TBEs[address].pendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := TriggerType:ALL_ACKS;
- APPEND_TRANSITION_COMMENT(" Triggering All_Acks");
- }
- }
- }
- }
-
- action(q_profileOverflow, "po", desc="profile the overflowed block"){
- profileOverflow(address, machineID);
- }
-
- action(qq_xactReplacement, "\q", desc="replaced a transactional block"){
- xact_mgr.xactReplacement(address);
- }
-
- action(p_profileRequest, "pcc", desc="Profile request msg") {
- peek(mandatoryQueue_in, CacheMsg) {
- APPEND_TRANSITION_COMMENT(" request: Timestamp: ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" PA: ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" Type: ");
- APPEND_TRANSITION_COMMENT(in_msg.Type);
- APPEND_TRANSITION_COMMENT(" VPC: ");
- APPEND_TRANSITION_COMMENT(in_msg.ProgramCounter);
- APPEND_TRANSITION_COMMENT(" Mode: ");
- APPEND_TRANSITION_COMMENT(in_msg.AccessMode);
- APPEND_TRANSITION_COMMENT(" PF: ");
- APPEND_TRANSITION_COMMENT(in_msg.Prefetch);
- APPEND_TRANSITION_COMMENT(" VA: ");
- APPEND_TRANSITION_COMMENT(in_msg.LogicalAddress);
- APPEND_TRANSITION_COMMENT(" Thread: ");
- APPEND_TRANSITION_COMMENT(in_msg.ThreadID);
- APPEND_TRANSITION_COMMENT(" Exposed: ");
- APPEND_TRANSITION_COMMENT(in_msg.ExposedAction);
- }
- }
-
- /********************************END Transactional Actions************************/
-
- action(d_sendDataToRequestor, "d", desc="send data to requestor") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := L1_TBEs[address].DataBlk;
- out_msg.Dirty := L1_TBEs[address].Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.RemoveLastOwnerFromDir := true;
- out_msg.LastOwnerID := machineID;
- }
- }
- }
-
- action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := L1_TBEs[address].DataBlk;
- out_msg.Dirty := L1_TBEs[address].Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
- action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := L1_TBEs[address].DataBlk;
- out_msg.Dirty := L1_TBEs[address].Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
-  action(fi_sendInvAck, "fi", desc="send an invalidation ACK to the requestor") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
-
- action(g_issuePUTX, "g", desc="send data to the L2 cache") {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:PUTX;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- if (getL1CacheEntry(address).Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(g_issuePUTS, "gs", desc="send clean data to the L2 cache") {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:PUTS;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- if (getL1CacheEntry(address).Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- // used to determine whether to set sticky-M or sticky-S state in directory (M or SS in L2)
- action(g_issuePUTXorPUTS, "gxs", desc="send data to the L2 cache") {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- if(checkWriteSignatures(address) == true){
- // we should set sticky-M
- out_msg.Type := CoherenceRequestType:PUTX;
- }
- else{
- // we should set sticky-S
- out_msg.Type := CoherenceRequestType:PUTS;
- }
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- if (getL1CacheEntry(address).Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // inform L2 whether request was transactional
- //out_msg.Transactional := L1_TBEs[address].Trans;
- out_msg.Transactional := checkReadWriteSignatures(address);
-
- out_msg.RemoveLastOwnerFromDir := L1_TBEs[address].RemoveLastOwnerFromDir;
- out_msg.LastOwnerID := L1_TBEs[address].LastOwnerID;
- }
- }
-
- action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // inform L2 whether request was transactional
- // out_msg.Transactional := L1_TBEs[address].Trans;
- out_msg.Transactional := checkReadWriteSignatures(address);
-
- out_msg.RemoveLastOwnerFromDir := L1_TBEs[address].RemoveLastOwnerFromDir;
- out_msg.LastOwnerID := L1_TBEs[address].LastOwnerID;
- }
- }
-
-
-
- action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
- DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
- sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
- }
-
- action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
- DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
- sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
- getL1CacheEntry(address).Dirty := true;
- }
-
- action(h_load_conflict, "hc", desc="Notify sequencer of conflict on load") {
- sequencer.readConflictCallback(address);
- }
-
-  action(hh_store_conflict, "\hc", desc="Notify sequencer of conflict on store") {
- sequencer.writeConflictCallback(address);
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
- check_allocate(L1_TBEs);
- L1_TBEs.allocate(address);
- L1_TBEs[address].isPrefetch := false;
- L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
- L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
- }
-
- action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
- }
-
- action(j_popTriggerQueue, "jp", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
- }
-
- action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
- }
-
- action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- L1_TBEs.deallocate(address);
- }
-
- action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
- getL1CacheEntry(address).Dirty := in_msg.Dirty;
- if (machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
- L1_TBEs[address].RemoveLastOwnerFromDir := in_msg.RemoveLastOwnerFromDir;
- L1_TBEs[address].LastOwnerID := in_msg.LastOwnerID;
- }
- }
- }
-
- action(q_updateAckCount, "q", desc="Update ack count") {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- APPEND_TRANSITION_COMMENT(" before pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" after pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- APPEND_TRANSITION_COMMENT(" sender: ");
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- if (L1_TBEs[address].pendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := TriggerType:ALL_ACKS;
- APPEND_TRANSITION_COMMENT(" Triggering All_Acks");
- }
- }
- }
- }
-
- action(z_stall, "z", desc="Stall") {
- }
-
- action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory.deallocate(address);
- } else {
- L1IcacheMemory.deallocate(address);
- }
- }
-
- action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (L1DcacheMemory.isTagPresent(address) == false) {
- L1DcacheMemory.allocate(address);
- // reset trans bit
- }
- }
-
- action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (L1IcacheMemory.isTagPresent(address) == false) {
- L1IcacheMemory.allocate(address);
- // reset trans bit
- }
- }
-
- action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
- requestIntraChipL1Network_in.recycle();
- }
-
- action(z_recycleMandatoryQueue, "\z", desc="recycle L1 request queue") {
- mandatoryQueue_in.recycle();
- }
-
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
- profile_L1Cache_miss(in_msg, id);
- }
- }
-
- action(uuu_profileTransactionLoadMiss, "\uu", desc="Profile Miss") {
- xact_mgr.profileTransactionMiss(L1_TBEs[address].ThreadID, true);
- }
-
- action(uuu_profileTransactionStoreMiss, "\uuu", desc="Profile Miss") {
- xact_mgr.profileTransactionMiss(L1_TBEs[address].ThreadID, false);
- }
-
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // For filter responses
- transition({NP, I, S, E, M, IS, IM, SM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, Check_Write_Filter){
- a_checkWriteFilter;
- l_popRequestQueue;
- }
-
- transition({NP, I, S, E, M, IS, IM, SM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, Check_Read_Write_Filter){
- a_checkReadWriteFilter;
- l_popRequestQueue;
- }
-
- // Transitions for Load/Store/Replacement/WriteBack from transient states
- transition({IS, IM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, {Load, Ifetch, Store, L1_Replacement, L1_Replacement_XACT}) {
- z_recycleMandatoryQueue;
- }
-
- // Transitions from Idle
- transition({NP,I}, {L1_Replacement, L1_Replacement_XACT}) {
- ff_deallocateL1CacheBlock;
- }
-
- transition({NP,I}, Load, IS) {
- p_profileRequest;
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- x_tbeSetPrefetch;
- x_tbeSetPhysicalAddress;
- a_issueGETS;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, Ifetch, IS) {
- p_profileRequest;
- pp_allocateL1ICacheBlock;
- i_allocateTBE;
- x_tbeSetPhysicalAddress;
- ai_issueGETINSTR;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, Store, IM) {
- p_profileRequest;
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- x_tbeSetPrefetch;
- x_tbeSetPhysicalAddress;
- b_issueGETX;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP, I}, Inv) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transactional invalidates to blocks in NP or I are
- // transactional blocks that have been silently replaced
- // FALSE POSITIVE - can't tell whether block was never in our read/write set or was replaced
- transition({NP, I}, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements. This happens due to our silent replacements.
- transition({NP, I}, Replace) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transitions from Shared
- transition(S, {Load,Ifetch}) {
- p_profileRequest;
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(S, Store, IM) {
- p_profileRequest;
- i_allocateTBE;
- x_tbeSetPrefetch;
- x_tbeSetPhysicalAddress;
- b_issueGETX;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition(S, L1_Replacement, I) {
- ff_deallocateL1CacheBlock;
- }
-
- transition(S, L1_Replacement_XACT, I) {
- q_profileOverflow;
- qq_xactReplacement;
- ff_deallocateL1CacheBlock;
- }
-
- transition(S, Inv, I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(S, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements.
- transition(S, Replace, I){
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transitions from Exclusive
-
- transition(E, {Load, Ifetch}) {
- p_profileRequest;
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(E, Store, M) {
- p_profileRequest;
- hh_store_hit;
- k_popMandatoryQueue;
- }
-
- transition(E, L1_Replacement, M_I) {
- // The data is clean
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateL1CacheBlock;
- }
-
- // we can't go to M_I here because we need to maintain transactional read isolation on this line, and M_I allows GETS and GETXs to
- // be serviced. For correctness we need to make sure we are marked as a transactional reader (if we never read transactionally written data back exclusively) or transactional writer
- transition(E, L1_Replacement_XACT, E_I) {
- q_profileOverflow;
- qq_xactReplacement;
- // The data is clean
- i_allocateTBE;
- g_issuePUTXorPUTS; // send data and hold, but do not release on forwarded requests
- ff_deallocateL1CacheBlock;
- }
-
- transition(E, Inv, I) {
- // don't send data
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(E, Inv_X){
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements
- transition(E, Replace, I) {
- // don't send data
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(E, Fwd_GETX, I) {
- d_sendDataToRequestor;
- l_popRequestQueue;
- }
-
- transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popRequestQueue;
- }
-
- // If we see Fwd_GETS_X this is a FALSE POSITIVE, since we never
- // modified this block
- transition(E, {Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}){
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- // Transitions from Modified
- transition(M, {Load, Ifetch}) {
- p_profileRequest;
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(M, Store) {
- p_profileRequest;
- hh_store_hit;
- k_popMandatoryQueue;
- }
-
- transition(M, L1_Replacement, M_I) {
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateL1CacheBlock;
- }
-
- // in order to prevent releasing isolation of transactional data (either written to just read) we need to
- // mark ourselves as a transactional reader (e.g. SS state in L2) or transactional writer (e.g. M state in L2). We need to transition to the same E_I
- // state as for transactional replacements from E state, and ignore all requests.
- transition(M, L1_Replacement_XACT, E_I) {
- q_profileOverflow;
- qq_xactReplacement;
- i_allocateTBE;
- g_issuePUTXorPUTS; // send data, but do not release on forwarded requests
- ff_deallocateL1CacheBlock;
- }
-
- transition({M_I, E_I}, WB_Ack, I) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(M, Inv, I) {
- f_sendDataToL2;
- l_popRequestQueue;
- }
-
- // for L2 replacement
- transition(M, Replace, I) {
- f_sendDataToL2;
- l_popRequestQueue;
- }
-
- transition(M, Inv_X){
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- transition(E_I, Inv) {
- // ack requestor's GETX, but wait for WB_Ack from L2
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // maintain isolation on M or E replacements
- // took out M_I, since L2 transitions to M upon PUTX, and we should no longer receives invalidates
- transition(E_I, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // allow L2 to get data while we replace
- transition({M_I, E_I}, Replace, I) {
- ft_sendDataToL2_fromTBE;
- s_deallocateTBE;
- l_popRequestQueue;
- }
-
- transition(M, Fwd_GETX, I) {
- d_sendDataToRequestor;
- l_popRequestQueue;
- }
-
- transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popRequestQueue;
- }
-
- transition(M, {Fwd_GETX_X, Fwd_GETS_X, Fwd_GET_INSTR_X}) {
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- // For simplicity we ignore all other requests while we wait for the L2 to receive the clean data. Otherwise we would incorrectly transfer
- // ownership and fail to mark ourselves as a transactional sharer in the L2 directory
- transition(E_I, {Fwd_GETX, Fwd_GETS, Fwd_GET_INSTR, Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}) {
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- transition(M_I, Fwd_GETX, I) {
- dt_sendDataToRequestor_fromTBE;
- s_deallocateTBE;
- l_popRequestQueue;
- }
-
- transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
- dt_sendDataToRequestor_fromTBE;
- d2t_sendDataToL2_fromTBE;
- s_deallocateTBE;
- l_popRequestQueue;
- }
-
- // don't release isolation on forwarded conflicting requests
- transition(M_I, {Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}) {
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- // Transitions from IS
- transition({IS, IS_I}, Inv, IS_I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Only possible when L2 sends us data in SS state. No conflict is possible, but the L2 blocks on GETS, so we still unblock it
- transition(IS, L2_Data_all_Acks, S) {
- u_writeDataToL1Cache;
- // unblock L2 because it blocks on GETS
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // We made the L2 block on GETS requests, so we are guaranteed to have no races with GETX.
- // We only get into this transition if the writer had to retry its GETX request that invalidated us, and the L2 went back to SS
- transition(IS_I, L2_Data_all_Acks, S) {
- u_writeDataToL1Cache;
- // unblock L2 because it blocks on GETS
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // for L2 replacements
- transition({IS, IS_I}, Replace, IS_I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // These transitions are for when L2 sends us data, because it has exclusive copy, but L1 filter responses have not arrived
- transition({IS, IS_I}, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition({IS, IS_I}, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- // IS_I also allowed because L2 Inv beat our GETS request, and now L2 is in NP state, ready to service our GETS.
- transition({IS, IS_I}, L2_Data, IS_S) {
- u_writeDataToL1Cache;
- // This message carries the inverse of the ack count
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
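-
- // Worked example (illustrative; a four-L1 chip is assumed): the L2 sends L2_DATA with
- // AckCount = -(4 - 1) = -3, so the TBE starts three responses short. Each filter-check
- // response from another L1 (Ack or Nack) moves the count toward zero; once it reaches zero
- // the trigger queue fires Ack_all (no conflicts) or Nack_all (at least one conflict).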
-
- transition(IS_S, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_S, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_S, Ack_all, S){
- // tell L2 we succeeded
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry request from I
- transition(IS_S, Nack_all, I){
- ff_deallocateL1CacheBlock;
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // tell L2 we failed
- jj_sendUnblockCancel;
- h_load_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // L2 is trying to give us exclusive data
- // we can go to E because the L2 is guaranteed to have the only copy (i.e. no races from other L1s are possible)
- transition({IS, IS_I}, L2_Exclusive_Data, IS_E) {
- u_writeDataToL1Cache;
- // This message carries the inverse of the ack count
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition({IS, IS_I}, L2_Exclusive_Data_all_Acks, E){
- u_writeDataToL1Cache;
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_E, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_E, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_E, Ack_all, E){
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry request from I
- transition(IS_E, Nack_all, I){
- ff_deallocateL1CacheBlock;
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // need to tell L2 we failed
- jj_sendUnblockCancel;
- h_load_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // Normal case - the L2 does not hold the line exclusively, but an L1 has the line.
- // We got NACKed. Try again in state I
- // IMPORTANT: filters are NOT checked when L2 is in SS, because nobody has modified the line.
- // For this transition we only receive NACKs from the exclusive writer
- transition({IS, IS_I}, Nack_all, I) {
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // L2 is blocked when L1 is exclusive
- jj_sendUnblockCancel;
- h_load_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- transition({IS, IS_I}, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- transition(IS, DataS_fromL1, S) {
- u_writeDataToL1Cache;
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // This occurs when there is a race between our GETS and another L1's GETX, and the GETX wins
- // The L2 is now blocked because our request was forwarded to the exclusive L1 (i.e. the L2 is in MT_IIB)
- transition(IS_I, DataS_fromL1, S) {
- u_writeDataToL1Cache;
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // Transitions from IM
- transition({IM, SM}, Inv, IM) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition({IM, SM}, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements
- transition({IM, SM}, Replace, IM) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // only possible when the exclusive L1 sends us the line
- transition(IM, Data_all_Acks, M) {
- u_writeDataToL1Cache;
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // L2 is trying to give us data
- // Don't go to SM because we do not want an S copy on failure. This might cause conflicts for older writers that
- // nacked us.
- transition(IM, L2_Data, IM_M) {
- u_writeDataToL1Cache;
- // This message carries the inverse of the ack count
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IM, L2_Data_all_Acks, M){
- u_writeDataToL1Cache;
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IM_M, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IM_M, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IM_M, Ack_all, M){
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry request from I
- transition(IM_M, Nack_all, I){
- ff_deallocateL1CacheBlock;
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // need to tell L2 we failed
- jj_sendUnblockCancel;
- hh_store_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // transitions from SM
- transition({SM, IM}, Ack) {
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- // instead of Data we receive Nacks
- transition({SM, IM}, Nack) {
- r_notifyReceiveNack;
- // mark this request as being NACKed
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(SM, Ack_all, M) {
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry in state S
- transition(SM, Nack_all, S){
- // This is the final nack
- r_notifyReceiveNackFinal;
- // unblock the L2
- jj_sendUnblockCancel;
- hh_store_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry in state I
- transition(IM, Nack_all, I){
- // This is the final NACK
- r_notifyReceiveNackFinal;
- // unblock the L2
- jj_sendUnblockCancel;
- hh_store_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
-}
-
-
-
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm
deleted file mode 100644
index 9085ae33f..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm
+++ /dev/null
@@ -1,2123 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
- *
- */
-
-machine(L2Cache, "MESI Directory L2 Cache CMP") {
-
- // L2 BANK QUEUES
- // From local bank of L2 cache TO the network
- MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="2", ordered="false"; // this L2 bank -> Memory
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> a local L1
- MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false"; // this L2 bank -> a local L1 || Memory
-
- // FROM the network to this local bank of L2 cache
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false"; // a local L1 -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false"; // a local L1 || Memory -> this L2 bank
- MessageBuffer unblockToL2Cache, network="From", virtual_network="4", ordered="false"; // a local L1 || Memory -> this L2 bank
-
- // STATES
- enumeration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
- // Base states
- NP, desc="Not present in either cache";
- SS, desc="L2 cache entry Shared, also present in one or more L1s";
- M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
- MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
-
- // L2 replacement
- M_I, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
- MT_I, desc="L2 cache replacing, getting data from exclusive";
- MCT_I, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
- I_I, desc="L2 replacing clean data, need to inv sharers and then drop data";
- S_I, desc="L2 replacing dirty data, collecting acks from L1s";
-
- // Transient States for fetching data from memory
- ISS, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
- IS, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
- IM, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";
-
- // Blocking states
- SS_MB, desc="Blocked for L1_GETX from SS";
- SS_SSB, desc="Blocked for L1_GETS from SS";
- MT_MB, desc="Blocked for L1_GETX from MT";
- M_MB, desc="Blocked for L1_GETX from M";
- ISS_MB, desc="Blocked for L1_GETS or L1_GETX from NP, received Mem Data";
- IS_SSB, desc="Blocked for L1_GET_INSTR from NP, received Mem Data";
- M_SSB, desc="Blocked for L1_GET_INSTR from M";
-
- MT_IIB, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
- MT_IB, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
- MT_SB, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
-
- // for resolving PUTX/PUTS races
- PB_MT, desc="Going to MT, got data and unblock, waiting for PUT";
- PB_SS, desc="Going to SS, got unblock, waiting for PUT";
- PB_MT_IB, desc="Blocked from MT, got unblock, waiting for data and PUT";
-
- }
-
- // EVENTS
- enumeration(Event, desc="L2 Cache events") {
- // L2 events
-
- // events initiated by the local L1s
- L1_GET_INSTR, desc="an L1I GET INSTR request for a block mapped to us";
- L1_GET_INSTR_ESCAPE, desc="an L1I GET INSTR request in an escape action for a block mapped to us";
- L1_GETS, desc="an L1D GETS request for a block mapped to us";
- L1_GETS_ESCAPE, desc="an L1D GETS request in an escape action for a block mapped to us";
- L1_GETX, desc="an L1D GETX request for a block mapped to us";
- L1_GETX_ESCAPE, desc="an L1D GETX request in an escape action for a block mapped to us";
- L1_UPGRADE, desc="an L1D UPGRADE request for a block mapped to us";
-
- L1_PUTX, desc="L1 replacing data";
- L1_PUTX_old, desc="L1 replacing data, but no longer sharer";
- L1_PUTS, desc="L1 replacing clean data";
- L1_PUTS_old, desc="L1 replacing clean data, but no longer sharer";
- L1_PUT_PENDING, desc="L1 PUT msg pending (recycled)";
-
- Fwd_L1_GETX, desc="L1 did not have data, so we supply";
- Fwd_L1_GETS, desc="L1 did not have data, so we supply";
- Fwd_L1_GET_INSTR, desc="L1 did not have data, so we supply";
-
- // events initiated by this L2
- L2_Replacement, desc="L2 Replacement", format="!r";
- L2_Replacement_XACT, desc="L2 Replacement of trans. data", format="!r";
- L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";
- L2_Replacement_clean_XACT, desc="L2 Replacement of trans. data, but data is clean", format="!r";
-
- // events from memory controller
- Mem_Data, desc="data from memory", format="!r";
- Mem_Ack, desc="ack from memory", format="!r";
-
- // M->S data writeback
- WB_Data, desc="data from L1";
- WB_Data_clean, desc="clean data from L1";
- Ack, desc="writeback ack";
- Ack_all, desc="writeback ack";
- // For transactional memory
- Nack, desc="filter indicates conflict";
- Nack_all, desc="all filters have responded, at least one conflict";
-
- Unblock, desc="Unblock from L1 requestor";
- Unblock_Cancel, desc="Unblock from L1 requestor (FOR XACT MEMORY)";
- Exclusive_Unblock, desc="Unblock from L1 requestor";
-
- Unblock_WaitPUTold, desc="Unblock from L1 requestor, last requestor was replacing so wait for PUT msg";
- Exclusive_Unblock_WaitPUTold, desc="Unblock from L1 requestor, last requestor was replacing so wait for PUT msg";
-
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- NetDest Sharers, desc="tracks the L1 sharers on-chip";
- MachineID Exclusive, desc="Exclusive holder of block";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
-
- bool Trans, desc="dummy bit for debugging";
- bool Read, desc="LogTM R bit";
- bool Write, desc="LogTM W bit";
- bool L2Miss, desc="Was this block sourced from memory";
- int L1PutsPending, default="0", desc="how many PUTX/PUTS are pending for this entry (being recycled)";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Address Address, desc="Line address for this TBE";
- Address PhysicalAddress, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="Data is Dirty";
-
- NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
- MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
- bool isPrefetch, desc="Set if this was caused by a prefetch";
-
- int pendingAcks, desc="number of pending acks for invalidates during writeback";
- bool nack, default="false", desc="has this request been NACKed?";
- }
-
- external_type(CacheMemory) {
- bool cacheAvail(Address);
- Address cacheProbe(Address);
- void allocate(Address);
- void deallocate(Address);
- Entry lookup(Address);
- void changePermission(Address, AccessPermission);
- bool isTagPresent(Address);
- void setMRU(Address);
- }
-
- external_type(TBETable) {
- TBE lookup(Address);
- void allocate(Address);
- void deallocate(Address);
- bool isPresent(Address);
- }
-
- TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
-
- CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
-
- // inclusive cache, returns L2 entries only
- Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
- return L2cacheMemory[addr];
- }
-
- void changeL2Permission(Address addr, AccessPermission permission) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return L2cacheMemory.changePermission(addr, permission);
- }
- }
-
- string getCoherenceRequestTypeStr(CoherenceRequestType type) {
- return CoherenceRequestType_to_string(type);
- }
-
- bool isL2CacheTagPresent(Address addr) {
- return (L2cacheMemory.isTagPresent(addr));
- }
-
- bool isOneSharerLeft(Address addr, MachineID requestor) {
- assert(L2cacheMemory[addr].Sharers.isElement(requestor));
- return (L2cacheMemory[addr].Sharers.count() == 1);
- }
-
- bool isSharer(Address addr, MachineID requestor) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return L2cacheMemory[addr].Sharers.isElement(requestor);
- } else {
- return false;
- }
- }
-
- void addSharer(Address addr, MachineID requestor) {
- DEBUG_EXPR(machineID);
- DEBUG_EXPR(requestor);
- DEBUG_EXPR(addr);
- assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID);
- L2cacheMemory[addr].Sharers.add(requestor);
- }
-
- State getState(Address addr) {
- if(L2_TBEs.isPresent(addr)) {
- return L2_TBEs[addr].TBEState;
- } else if (isL2CacheTagPresent(addr)) {
- return getL2CacheEntry(addr).CacheState;
- }
- return State:NP;
- }
-
- string getStateStr(Address addr) {
- return L2Cache_State_to_string(getState(addr));
- }
-
- // when is this called
- void setState(Address addr, State state) {
-
- // MUST CHANGE
- if (L2_TBEs.isPresent(addr)) {
- L2_TBEs[addr].TBEState := state;
- }
-
- if (isL2CacheTagPresent(addr)) {
- getL2CacheEntry(addr).CacheState := state;
-
- // Set permission
- if (state == State:SS ) {
- changeL2Permission(addr, AccessPermission:Read_Only);
- } else if (state == State:M) {
- changeL2Permission(addr, AccessPermission:Read_Write);
- } else if (state == State:MT) {
- changeL2Permission(addr, AccessPermission:Stale);
- } else {
- changeL2Permission(addr, AccessPermission:Busy);
- }
- }
- }
-
- Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
- if (L2cacheMemory.isTagPresent(addr)){ /* Present */
- if(getL2CacheEntry(addr).L1PutsPending > 0 && /* At least one PUT pending */
- (getL2CacheEntry(addr).CacheState == State:SS || getL2CacheEntry(addr).CacheState == State:MT || getL2CacheEntry(addr).CacheState == State:M )) { /* Base state */
-
- /* Only allow PUTX/PUTS to go on */
- if (type != CoherenceRequestType:PUTX &&
- type != CoherenceRequestType:PUTS) {
- return Event:L1_PUT_PENDING; // Don't serve any req until the wb is serviced
- }
- }
- }
- if(type == CoherenceRequestType:GETS) {
- return Event:L1_GETS;
- } else if(type == CoherenceRequestType:GETS_ESCAPE) {
- return Event:L1_GETS_ESCAPE;
- } else if(type == CoherenceRequestType:GET_INSTR) {
- return Event:L1_GET_INSTR;
- } else if(type == CoherenceRequestType:GET_INSTR_ESCAPE) {
- return Event:L1_GET_INSTR_ESCAPE;
- } else if (type == CoherenceRequestType:GETX) {
- return Event:L1_GETX;
- } else if(type == CoherenceRequestType:GETX_ESCAPE) {
- return Event:L1_GETX_ESCAPE;
- } else if (type == CoherenceRequestType:UPGRADE) {
- if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).Sharers.isElement(requestor) ) {
- return Event:L1_UPGRADE;
- } else {
- return Event:L1_GETX;
- }
- } else if (type == CoherenceRequestType:PUTX) {
- if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).L1PutsPending > 0) {
- getL2CacheEntry(addr).L1PutsPending := getL2CacheEntry(addr).L1PutsPending - 1;
- DEBUG_EXPR("PUTX PutSPending ");
- DEBUG_EXPR(getL2CacheEntry(addr).L1PutsPending);
- }
- if (isSharer(addr, requestor)) {
- return Event:L1_PUTX;
- } else {
- return Event:L1_PUTX_old;
- }
- } else if (type == CoherenceRequestType:PUTS) {
- if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).L1PutsPending > 0) {
- getL2CacheEntry(addr).L1PutsPending := getL2CacheEntry(addr).L1PutsPending - 1;
- DEBUG_EXPR("PUTS PutSPending ");
- DEBUG_EXPR(getL2CacheEntry(addr).L1PutsPending);
- }
- if (isSharer(addr, requestor)) {
- return Event:L1_PUTS;
- } else {
- return Event:L1_PUTS_old;
- }
- } else {
- DEBUG_EXPR(addr);
- DEBUG_EXPR(type);
- error("Invalid L1 forwarded request type");
- }
- }
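-
- // Example classification (scenario assumed for illustration): an L1 victimizes a line and
- // sends PUTX, but a racing GETX from another L1 has already taken ownership and removed the
- // victim from the directory; isSharer() is then false, so the message is classified as
- // L1_PUTX_old rather than L1_PUTX. Similarly, while L1PutsPending > 0 in a base state
- // (SS, MT or M), any non-PUT request maps to L1_PUT_PENDING and is recycled.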
-
- // ** OUT_PORTS **
-
- out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
- out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
- out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
-
-
- // Response IntraChip L2 Network - response msg to this particular L2 bank
- in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
- if (responseIntraChipL2Network_in.isReady()) {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- // test whether it's from a local L1 or an off-chip source
- assert(in_msg.Destination.isElement(machineID));
- if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
- if(in_msg.Type == CoherenceResponseType:DATA) {
- if (in_msg.Dirty) {
- trigger(Event:WB_Data, in_msg.Address);
- } else {
- trigger(Event:WB_Data_clean, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
- // check whether any previous responses have been NACKs
- if(L2_TBEs[in_msg.Address].nack == false) {
- trigger(Event:Ack_all, in_msg.Address);
- }
- else {
- // at least one nack received
- trigger(Event:Nack_all, in_msg.Address);
- }
- } else {
- trigger(Event:Ack, in_msg.Address);
- }
- // for NACKs
- } else if (in_msg.Type == CoherenceResponseType:NACK) {
- if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
- trigger(Event:Nack_all, in_msg.Address);
- } else {
- trigger(Event:Nack, in_msg.Address);
- }
- } else {
- error("unknown message type");
- }
-
- } else { // external message
- if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Mem_Data, in_msg.Address); // L2 now has data and all off-chip acks
- } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
- trigger(Event:Mem_Ack, in_msg.Address); // memory has acked our writeback
- } else {
- error("unknown message type");
- }
- }
- }
- } // if not ready, do nothing
- }
-
- // L1 Request
- in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
- if(L1RequestIntraChipL2Network_in.isReady()) {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- /*
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(id);
- DEBUG_EXPR(getState(in_msg.Address));
- DEBUG_EXPR(in_msg.Requestor);
- DEBUG_EXPR(in_msg.Type);
- DEBUG_EXPR(in_msg.Destination);
- */
- assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
- assert(in_msg.Destination.isElement(machineID));
- if (L2cacheMemory.isTagPresent(in_msg.Address)) {
- // The L2 contains the block, so proceed with handling the request
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
- } else {
- if (L2cacheMemory.cacheAvail(in_msg.Address)) {
- // L2 doesn't have the line, but we have space for it in the L2
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
- } else {
- // No room in the L2, so we need to make room before handling the request
- if (L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Dirty ) {
- // check whether block is transactional
- if(L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Trans == true){
- trigger(Event:L2_Replacement_XACT, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- } else {
- // check whether block is transactional
- if(L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Trans == true){
- trigger(Event:L2_Replacement_clean_XACT, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- }
- }
- }
- }
- }
- }
-
- in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
- if(L1unblockNetwork_in.isReady()) {
- peek(L1unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
- if (in_msg.RemoveLastOwnerFromDir == true) {
- if (isSharer(in_msg.Address,in_msg.LastOwnerID)) {
- trigger(Event:Exclusive_Unblock_WaitPUTold, in_msg.Address);
- }
- else { // PUT arrived, requestor already removed from dir
- trigger(Event:Exclusive_Unblock, in_msg.Address);
- }
- }
- else {
- trigger(Event:Exclusive_Unblock, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- if (in_msg.RemoveLastOwnerFromDir == true) {
- if (isSharer(in_msg.Address,in_msg.LastOwnerID)) {
- trigger(Event:Unblock_WaitPUTold, in_msg.Address);
- }
- else { // PUT arrived, requestor already removed from dir
- trigger(Event:Unblock, in_msg.Address);
- }
- }
- else {
- trigger(Event:Unblock, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_CANCEL) {
- trigger(Event:Unblock_Cancel, in_msg.Address);
- } else {
- error("unknown unblock message");
- }
- }
- }
- }
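-
- // Example of the WaitPUTold path (scenario assumed): the previous owner is being replaced
- // while the new owner's EXCLUSIVE_UNBLOCK arrives with RemoveLastOwnerFromDir set. If the
- // old owner is still listed in Sharers its PUTX has not been processed yet, so the event is
- // Exclusive_Unblock_WaitPUTold and the L2 waits in a PB_ state for the PUT; otherwise the
- // PUT already arrived and the plain Exclusive_Unblock path is taken.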
-
- // ACTIONS
-
- action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
- }
-
- action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(L2cacheMemory[address].Exclusive);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- // also pass along timestamp
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- action(c_exclusiveReplacement, "c", desc="Send data to memory") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.DataBlk := L2_TBEs[address].DataBlk;
- out_msg.Dirty := L2_TBEs[address].Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
-
- //************Transactional memory actions **************
- //broadcast a write filter lookup request to all L1s except for the requestor
- action(a_checkL1WriteFiltersExceptRequestor, "wr", desc="Broadcast a Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := getLocalL1IDs(machineID);
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- //broadcast a read + write filter lookup request to all L1s except for the requestor
- action(a_checkL1ReadWriteFiltersExceptRequestor, "rwr", desc="Broadcast a Read + Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1 forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := getLocalL1IDs(machineID);
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- // These are to send out filter checks to those NACKers in our sharers or exclusive ptr list
- action(a_checkNackerL1WriteFiltersExceptRequestor, "wrn", desc="Broadcast a Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- // check if the L2 miss bit is set - if it is, send check filter requests to those in our Sharers list only
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("L2 Miss: No need to check L1 write filter ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- else{
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- assert(getL2CacheEntry(address).Sharers.count() > 0);
- out_msg.Destination := getL2CacheEntry(address).Sharers;
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- }
- else{
- // This is a read request, so check whether we have a writer
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- // we have a writer, and it is not us
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- else{
- APPEND_TRANSITION_COMMENT("L1 replacement: No need to check L1 write filter");
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" exclusive: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- // we should not have any sharers
- assert( getL2CacheEntry(address).Sharers.count() == 0 );
- }
- }
- }
- }
-
- action(a_checkNackerL1ReadWriteFiltersExceptRequestor, "wrrn", desc="Broadcast a Read + Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- // check if the L2 miss bit is set - if it is, send check filter requests to those in our Sharers list only
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("L2 Miss: No need to check L1 read/write filter");
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- else{
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- assert(getL2CacheEntry(address).Sharers.count() > 0);
- out_msg.Destination := getL2CacheEntry(address).Sharers;
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- }
- else{
- // This is a write request, so check whether we have readers not including us or a writer that is not us
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- // we have a writer, and it is not us
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- else if(getL2CacheEntry(address).Sharers.count() > 0){
- // this should never happen - since we allow silent S replacements but we always track exclusive L1
- assert(false);
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter - we are only reader");
- }
- else{
- // reader(s) exist that is not us
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := getL2CacheEntry(address).Sharers;
- // don't check our own filter
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- }
- else{
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter");
- }
- }
- }
- }
-
- // send data but force L1 requestor to wait for filter responses
- action(f_sendDataToGetSRequestor, "f", desc="Send data from cache to requestor") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // wait for the filter responses from other L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- }
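-
- // Illustrative arithmetic (an eight-L1 chip is assumed): AckCount = 0 - (8 - 1) = -7, so the
- // requesting L1 cannot complete the miss until the seven other L1s have answered the
- // broadcast filter check with an Ack or a Nack.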
-
- // send exclusive data
- action(f_sendExclusiveDataToGetSRequestor, "fx", desc="Send data from cache to requestor") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // wait for the filter responses from other L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- }
-
- action(f_sendDataToGetXRequestor, "fxx", desc="Send data from cache to requestor") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // internal nodes
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // wait for the filter responses from other L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- }
-
- action(f_sendDataToRequestor, "fd", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // different ack counts for different situations
- if(in_msg.Type == CoherenceRequestType:GET_INSTR_ESCAPE || in_msg.Type == CoherenceRequestType:GETX_ESCAPE){
- // no acks needed
- out_msg.AckCount := 0;
- }
- else{
-
- // ORIGINAL
- if( false ) {
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
-
- else{
- // NEW***
- // differentiate between read and write requests
- if(in_msg.Type == CoherenceRequestType:GET_INSTR){
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("We are only sharer");
- out_msg.AckCount := 0;
- }
- else{
- // wait for ACKs from the other NACKers
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT("Nackers exist");
- }
- }
- else{
- // This is a read request, so check whether we have a writer
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- // we have a writer and it is not us
- out_msg.AckCount := 0 - 1;
-
- APPEND_TRANSITION_COMMENT(" Writer exists ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- }
- else{
- // we should have no sharers!
- assert(getL2CacheEntry(address).Sharers.count() == 0);
- assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
-
- APPEND_TRANSITION_COMMENT(" Sharers or we are writer exist, ok to read ");
- APPEND_TRANSITION_COMMENT(" sharers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- APPEND_TRANSITION_COMMENT(" exclusive: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- out_msg.AckCount := 0;
- }
- }
- }
- else if(in_msg.Type == CoherenceRequestType:GETX){
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT(" L2Miss and we are only sharer ");
- out_msg.AckCount := 0;
- }
- else{
- // nackers exist
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT("Nackers exist");
- }
- }
- else{
- // This is a write request, so check whether we have readers not including us or a writer that is not us
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- // we have a writer and it is not us
- out_msg.AckCount := 0 - 1;
-
- APPEND_TRANSITION_COMMENT(" Writer exists ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
-
- }
- else if(getL2CacheEntry(address).Sharers.count() > 0){
- // this shouldn't be possible - we always track exclusive owner, but allow silent S replacements
- assert(false);
-
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter - we are only reader");
- out_msg.AckCount := 0;
- }
- else{
- // reader(s) exist that is not us
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT(" Readers exist ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- }
- }
- else{
- // we should always have no sharers!
- assert(getL2CacheEntry(address).Sharers.count() == 0);
- assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
-
- out_msg.AckCount := 0;
-
- APPEND_TRANSITION_COMMENT(" sharers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- APPEND_TRANSITION_COMMENT(" exclusive: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter");
- }
- }
- } // for GETX
- else{
- // unknown request type
- APPEND_TRANSITION_COMMENT(in_msg.Type);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- assert(false);
- }
- }
- } // for original vs new code
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" AckCount: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- }
- }
- }
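-
- // Worked example for the L2Miss/GETX branch (IDs assumed): the Sharers list holds the earlier
- // NACKers, say {L1-1, L1-4}, and the requestor is L1-4. AckCount starts at -2 and is raised
- // by one because the requestor is on the list, giving -1: only L1-1 still has to answer its
- // filter check. Escape-action requests (GETX_ESCAPE / GET_INSTR_ESCAPE) always get AckCount = 0.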
-
- action(f_sendExclusiveDataToRequestor, "fdx", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // different ack counts depending on situation
- // IMPORTANT: assuming data sent exclusively for GETS request
- if(in_msg.Type == CoherenceRequestType:GETS_ESCAPE){
- // no acks needed
- out_msg.AckCount := 0;
- }
- else{
-
- // ORIGINAL :
- if( false ){
- // request filter checks from all L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- else{
- // NEW***
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("We are only sharer");
- out_msg.AckCount := 0;
- }
- else{
- // wait for ACKs from the other NACKers
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT("Nackers exist");
- }
- }
- else{
- // This is a read request, so check whether we have a writer
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- // we have a writer and it is not us
- out_msg.AckCount := 0 - 1;
-
- APPEND_TRANSITION_COMMENT(" Writer exists ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- }
- else{
- // we should always have no sharers!
- APPEND_TRANSITION_COMMENT(address);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" sharers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
-
- DEBUG_EXPR(address);
- DEBUG_EXPR(" requestor: ");
- DEBUG_EXPR(in_msg.Requestor);
- DEBUG_EXPR(" sharers: ");
- DEBUG_EXPR(getL2CacheEntry(address).Sharers);
-
- assert(getL2CacheEntry(address).Sharers.count() == 0);
- assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" Sharers exist or we are writer, ok to read ");
- out_msg.AckCount := 0;
- }
- }
- } // for original vs new code
- }
-
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" AckCount: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- }
- }
- }
-
- // send an accumulated ACK to requestor when we don't care about checking filters (for escape actions)
- action(f_sendAccumulatedAckToRequestor, "faa", desc="Send ACKs to requestor") {
- // special case: don't send ACK if uniprocessor, since we don't need it (just send data)
- if((numberOfL1CachePerChip() - 1) > 0){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // count all L1s except requestor
- out_msg.AckCount := numberOfL1CachePerChip() - 1;
- APPEND_TRANSITION_COMMENT(" Total L1s: ");
- APPEND_TRANSITION_COMMENT(numberOfL1CachePerChip());
- APPEND_TRANSITION_COMMENT(" Total ACKS: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
- }
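-
- // Illustrative count (a sixteen-L1 chip is assumed): the single accumulated ACK carries
- // AckCount = 16 - 1 = 15, covering every other L1 at once since escape actions skip the
- // filter checks; on a one-L1 chip the ACK is omitted entirely and only the data reply is sent.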
-
- // special INV used when we receive an escape action request. Sharers cannot NACK this invalidate.
- action(fwm_sendFwdInvEscapeToSharersMinusRequestor, "fwme", desc="invalidate sharers for request, requestor is sharer") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="1") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:INV_ESCAPE;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := L2cacheMemory[address].Sharers;
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- //also pass along timestamp
- out_msg.Timestamp := in_msg.Timestamp;
- }
- }
- }
-
- action(f_profileRequestor, "prq", desc="Profiles the requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- }
-
- // marks the L2 block as transactional if request was transactional
- action(f_markBlockTransIfTrans, "\mbt", desc="Mark an L2 block as transactional") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- if(in_msg.Transactional == true){
- L2cacheMemory[address].Trans := true;
- }
- }
- }
-
- action(q_profileOverflow, "po", desc="profile the overflowed block"){
- profileOverflow(address, machineID);
- }
-
- action(p_profileRequest, "pcc", desc="Profile request msg") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT(" request: Timestamp: ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" Requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" Dest: ");
- APPEND_TRANSITION_COMMENT(in_msg.Destination);
- APPEND_TRANSITION_COMMENT(" PA: ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" Type: ");
- APPEND_TRANSITION_COMMENT(in_msg.Type);
- APPEND_TRANSITION_COMMENT(" Mode: ");
- APPEND_TRANSITION_COMMENT(in_msg.AccessMode);
- APPEND_TRANSITION_COMMENT(" PF: ");
- APPEND_TRANSITION_COMMENT(in_msg.Prefetch);
- }
- }
-
- //********************************END***************************
-
- action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT(" AckCount: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- }
- }
- }
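-
- // Illustrative count (sharer set assumed): with Sharers = {L1-0, L1-2, L1-5} and requestor
- // L1-2, AckCount = -3 + 1 = -2, so the requestor still needs responses from the two remaining
- // sharers (presumably the invalidation acks produced via fwm_sendFwdInvToSharersMinusRequestor)
- // before it can complete its request.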
-
- // use DATA instead of L2_DATA because L1 doesn't need to wait for acks from L1 filters in this case
- action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- // no ACKS needed because no possible conflicts
- out_msg.AckCount := 0;
- }
- }
- }
-
- action(f_sendInvToSharers, "fsi", desc="invalidate sharers for L2 replacement") {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:REPLACE;
- out_msg.Requestor := machineID;
- out_msg.Destination := L2cacheMemory[address].Sharers;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := L2cacheMemory[address].Sharers;
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- //also pass along timestamp
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" Sharers: ");
- APPEND_TRANSITION_COMMENT(L2cacheMemory[address].Sharers);
- }
- }
- }
-
- // OTHER ACTIONS
- action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request (isPrefetch=0, number of invalidates=0)") {
- check_allocate(L2_TBEs);
- L2_TBEs.allocate(address);
- L2_TBEs[address].L1_GetS_IDs.clear();
- L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
- L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
- L2_TBEs[address].pendingAcks := getL2CacheEntry(address).Sharers.count();
- }
-
- action(i_setTBEPhysicalAddress, "ia", desc="Sets the physical address field of the TBE"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2_TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
- }
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
- L2_TBEs.deallocate(address);
- }
-
- action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
- profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
- }
-
- action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
- profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
- }
-
-
- action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
- profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
- }
-
-
- action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
- getL2CacheEntry(address).Dirty := in_msg.Dirty;
- // reset the L2 miss bit
- getL2CacheEntry(address).L2Miss := false;
- }
- }
-
- // Sets the L2Miss bit in the L2 entry - indicates data was sourced from memory
- action(m_markL2MissBit, "mi", desc="Set the entry's L2 Miss bit") {
- getL2CacheEntry(address).L2Miss := true;
- }
-
- action(m_copyNackersIntoSharers, "mn", desc="Copy the NACKers list into our sharers list") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Nackers.count() > 0);
- getL2CacheEntry(address).Sharers.clear();
- // only need to copy into sharers list if we are in special state of "multicast" filter checks
- if(getL2CacheEntry(address).L2Miss == true){
- getL2CacheEntry(address).Sharers := in_msg.Nackers;
- APPEND_TRANSITION_COMMENT(" Unblocker: ");
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" Nackers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- }
- }
- }
-
-  action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from L1 request queue to cache") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
- getL2CacheEntry(address).Dirty := in_msg.Dirty;
- // reset the L2 miss bit
- getL2CacheEntry(address).L2Miss := false;
- }
- }
-
- action(q_updateAck, "q", desc="update pending ack count") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" p: ");
- APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
- }
- }
-
- // For transactional memory. If received NACK instead of ACK
- action(q_updateNack, "qn", desc="update pending ack count") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- // set flag indicating we have seen NACK
- L2_TBEs[address].nack := true;
- L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" p: ");
- APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
- }
- }
-
- action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- L2_TBEs[address].DataBlk := in_msg.DataBlk;
- L2_TBEs[address].Dirty := in_msg.Dirty;
- }
- }
-
-
- action(z_stall, "z", desc="Stall") {
- }
-
-
- action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
- }
- }
-
- action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
- }
- }
-
- action(set_setMRU, "\set", desc="set the MRU entry") {
- L2cacheMemory.setMRU(address);
- }
-
- action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
- if (L2cacheMemory.isTagPresent(address) == false) {
- L2cacheMemory.allocate(address);
- }
- }
-
- action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cacheMemory.deallocate(address);
- }
-
- action(t_sendWBAck, "t", desc="Send writeback ACK") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:WB_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // upgrader doesn't get ack from itself, hence the + 1
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- // same as above, but send NACK instead of ACK
- action(ts_sendInvNackToUpgrader, "tsn", desc="Send NACK to upgrader") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // upgrader doesn't get ack from itself, hence the + 1
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
- }
- }
- }
-
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.Requestor));
- }
- }
-
- action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- // profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
- }
- }
-
-
-
- action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- addSharer(address, in_msg.Requestor);
- APPEND_TRANSITION_COMMENT( getL2CacheEntry(address).Sharers );
- }
- }
-
- action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- addSharer(address, in_msg.Sender);
- if (in_msg.RemoveLastOwnerFromDir == true) {
- // We do this to solve some races with PUTX
- APPEND_TRANSITION_COMMENT("Last owner removed, it was ");
- APPEND_TRANSITION_COMMENT(in_msg.LastOwnerID);
- L2cacheMemory[address].Sharers.remove(in_msg.LastOwnerID);
- assert(in_msg.LastOwnerID == L2cacheMemory[address].Exclusive);
- }
- }
- }
-
-
- action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2cacheMemory[address].Sharers.remove(in_msg.Requestor);
- }
- }
-
- action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
- L2cacheMemory[address].Sharers.clear();
- }
-
- action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- if (in_msg.RemoveLastOwnerFromDir == true) {
- // We do this to solve some races with PUTX
- APPEND_TRANSITION_COMMENT(" Last owner removed, it was ");
- APPEND_TRANSITION_COMMENT(in_msg.LastOwnerID);
- assert(in_msg.LastOwnerID == L2cacheMemory[address].Exclusive);
- }
- L2cacheMemory[address].Sharers.clear();
- L2cacheMemory[address].Exclusive := in_msg.Sender;
- addSharer(address, in_msg.Sender);
- }
- }
-
- action(zz_recycleL1RequestQueue, "zz", desc="recycle L1 request queue") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:PUTX || in_msg.Type == CoherenceRequestType:PUTS) {
- if (L2cacheMemory.isTagPresent(in_msg.Address)) {
- getL2CacheEntry(in_msg.Address).L1PutsPending := getL2CacheEntry(in_msg.Address).L1PutsPending + 1;
- DEBUG_EXPR("RECYCLE PutSPending ");
- DEBUG_EXPR(getL2CacheEntry(in_msg.Address).L1PutsPending);
- DEBUG_EXPR(in_msg.Type);
- DEBUG_EXPR(in_msg.Requestor);
- }
- }
- }
- L1RequestIntraChipL2Network_in.recycle();
- }
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- /* Recycle while waiting for PUT */
- transition({PB_MT, PB_MT_IB, PB_SS}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_GETS_ESCAPE, L1_GETX_ESCAPE, L1_GET_INSTR_ESCAPE, L2_Replacement, L2_Replacement_clean, L2_Replacement_XACT, L2_Replacement_clean_XACT}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition({IM, IS, ISS, SS_MB, M_MB, ISS_MB, IS_SSB, MT_MB, MT_IIB, MT_IB, MT_SB, M_SSB, SS_SSB},
- {L2_Replacement, L2_Replacement_clean, L2_Replacement_XACT, L2_Replacement_clean_XACT}) {
- zz_recycleL1RequestQueue;
- }
-
- transition({SS_MB, M_MB, ISS_MB, IS_SSB, MT_MB, MT_IIB, MT_IB, MT_SB, M_SSB, SS_SSB},
- {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_GETS_ESCAPE, L1_GETX_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition({NP, SS, M, M_I, MT_I, MCT_I, I_I, S_I, ISS, IS, IM, /*SS_MB,*/ SS_SSB, /* MT_MB, M_MB, ISS_MB,*/ IS_SSB, M_SSB, /*MT_IIB, */MT_IB/*, MT_SB*/}, {L1_PUTX,L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // occurs when L2 replacement raced with L1 replacement, and L2 finished its replacement first
- transition({NP, M_I, MCT_I, I_I, S_I, IS, ISS, IM, SS, M, MT, IS_SSB, MT_IB, M_SSB, SS_SSB}, {L1_PUTX_old, L1_PUTS_old}){
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // PUT from current (last) exclusive owner, that was replacing the line when it received Fwd req
- transition(MT_I, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
-  transition({SS, M, MT}, {L1_PUT_PENDING}) { // L1_PUT* msg pending for the block, don't accept new requests until PUT is processed
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- //===============================================
- // BASE STATE - I
-
- // Transitions from I (Idle)
-
-  // When L2 doesn't have the block, need to broadcast to all L1s to check the appropriate filter(s)
- transition(NP, L1_GETS, ISS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // will mark as exclusive when we get unblocked with success
- //nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // for correctness we need to query both read + write filters
- a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- // no need to check filters, send accumulated ACK to requestor
- transition(NP, L1_GETS_ESCAPE, ISS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // will mark as exclusive when we get unblocked with success
- //nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // send accumulated ACK
- f_sendAccumulatedAckToRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- transition(NP, L1_GET_INSTR, IS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // for correctness query the read + write filters
- a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
-  // no need to query filters, send accumulated ACK to requestor
- transition(NP, L1_GET_INSTR_ESCAPE, IS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // send accumulated ACK
- f_sendAccumulatedAckToRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- transition(NP, L1_GETX, IM) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- xx_recordGetXL1ID;
- a_issueFetchToMemory;
- // also query the L1 write and read filters
- a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- // don't check filters
- transition(NP, L1_GETX_ESCAPE, IM) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- xx_recordGetXL1ID;
- a_issueFetchToMemory;
- // send accumulated ACK to requestor
- f_sendAccumulatedAckToRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
-
- // transitions from IS/IM
-
- // force L1s to respond success or failure
- transition(ISS, Mem_Data, ISS_MB){
- m_writeDataToCache;
- m_markL2MissBit;
- // send exclusive data but force L1 to wait for filter responses
- f_sendExclusiveDataToGetSRequestor;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IS, Mem_Data, IS_SSB){
- m_writeDataToCache;
- m_markL2MissBit;
- // send data but force L1 to wait for filter responses
- f_sendDataToGetSRequestor;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IM, Mem_Data, ISS_MB){
- m_writeDataToCache;
- m_markL2MissBit;
- // send data but force L1 to wait for filter responses
- f_sendDataToGetXRequestor;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // disallow grouping of requestors. There is a correctness problem if we check the wrong
- // filters as indicated by the original requestor.
- transition({IS, ISS}, {L1_GETX, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // transitions from SS
- transition(SS, {L1_GETS, L1_GET_INSTR, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}, SS_SSB) {
- p_profileRequest;
- f_profileRequestor;
- ds_sendSharedDataToRequestor;
- nn_addSharer;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // For isolation the L1 filters might return NACKs to the requestor
- transition(SS, L1_GETX, SS_MB) {
- p_profileRequest;
- f_profileRequestor;
- d_sendDataToRequestor;
- fwm_sendFwdInvToSharersMinusRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // send special INV to sharers - they have to invalidate
- transition(SS, L1_GETX_ESCAPE, SS_MB) {
- p_profileRequest;
- f_profileRequestor;
- d_sendDataToRequestor;
- fwm_sendFwdInvEscapeToSharersMinusRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // For isolation the L1 filters might return NACKs to the requestor
- transition(SS, L1_UPGRADE, SS_MB) {
- f_profileRequestor;
- fwm_sendFwdInvToSharersMinusRequestor;
- ts_sendInvAckToUpgrader;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(SS, L2_Replacement_clean, I_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SS, L2_Replacement_clean_XACT, I_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SS, L2_Replacement, S_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SS, L2_Replacement_XACT, S_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- // Transitions from M
-
- // send data, but force L1 to wait for filter responses
- transition(M, L1_GETS, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendExclusiveDataToRequestor;
- // selective filter checks, but need to check both read+write in case nackers put NP block into M state
- a_checkNackerL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // don't care about filters
- transition(M, L1_GETS_ESCAPE, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendExclusiveDataToRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(M, L1_GET_INSTR, M_SSB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- // NEW - selective filter checks, but need to check both read+write in case nackers put NP block into M state
- a_checkNackerL1ReadWriteFiltersExceptRequestor;
-    // This should always be _after_ f_sendDataToRequestor and a_checkNackerL1ReadWriteFiltersExceptRequestor, since they
- // explicitly look at the sharers list!
- nn_addSharer;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // don't care about filters
- transition(M, L1_GET_INSTR_ESCAPE, M_SSB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- nn_addSharer;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(M, L1_GETX, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- // selective filter checks
- a_checkNackerL1ReadWriteFiltersExceptRequestor;
- // issue filter checks
- //a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // don't care about filters
- transition(M, L1_GETX_ESCAPE, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(M, L2_Replacement, M_I) {
- i_allocateTBE;
- c_exclusiveReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement_clean, M_I) {
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement_XACT, M_I) {
- q_profileOverflow;
- i_allocateTBE;
- c_exclusiveReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement_clean_XACT, M_I) {
- q_profileOverflow;
- rr_deallocateL2CacheBlock;
- }
-
-
- // transitions from MT
- transition(MT, {L1_GETX, L1_GETX_ESCAPE}, MT_MB) {
- p_profileRequest;
- f_profileRequestor;
- b_forwardRequestToExclusive;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
-
- transition(MT, {L1_GETS, L1_GET_INSTR, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}, MT_IIB) {
- p_profileRequest;
- f_profileRequestor;
- b_forwardRequestToExclusive;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(MT, L2_Replacement, MT_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L2_Replacement_clean, MCT_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L2_Replacement_XACT, MT_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L2_Replacement_clean_XACT, MCT_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L1_PUTX, M) {
- f_profileRequestor;
-    // this doesn't affect the exclusive ptr
- ll_clearSharers;
- mr_writeDataToCacheFromRequest;
- t_sendWBAck;
- jj_popL1RequestQueue;
- }
-
- // This is for the case of transactional read line in E state being replaced from L1. We need to maintain isolation on this
- // in the event of a future transactional store from another proc, so we maintain this transactional sharer on the list
- transition(MT, L1_PUTS, SS) {
- f_profileRequestor;
- ll_clearSharers;
- // maintain transactional read isolation
- nn_addSharer;
- mr_writeDataToCacheFromRequest;
- t_sendWBAck;
- jj_popL1RequestQueue;
- }
-
- // transitions from blocking states
- transition(SS_MB, Unblock_Cancel, SS) {
- k_popUnblockQueue;
- }
-
- transition(M_SSB, Unblock_Cancel, M) {
- ll_clearSharers;
- // copy NACKers list from unblock message to our sharers list
- m_copyNackersIntoSharers;
- k_popUnblockQueue;
- }
-
- transition(MT_MB, Unblock_Cancel, MT) {
- k_popUnblockQueue;
- }
-
- transition(MT_IB, Unblock_Cancel, MT) {
- k_popUnblockQueue;
- }
-
- transition(MT_IIB, Unblock_Cancel, MT){
- k_popUnblockQueue;
- }
-
- // L2 just got the data from memory, but we have Nackers. We can let nacked block reside in M, but GETS request needs to check read+write
- // signatures to avoid atomicity violations.
- transition({ISS_MB, IS_SSB}, Unblock_Cancel, M){
- //rr_deallocateL2CacheBlock;
- // copy NACKers list from unblock message to our sharers list
- m_copyNackersIntoSharers;
- k_popUnblockQueue;
- }
-
- transition(M_MB, Unblock_Cancel, M) {
- // copy NACKers list from unblock message to our sharers list
- m_copyNackersIntoSharers;
- k_popUnblockQueue;
- }
-
- transition(SS_MB, Exclusive_Unblock, MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from next exclusive surpassed its own ExclusiveUnblock
- // Perceived as PUTX_old because the directory is outdated
- transition(SS_MB, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // PUT from current (old) exclusive, can't do anything with it in this state
- // Don't know whether exclusive was replacing or not, so wait to see what Unblock says
- transition(SS_MB, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // Next exclusive informs that last owner was replacing the line when it received Fwd req
- // Thus, expect a PUTX_old from previous owner
- transition(SS_MB, Exclusive_Unblock_WaitPUTold, PB_MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition(PB_MT, {L1_PUTX_old, L1_PUTS_old}, MT) { // OK, PUT_old received, go to MT
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // PUT from current (next) exclusive, so recycle
- // Expecting PUT_old, won't take in new PUT until previous PUT arrives
- transition(PB_MT, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // L2 blocks on GETS requests in SS state
- transition(SS_SSB, Unblock, SS) {
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition({M_SSB, IS_SSB}, Unblock, SS) {
- // we already added the sharer when we received original request
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition({M_MB, MT_MB, ISS_MB}, Exclusive_Unblock, MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition({M_MB, MT_MB, ISS_MB}, Exclusive_Unblock_WaitPUTold, PB_MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from (not yet) next exclusive surpassed its own ExclusiveUnblock
- // thus became PUTX_old (since directory is not up-to-date)
- transition({M_MB, MT_MB, ISS_MB}, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // PUT from current (previous) owner: recycle until unblock arrives
- // We don't know whether replacing cache is waiting for WB_Ack or it was replacing when fwd arrived
- transition({M_MB, MT_MB, ISS_MB}, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // L1 requestor received data from exclusive L1, but writeback data from exclusive L1 hasn't arrived yet
- transition(MT_IIB, Unblock, MT_IB) {
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from current (previous) owner: recycle
- // We don't know whether replacing cache is waiting for WB_Ack or it was replacing when fwd arrived
- transition(MT_IIB, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- }
-
- // PUT from (not yet) next exclusive, but unblock hasn't arrived yet, so it became PUT_old: recycle
- transition(MT_IIB, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(MT_IIB, Unblock_WaitPUTold, PB_MT_IB) { // Now arrives Unblock, wait for PUT and WB_Data
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // L1 requestor has not received data from exclusive L1, but we received writeback data from exclusive L1
- transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- }
-
- // PUT_old from previous owner, that was replacing when it received Fwd req
- transition(PB_MT_IB, {L1_PUTX_old, L1_PUTS_old}, MT_IB) { // Go to MT_IB, and wait for WB_Data
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- transition(PB_MT_IB, {L1_PUTX, L1_PUTS}) { // Waiting for PUT_old, don't take new PUT in
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // WB_data from previous owner, we already received unblock, just wait for PUT_old to go to SS
- transition(PB_MT_IB, {WB_Data, WB_Data_clean}, PB_SS) { // Received Unblock, now arrives WB_Data, wait for PUT
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- }
-
- transition(PB_SS, {L1_PUTX_old, L1_PUTS_old}, SS) { // Received Unblock and WB_Data, now arrives PUT, go to SS
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // PUT from new exclusive owner, while waiting for PUT from previous exclusive owner: recycle
- transition(PB_SS, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(MT_SB, Unblock, SS) {
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition(MT_SB, Unblock_WaitPUTold, PB_SS) { // Received WB_Data, now arriving Unblock, wait for PUT
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from (not yet) new exclusive owner, before we receive Unblock from it (became PUT_old because directory is not up-to-date)
- transition(MT_SB, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // PUT from current (last) exclusive owner, that was replacing the line when it received Fwd req
- transition(MT_SB, {L1_PUTX, L1_PUTS}) {
- kk_removeRequestSharer; // When Unblock arrives, it'll trigger Unblock, not Unblock_WaitPUTold
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // writeback states
- transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(I_I, Ack) {
- q_updateAck;
- o_popIncomingResponseQueue;
- }
-
- transition(I_I, Ack_all, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition({MT_I, MCT_I}, WB_Data, M_I) {
- qq_writeDataToTBE;
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(MCT_I, WB_Data_clean, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // L1 never changed Dirty data
- transition(MT_I, Ack_all, M_I) {
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- // clean data that L1 exclusive never wrote
- transition(MCT_I, Ack_all, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(MT_I, WB_Data_clean, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(S_I, Ack) {
- q_updateAck;
- o_popIncomingResponseQueue;
- }
-
- transition(S_I, Ack_all, M_I) {
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(M_I, Mem_Ack, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-}
-
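The ack/nack bookkeeping in the deleted L2 controller above can be pictured in isolation: i_allocateTBE seeds pendingAcks with the current sharer count, q_updateAck and q_updateNack subtract each response's AckCount (remembering whether any NACK was seen), and the Ack_all transitions fire once nothing is outstanding. A minimal stand-alone C++ sketch of that counter, with illustrative names rather than Ruby/SLICC types:

    // Illustrative model of the TBE ack/nack counter (not gem5 code).
    #include <cassert>
    #include <iostream>

    struct TBE {
        int  pendingAcks = 0;     // like L2_TBEs[address].pendingAcks
        bool nack        = false; // set if any responder NACKed (q_updateNack)
    };

    // Mirrors q_updateAck / q_updateNack.
    void onResponse(TBE &tbe, int ackCount, bool isNack) {
        if (isNack)
            tbe.nack = true;
        tbe.pendingAcks -= ackCount;
    }

    int main() {
        TBE tbe;
        tbe.pendingAcks = 3;           // i_allocateTBE: Sharers.count()

        onResponse(tbe, 1, false);     // ACK from one sharer
        onResponse(tbe, 1, true);      // NACK from a transactional reader
        onResponse(tbe, 1, false);     // final ACK

        assert(tbe.pendingAcks == 0);  // would trigger an Ack_all transition
        std::cout << "all responses in, saw NACK = " << tbe.nack << "\n";
        return 0;
    }

The negative AckCount in ts_sendInvAckToUpgrader (0 - Sharers.count() + 1) fits the same count-to-zero pattern: the requesting L1, whose side is not part of this hunk, can presumably sum that value with the individual acks it collects and proceed once the total reaches zero.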
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-mem.sm b/src/mem/protocol/MESI_CMP_filter_directory-mem.sm
deleted file mode 100644
index 1fcd234fe..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-mem.sm
+++ /dev/null
@@ -1,166 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
- */
-
-
-machine(Directory, "Token protocol") {
-
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
- MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
- MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
-
- // STATES
- enumeration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, desc="Owner";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
-    Fetch, desc="A memory fetch request (GETS/GETX) arrives";
-    Data, desc="Writeback data arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...") {
- DataBlock DataBlk, desc="data for the block";
- }
-
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- }
-
-
- // ** OBJECTS **
-
- DirectoryMemory directory, constructor_hack="i";
-
- State getState(Address addr) {
- return State:I;
- }
-
- void setState(Address addr, State state) {
- }
-
- // ** OUT_PORTS **
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
-
- // ** IN_PORTS **
-
- in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady()) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:Fetch, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:Fetch, in_msg.Address);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady()) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
- action(a_sendAck, "a", desc="Send ack to L2") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Sender);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
- }
-
-  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue();
- }
-
- action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
- }
- }
-
- // TRANSITIONS
-
- transition(I, Fetch) {
- d_sendData;
- j_popIncomingRequestQueue;
- }
-
- transition(I, Data) {
- m_writeDataToMemory;
- a_sendAck;
- k_popIncomingResponseQueue;
- }
-}
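The directory removed above is a single-state (I) machine: a Fetch is served straight from directory[addr].DataBlk, and an incoming writeback (Data) is written to memory and acknowledged with MEMORY_ACK. A rough C++ sketch of that two-event loop, using illustrative names rather than the generated Ruby code:

    // Illustrative single-state directory (not gem5 code).
    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    using DataBlock = std::array<uint8_t, 64>;

    struct Directory {
        std::unordered_map<uint64_t, DataBlock> mem;  // directory[addr].DataBlk

        // transition(I, Fetch): d_sendData
        DataBlock onFetch(uint64_t addr) { return mem[addr]; }

        // transition(I, Data): m_writeDataToMemory; a_sendAck
        bool onWriteback(uint64_t addr, const DataBlock &blk) {
            mem[addr] = blk;
            return true;  // MEMORY_ACK back to the L2
        }
    };

    int main() {
        Directory dir;
        DataBlock blk{};
        blk[0] = 0x2a;
        dir.onWriteback(0x1000, blk);
        std::cout << "fetched byte: " << int(dir.onFetch(0x1000)[0]) << "\n";
        return 0;
    }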
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-msg.sm b/src/mem/protocol/MESI_CMP_filter_directory-msg.sm
deleted file mode 100644
index a888e2450..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-msg.sm
+++ /dev/null
@@ -1,153 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MSI_MOSI_CMP_directory-msg.sm 1.5 05/01/19 15:48:37-06:00 mikem@royal16.cs.wisc.edu $
- *
- */
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- GETX, desc="Get eXclusive";
- GETX_ESCAPE, desc="Get eXclusive, while in escape action";
- UPGRADE, desc="UPGRADE to exclusive";
- GETS, desc="Get Shared";
- GETS_ESCAPE, desc="Get Shared, while in escape action";
- GET_INSTR, desc="Get Instruction";
- GET_INSTR_ESCAPE, desc="Get Instruction, while in escape action";
- INV, desc="INValidate, could be NACKed";
- INV_ESCAPE, desc="INValidate, cannot be NACKed";
- PUTX, desc="replacement message, for writeback to lower caches";
- PUTS, desc="clean replacement message, for writeback to lower caches";
- REPLACE, desc="replacement message, from lowest cache";
- CHECK_WRITE_FILTER, desc="check write filter message";
- CHECK_READ_WRITE_FILTER, desc="check both read and write filters message";
-}
-
-// CoherenceResponseType
-enumeration(CoherenceResponseType, desc="...") {
- MEMORY_ACK, desc="Ack from memory controller";
- DATA, desc="Data";
- DATA_EXCLUSIVE, desc="Data";
- L2_DATA, desc="data from L2, in shared mode";
- L2_DATA_EXCLUSIVE, desc="data from L2, in exclusive mode";
- MEMORY_DATA, desc="Data";
- ACK, desc="Generic invalidate ack";
- NACK, desc="NACK used to maintain transactional isolation";
- WB_ACK, desc="writeback ack";
- UNBLOCK, desc="unblock";
- EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
- UNBLOCK_CANCEL, desc="unblock when trans. request fails";
-}
-
-// RequestMsg
-structure(RequestMsg, desc="...", interface="NetworkMessage") {
- Address Address, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- AccessModeType AccessMode, desc="user/supervisor access type";
-  MachineID Requestor, desc="What component made the request";
- NetDest Destination, desc="What components receive the request, includes MachineType and num";
- MessageSizeType MessageSize, desc="size category of the message";
- DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
- bool Dirty, default="false", desc="Dirty bit";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
- uint64 Timestamp, desc="TLR-like Timestamp";
-}
-
-// ResponseMsg
-structure(ResponseMsg, desc="...", interface="NetworkMessage") {
- Address Address, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
- MachineID Sender, desc="What component sent the data";
- NetDest Destination, desc="Node to whom the data is sent";
- DataBlock DataBlk, desc="Data for the cache line";
- bool Dirty, default="false", desc="Dirty bit";
- int AckCount, default="0", desc="number of acks in this message";
- MessageSizeType MessageSize, desc="size category of the message";
- uint64 Timestamp, desc="TLR-like Timestamp";
- NetDest Nackers, desc="The nodes which sent NACKs to requestor";
- bool Transactional, desc="Whether this address was transactional";
- bool RemoveLastOwnerFromDir, desc="To solve some races with PUTX/GETS";
- MachineID LastOwnerID, desc="What component sent the data";
-}
-
-// TriggerType
-enumeration(TriggerType, desc="...") {
- ALL_ACKS, desc="When all acks/nacks have been received";
-}
-
-// TriggerMsg
-structure(TriggerMsg, desc="...", interface="Message") {
- Address Address, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- TriggerType Type, desc="Type of trigger";
-}
-
-/*
- GETX, desc="Get eXclusive";
- UPGRADE, desc="UPGRADE to exclusive";
- GETS, desc="Get Shared";
- GET_INSTR, desc="Get Instruction";
- INV, desc="INValidate";
- PUTX, desc="replacement message, for writeback to lower caches";
- REPLACE, desc="replacement message, from lowest cache";
- CHECK_WRITE_FILTER, desc="check write filter message";
- CHECK_READ_WRITE_FILTER, desc="check both read and write filters message";
-*/
-
-GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:PUTX) {
- return GenericRequestType:PUTX;
- } else if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETS_ESCAPE) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GET_INSTR) {
- return GenericRequestType:GET_INSTR;
- } else if(type == CoherenceRequestType:GET_INSTR_ESCAPE) {
- return GenericRequestType:GET_INSTR;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:GETX_ESCAPE) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:UPGRADE) {
- return GenericRequestType:UPGRADE;
- } else if(type == CoherenceRequestType:INV) {
- return GenericRequestType:INV;
- } else if( type == CoherenceRequestType:REPLACE) {
- return GenericRequestType:REPLACEMENT;
- } else {
- DEBUG_EXPR(type);
- error("invalid CoherenceRequestType");
- }
-}
-
-
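convertToGenericType above folds each *_ESCAPE request back onto its base generic type, so the profiling call in the L2 (uu_profileMiss) treats escape-action requests like ordinary ones. A compact C++ rendering of the same mapping; the enumerator sets are illustrative and trimmed to what the function touches:

    // Illustrative version of the request-type mapping (not gem5 code).
    #include <iostream>
    #include <stdexcept>

    enum class CoherenceRequestType {
        GETX, GETX_ESCAPE, UPGRADE, GETS, GETS_ESCAPE,
        GET_INSTR, GET_INSTR_ESCAPE, INV, PUTX, PUTS, REPLACE
    };
    enum class GenericRequestType {
        GETX, UPGRADE, GETS, GET_INSTR, INV, PUTX, REPLACEMENT
    };

    GenericRequestType convertToGenericType(CoherenceRequestType t) {
        switch (t) {
        case CoherenceRequestType::PUTX:             return GenericRequestType::PUTX;
        case CoherenceRequestType::GETS:
        case CoherenceRequestType::GETS_ESCAPE:      return GenericRequestType::GETS;
        case CoherenceRequestType::GET_INSTR:
        case CoherenceRequestType::GET_INSTR_ESCAPE: return GenericRequestType::GET_INSTR;
        case CoherenceRequestType::GETX:
        case CoherenceRequestType::GETX_ESCAPE:      return GenericRequestType::GETX;
        case CoherenceRequestType::UPGRADE:          return GenericRequestType::UPGRADE;
        case CoherenceRequestType::INV:              return GenericRequestType::INV;
        case CoherenceRequestType::REPLACE:          return GenericRequestType::REPLACEMENT;
        default: throw std::invalid_argument("invalid CoherenceRequestType");
        }
    }

    int main() {
        bool same = convertToGenericType(CoherenceRequestType::GETS_ESCAPE) ==
                    convertToGenericType(CoherenceRequestType::GETS);
        std::cout << "GETS_ESCAPE maps like GETS: " << same << "\n";
        return 0;
    }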
diff --git a/src/mem/protocol/MESI_CMP_filter_directory.slicc b/src/mem/protocol/MESI_CMP_filter_directory.slicc
deleted file mode 100644
index 715da5795..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory.slicc
+++ /dev/null
@@ -1,7 +0,0 @@
-../protocols/LogTM.sm
-../protocols/MESI_CMP_filter_directory-msg.sm
-../protocols/MESI_CMP_filter_directory-L2cache.sm
-../protocols/MESI_CMP_filter_directory-L1cache.sm
-../protocols/MESI_CMP_filter_directory-mem.sm
-../protocols/standard_CMP-protocol.sm
-
diff --git a/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm b/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm
deleted file mode 100644
index 2f8818489..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm
+++ /dev/null
@@ -1,250 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
- */
-
-// This file is copied from Yasuko Watanabe's prefetch / memory protocol
-// Copied here by aep 12/14/07
-
-
-machine(Directory, "MESI_CMP_filter_directory protocol") {
-
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
- MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
- MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
-
- // STATES
- enumeration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, desc="Owner";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- Fetch, desc="A memory fetch arrives";
- Data, desc="writeback data arrives";
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...") {
- DataBlock DataBlk, desc="data for the block";
- }
-
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- }
-
- // to simulate detailed DRAM
- external_type(MemoryControl, inport="yes", outport="yes") {
-
- }
-
-
- // ** OBJECTS **
-
- DirectoryMemory directory, constructor_hack="i";
- MemoryControl memBuffer, constructor_hack="i";
-
- State getState(Address addr) {
- return State:I;
- }
-
- void setState(Address addr, State state) {
- }
-
- bool isGETRequest(CoherenceRequestType type) {
- return (type == CoherenceRequestType:GETS) ||
- (type == CoherenceRequestType:GET_INSTR) ||
- (type == CoherenceRequestType:GETX);
- }
-
-
- // ** OUT_PORTS **
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
- // ** IN_PORTS **
-
- in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady()) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg);
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady()) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
- if (memQueue_in.isReady()) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.Address);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
-
-
- // Actions
- action(a_sendAck, "a", desc="Send ack to L2") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
- }
-
-  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue();
- }
-
- action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
-
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
- action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Sender;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := in_msg.Prefetch;
-
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
- action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
- }
- }
-
- // TRANSITIONS
-
- transition(I, Fetch) {
- //d_sendData;
- qf_queueMemoryFetchRequest;
- j_popIncomingRequestQueue;
- }
-
- transition(I, Data) {
- m_writeDataToMemory;
- //a_sendAck;
- qw_queueMemoryWBRequest;
- k_popIncomingResponseQueue;
- }
-
- transition(I, Memory_Data) {
- d_sendData;
- l_popMemQueue;
- }
-
- transition(I, Memory_Ack) {
- a_sendAck;
- l_popMemQueue;
- }
-}
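Unlike the simpler directory earlier in this patch, the _m variant defers to a MemoryControl model: a Fetch queues a MEMORY_READ, a dirty writeback queues a MEMORY_WB, and only the Memory_Data / Memory_Ack replies coming back from that queue produce the coherence response. A loose C++ sketch of that round trip, using stand-in types rather than Ruby's MemoryControl interface:

    // Illustrative model of the memory-controller-backed directory (not gem5 code).
    #include <cstdint>
    #include <iostream>
    #include <queue>
    #include <string>

    enum class MemOp { MEMORY_READ, MEMORY_WB };

    struct MemoryMsg { MemOp type; uint64_t addr; int requestor; };

    std::queue<MemoryMsg> memQueue;   // stands in for the memBuffer port

    // transition(I, Fetch): qf_queueMemoryFetchRequest
    void onFetch(uint64_t addr, int requestor) {
        memQueue.push({MemOp::MEMORY_READ, addr, requestor});
    }

    // transition(I, Data): m_writeDataToMemory; qw_queueMemoryWBRequest
    void onWriteback(uint64_t addr, int requestor) {
        memQueue.push({MemOp::MEMORY_WB, addr, requestor});
    }

    // transition(I, Memory_Data): d_sendData / transition(I, Memory_Ack): a_sendAck
    void drainMemQueue() {
        while (!memQueue.empty()) {
            MemoryMsg m = memQueue.front();
            memQueue.pop();
            std::string reply =
                (m.type == MemOp::MEMORY_READ) ? "MEMORY_DATA" : "MEMORY_ACK";
            std::cout << reply << " -> L2 " << m.requestor
                      << " for 0x" << std::hex << m.addr << std::dec << "\n";
        }
    }

    int main() {
        onFetch(0x1000, /*requestor=*/0);
        onWriteback(0x2000, /*requestor=*/1);
        drainMemQueue();
        return 0;
    }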
diff --git a/src/mem/protocol/MESI_CMP_filter_directory_m.slicc b/src/mem/protocol/MESI_CMP_filter_directory_m.slicc
deleted file mode 100644
index 43c9d4019..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory_m.slicc
+++ /dev/null
@@ -1,7 +0,0 @@
-../protocols/LogTM.sm
-../protocols/MESI_CMP_filter_directory-msg.sm
-../protocols/MESI_CMP_filter_directory-L2cache.sm
-../protocols/MESI_CMP_filter_directory-L1cache.sm
-../protocols/MESI_CMP_filter_directory_m-mem.sm
-../protocols/standard_CMP-protocol.sm
-
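Both deleted .slicc files are plain manifests, one .sm source per line; the _m variant differs only in pulling in MESI_CMP_filter_directory_m-mem.sm in place of MESI_CMP_filter_directory-mem.sm. As a purely hypothetical illustration of that format (this is not gem5's SLICC front end), a tiny reader that lists what a manifest names:

    // Hypothetical helper (not part of gem5) that prints the .sm sources a
    // .slicc manifest refers to, to show that the file is just a list of paths.
    #include <fstream>
    #include <iostream>
    #include <string>

    int main(int argc, char **argv) {
        if (argc != 2) {
            std::cerr << "usage: " << argv[0] << " <protocol.slicc>\n";
            return 1;
        }
        std::ifstream manifest(argv[1]);
        std::string line;
        while (std::getline(manifest, line)) {
            if (!line.empty())
                std::cout << "state machine source: " << line << "\n";
        }
        return 0;
    }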