/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

//
// One bank of a token-coherence L2 cache.
// Parameters:
//   L2cacheMemory       - cache array backing this bank
//   N_tokens            - tokens handed out per shared-data response when plentiful
//   l2_request_latency  - enqueue latency (cycles) for outbound requests
//   l2_response_latency - enqueue latency (cycles) for outbound responses
//   filtering_enabled   - if true, first-try external GETS with no local sharers
//                         are filtered (not forwarded to local L1s)
//
machine(L2Cache, "Token protocol") 
 : CacheMemory * L2cacheMemory,
   int N_tokens,
   int l2_request_latency = 10,
   int l2_response_latency = 10,
   bool filtering_enabled = true
{

  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network

  // this L2 bank -> a local L1 || mod-directory  (vnet 1, unordered)
  MessageBuffer responseFromL2Cache, network="To", virtual_network="1", ordered="false";  
  // this L2 bank -> mod-directory  (vnet 3, unordered)
  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="3", ordered="false";  
  // this L2 bank -> a local L1  (vnet 4, unordered)
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="4", ordered="false";  


  // FROM the network to this local bank of L2 cache

  // a local L1 || mod-directory -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="1", ordered="false";  
  // persistent (starvation-avoidance) requests must stay ordered
  MessageBuffer persistentToL2Cache, network="From", virtual_network="2", ordered="true";
  // mod-directory -> this L2 bank
  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="3", ordered="false";  
  // a local L1 -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="4", ordered="false";  

  // STATES
  // STATES
  // Note: the machine's declared default is I (L2Cache_State_I), but
  // getState() reports NP for untracked, unlocked addresses.
  enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, desc="Not Present";
    I, desc="Idle";
    S, desc="Shared, not present in any local L1s";
    O, desc="Owned, not present in any L1s";
    M, desc="Modified, not present in any L1s";

    // Locked states: a persistent (starving) requester elsewhere holds
    // priority for this block, so we must not hoard tokens.
    I_L,  "I^L",   desc="Invalid, Locked";
    S_L,  "S^L",   desc="Shared, Locked";
  }

  // EVENTS
  // EVENTS
  // Descriptions below are used for generated documentation/profiling; the
  // *_Last_Token and Writeback_* variants previously carried copy-pasted
  // descriptions identical to their base events, which was misleading.
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS,             desc="local L1 GETS request";
    L1_GETS_Last_Token,    desc="local L1 GETS request when this bank holds only one token";
    L1_GETX,             desc="local L1 GETX request";
    L1_INV,              desc="L1 no longer has tokens";
    Transient_GETX,      desc="A GetX from another processor";
    Transient_GETS,      desc="A GetS from another processor";
    Transient_GETS_Last_Token,   desc="A GetS from another processor when this bank holds only one token";

    // events initiated by this L2
    L2_Replacement,     desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens,               desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data,               desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens,    desc="Received a writeback from L1 that brings our token count to all tokens";
    Writeback_Owned,                desc="Received a writeback from L1 that transfers ownership (may carry dirty data)";


    Data_Shared,             desc="Received a data message, we are now a sharer";
    Data_Owner,              desc="Received a data message, we are now the owner";
    Data_All_Tokens,   desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack,                     desc="Received an ack message";
    Ack_All_Tokens,          desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock (persistent-request table activity)
    Persistent_GETX,     desc="Another processor has priority to read/write";
    Persistent_GETS,     desc="Another processor has priority to read";
    Own_Lock_or_Unlock,  desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  // CacheEntry: per-block state stored in this L2 bank's cache array.
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,        desc="cache state";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    int Tokens,              desc="The number of tokens we're holding for the line";
    DataBlock DataBlk,       desc="data for the block";
  }

  // Local-directory entry: tracks which local L1s (probably) hold the block,
  // used to filter/forward transient requests.  This is a hint, not ground truth.
  structure(DirEntry, desc="...") {
    Set Sharers,            desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }

  // Unbounded tag-indexed store used for the local sharer directory
  // (implemented in C++ outside this file).
  external_type(PerfectCacheMemory) {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  // Table of outstanding persistent (starvation-avoidance) requests
  // (implemented in C++ outside this file).  findSmallest returns the
  // highest-priority (lowest-ID) starving requester for an address.
  external_type(PersistentTable) {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }

  // Per-bank persistent-request table and local L1 sharer directory.
  PersistentTable persistentTable;
  PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";

  // Return (by reference) the cache entry for addr.  The tag must be
  // resident; calling this on a missing block is a protocol bug.
  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
    assert(L2cacheMemory.isTagPresent(addr));
    return static_cast(Entry, L2cacheMemory[addr]);
  }

  // Token count this bank holds for addr; non-resident blocks hold zero.
  int getTokens(Address addr) {
    if (L2cacheMemory.isTagPresent(addr) == false) {
      return 0;
    }
    return getL2CacheEntry(addr).Tokens;
  }

  // Update the access permission of a resident block; a miss is a no-op.
  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      L2cacheMemory.changePermission(addr, permission);
    }
  }

  // True when this bank's cache array holds a tag for addr.
  bool isCacheTagPresent(Address addr) {
    return L2cacheMemory.isTagPresent(addr);
  }

  // Derive the protocol state for addr.  Resident blocks report their
  // stored state; otherwise a persistent lock implies I_L, else NP.
  State getState(Address addr) {
    if (isCacheTagPresent(addr)) {
      return getL2CacheEntry(addr).CacheState;
    }
    if (persistentTable.isLocked(addr)) {
      return State:I_L;
    }
    return State:NP;
  }

  // Human-readable name of the current state for addr (debug/trace output).
  string getStateStr(Address addr) {
    return L2Cache_State_to_string(getState(addr));
  }

  // Transition the resident block at addr to the given state, sanity-checking
  // token-count invariants, then update the cache-array access permission.
  // Non-resident addresses are silently ignored (their state is derived in
  // getState()).
  void setState(Address addr, State state) {


    if (isCacheTagPresent(addr)) {
      // Make sure the token count is in range
      assert(getL2CacheEntry(addr).Tokens >= 0);
      assert(getL2CacheEntry(addr).Tokens <= max_tokens());

      // Make sure we have no tokens in L
      if ((state == State:I_L) ) {
        if (isCacheTagPresent(addr)) {
          assert(getL2CacheEntry(addr).Tokens == 0);
        }
      }

      // in M and E you have all the tokens
      if (state == State:M ) {
        assert(getL2CacheEntry(addr).Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(getL2CacheEntry(addr).Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S ) {
        assert(getL2CacheEntry(addr).Tokens > 0);
      }

      // You have at least half the token in O-like states
      if (state == State:O ) {
        assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
      //  assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
      }

      getL2CacheEntry(addr).CacheState := state;

      // Set permission: read in S/O, read-write in M, otherwise no access
      // (locked states fall into the final Invalid branch).
      if (state == State:I) {
        changePermission(addr, AccessPermission:Invalid);
      } else if (state == State:S || state == State:O ) {
        changePermission(addr, AccessPermission:Read_Only);
      } else if (state == State:M ) {
        changePermission(addr, AccessPermission:Read_Write);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }

  // Drop node id from addr's local sharer set; free the directory entry
  // once the last sharer is gone.  No-op if we track no sharers for addr.
  void removeSharer(Address addr, NodeID id) {

    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  // True when the local directory records at least one L1 sharer for addr.
  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      return (localDirectory[addr].Sharers.count() > 0);
    }
    return false;
  }

  // True when the local directory believes some local L1 holds addr exclusively.
  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      return localDirectory[addr].exclusive;
    }
    return false;
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  // Record node id as the sole (exclusive) local holder of addr,
  // replacing any previously tracked sharers.
  void setNewWriter(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  // Add node id to addr's sharer set, allocating the directory entry on
  // first use.  The exclusive hint is deliberately left untouched here.
  void addNewSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  // Clear the exclusive hint for addr; no-op when no directory entry exists.
  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].exclusive := false;
    }
  }

  // ** OUT_PORTS **
  // Bind each outbound message buffer to a typed port used by the actions.
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);



  // ** IN_PORTS **

  // Persistent Network
  // Persistent Network: starvation-avoidance lock/unlock messages.
  // First update the persistent table from the message, then trigger an
  // event based on the table's resulting state (not the message alone),
  // since an unlock may reveal another still-pending persistent request.
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {

          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
            trigger(Event:Persistent_GETS, in_msg.Address);
          } else {
            trigger(Event:Persistent_GETX, in_msg.Address);
          }
        }
        else {
            trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
        }
      }
    }
  }


  // Request Network
  // Request Network: transient GETS/GETX from remote processors (via the
  // directory).  A GETS when we hold exactly one token is distinguished so
  // the machine can avoid giving away its last token.
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == CoherenceRequestType:GETX) {
            trigger(Event:Transient_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Local L1 request network: GETS/GETX from L1s attached to this bank.
  // Mirrors the global request port, including the last-token distinction.
  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Address);
          }
          else {
            trigger(Event:L1_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }


  // Response Network
  // Response Network: data/ack/writeback messages.  The outer branch splits
  // on whether this message would bring us to the full token count
  // (max_tokens()), selecting the *_All_Tokens event variants.  Writebacks
  // additionally require a cache frame: if none is free and the tag is not
  // already present, an L2_Replacement is triggered on the victim chosen by
  // cacheProbe() instead (the writeback stays queued until space exists).
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
          // Partial-token path: we will still be short of all tokens.
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.Address);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.Address);
              }
            }
            else {
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            error("Unexpected message");
          }
        } else {
          // All-tokens path: this message completes our token count.
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                // A token-only WB can only complete the count if we already
                // hold the block (and therefore track its state).
                assert( (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              }
            }
            else {
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            DEBUG_EXPR(in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }


  // ACTIONS

  // Forward a local L1 request to the directory.  (Earlier versions
  // conditionally broadcast based on the filter state -- see the
  // commented-out logic -- but the current code always forwards.)
  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

     // if this is a retry or no local sharers, broadcast normally

     // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
        enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
           out_msg.Address := in_msg.Address;
           out_msg.Type := in_msg.Type;
           out_msg.Requestor := in_msg.Requestor;
           out_msg.RequestorMachine := in_msg.RequestorMachine;
           out_msg.RetryNum := in_msg.RetryNum;

           //
           // If a statically shared L2 cache, then no other L2 caches can 
           // store the block
           //
           //out_msg.Destination.broadcast(MachineType:L2Cache);
           //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
           //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

           out_msg.Destination.add(map_Address_to_Directory(address));
           out_msg.MessageSize := MessageSizeType:Request_Control;
           out_msg.AccessMode := in_msg.AccessMode;
           out_msg.Prefetch := in_msg.Prefetch;
        } //enqueue
      // } // if

         //profile_filter_action(0);
    } // peek
  } //action


  // Forward an incoming response (tokens and any data) onward to the
  // directory unchanged -- used when this bank cannot or should not keep it.
  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  // On replacing a clean block, return any held tokens to the directory as
  // an ACK (no data needed); zero our local count afterward.
  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  // On replacing an owned block, send tokens (plus data iff dirty) to the
  // directory: dirty -> DATA_OWNER with the block, clean -> ACK_OWNER
  // control-only.  Zero our local count afterward.
  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L2Cache;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;

      if (getL2CacheEntry(address).Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  // Answer a transient GETS with shared data: hand over N_tokens when we
  // hold more than that, otherwise a single token.  Data is sent clean.
  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (getL2CacheEntry(address).Tokens > N_tokens) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
      }
    }
  }

  // Answer a transient GETX: ship the data plus every token we hold
  // (ownership transfer), then zero our local count.
  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  // Surrender all held tokens (no data) to the current highest-priority
  // starving requester.  The count is zeroed unconditionally; the enqueue
  // only happens when there is something to send.
  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  // Surrender data plus all held tokens to the current highest-priority
  // starving requester, then zero our local count.
  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L2Cache;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getL2CacheEntry(address).Tokens >= 1);
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  // For a persistent GETS: give the starver all but one token (keep one so
  // we stay a reader).  If we only hold one, nothing is sent; either way we
  // end holding exactly one token.
  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 1;
  }

  // Owner variant of f_: send the data plus all but one token to the
  // starver.  When we hold only one token nothing is sent and the count
  // (already 1) is left unchanged.
  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
      getL2CacheEntry(address).Tokens := 1;
    }
  }



  // Redirect an incoming response verbatim to the highest-priority starving
  // requester instead of absorbing it locally (block is persistently locked).
  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  // Redirect an L1 writeback to the starver, rewriting the writeback type
  // into a normal response: WB_SHARED_DATA -> DATA_SHARED, token-only
  // writebacks -> ACK.
  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  // Redirect an owned L1 writeback to the starver as a DATA_OWNER response
  // (ownership passes straight through this bank).
  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }


  // A writeback (or hint) means the sending L1 no longer shares the block:
  // drop it from the local sharer directory.
  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
    }
  }

  // Forward an external transient request to the local L1s.  First-attempt
  // requests (RetryNum == 0) are filtered out when our directory shows no
  // local sharers and filtering is enabled; retries always broadcast so the
  // filter cannot cause permanent starvation.
  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
        //profile_filter_action(1);
        DEBUG_EXPR("filtered message");
        DEBUG_EXPR(in_msg.RetryNum);
      }
      else {
        enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
           out_msg.Address := in_msg.Address;
           out_msg.Requestor := in_msg.Requestor;
           out_msg.RequestorMachine := in_msg.RequestorMachine;
           
           //
           // Currently assuming only one chip so all L1s are local
           //
           //out_msg.Destination := getLocalL1IDs(machineID);
           out_msg.Destination.broadcast(MachineType:L1Cache);
           out_msg.Destination.remove(in_msg.Requestor);

           out_msg.Type := in_msg.Type;
           out_msg.isLocal := false;
           out_msg.MessageSize := MessageSizeType:Request_Control;
           out_msg.AccessMode := in_msg.AccessMode;
           out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }


  // L2 hit for a local L1 GETS: send clean shared data with exactly one
  // token and decrement our count.
  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  // L2 hit handing ownership to a local L1: send DATA_OWNER (with our dirty
  // bit) plus one token and decrement our count.
  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  // L2 hit for a local L1 GETX: send DATA_OWNER with every token we hold,
  // then zero our count.  (We may hold fewer than max_tokens(); the L1
  // collects the rest from other holders.)
  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
//      assert(getL2CacheEntry(address).Tokens == max_tokens());
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  // Queue-pop helpers: each simply dequeues the head message of the named
  // virtual network after a transition has consumed it.
  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }


  // Absorb the tokens carried by the response at the head of the response
  // queue into this bank's count, and pick up dirtiness from owner-bearing
  // responses.
  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      // A token-response with zero tokens would be meaningless.
      assert(in_msg.Tokens != 0);
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      //  may not trigger this action.
      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := true;
      }
    }
  }

  // Record a requesting local L1 in this bank's sharer/writer filter:
  // GETX marks it as the (sole) writer, GETS adds it as a sharer.
  // Requests from non-L1 machines are deliberately ignored.
  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }

  // Drop the exclusive-filter bit for this address, if one is set.
  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  // Touch the replacement state: only for requests from local L1s, and only
  // when the tag is actually present in this bank.
  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (isCacheTagPresent(address))) {
        L2cacheMemory.setMRU(address);
      }
    }
  }

  // Hand all collected tokens (no data) to an external transient requestor.
  // If the bank holds no tokens, no message is sent; the count is zeroed
  // unconditionally afterwards (a no-op in that case).
  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  // Same as t_sendAckWithCollectedTokens, but the requestor is taken from
  // the local L1 request queue rather than the global request network.
  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  // Copy the incoming response's data block into the cache entry.  The
  // Dirty bit is sticky: it can be set by a dirty response but is never
  // cleared here.
  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
      if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := in_msg.Dirty;
      }
    }
  }

  // Allocate a fresh cache entry for this address (caller must ensure a
  // victim way exists or has been evicted).
  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address, new Entry);
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  //action(uu_profileMiss, "\u", desc="Profile the demand miss") {
  //  peek(L1requestNetwork_in, RequestMsg) {
      // AccessModeType not implemented
      //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize),  in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
  //  }
  //}


  // Sanity check used when we already hold valid data: any redundant data
  // response must agree byte-for-byte with our cached copy.
  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
    }
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // A local L1 dropped its copy: only the sharer/writer filter needs
  // updating, in every state (base and locked alike).
  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {

    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Persistent-table activity for our own lock needs no state change while
  // in an unlocked base state; just consume the message.
  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }


  // Transitions from NP

  // Nothing cached here: relay external transient requests to any local
  // sharers recorded in the filter.
  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  // Local L1 miss with no block here: broadcast on the L1's behalf and
  // record it in the sharer/writer filter.  State stays NP — no entry is
  // allocated until a response or writeback arrives.
  transition(NP,  {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Responses for a block we don't hold are bounced onward.
  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  // L1 writebacks allocate an entry here; the next state reflects what the
  // writeback carried: shared data -> S, bare tokens -> I, all tokens -> M,
  // owned data -> O.
  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  // A starver activated a persistent request: lock the (absent) block.
  transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  // Local read request: broadcast it, and pass along whatever tokens this
  // bank has accumulated (data must come from elsewhere — I has none).
  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;  // send any tokens we have collected
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Evicting an I-state entry: clean replacement frees any held tokens.
  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  // External transient request: give up collected tokens and forward to any
  // local sharers.
  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // Persistent request from a starver: surrender tokens and lock.
  transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  // Incoming tokens/data accumulate here; the state climbs with what we
  // receive: data -> S, owner data -> O, all tokens -> M.
  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  // An external writer takes our tokens; drop to I.
  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // External readers are served by the owner, not us: just forward locally.
  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  // Persistent writer: give up everything and lock to I_L.
  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  // Persistent reader: keep one token so we stay a (locked) sharer.
  transition(S, Persistent_GETS, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  // More tokens/data arriving while shared: data responses must match what
  // we already cache; state climbs to O/M when ownership/all tokens arrive.
  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens,  M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Local write request: S can't grant exclusivity, so broadcast and pass
  // our tokens up; drop to I.
  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }


  // Local read hit: serve data + one token from this bank.
  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Serving the last token empties this bank: drop to I.
  transition(S, L1_GETS_Last_Token, I) {

    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  // Owner eviction must write the (possibly dirty) data back.
  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // External writer: as owner we supply data plus all tokens, forward to
  // local sharers, and drop to I.
  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  // Persistent reader: keep one token and remain a locked sharer.
  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }

  // Token accumulation while owning; gathering them all promotes to M.
  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Local read: hand out data plus one token, stay owner.
  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Last token leaves with ownership attached; drop to I.
  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Local write: broadcast for the remaining tokens and hand over data plus
  // everything we hold; drop to I.
  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG:  Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  // Local read: give out one token with the data, keeping ownership -> O.
  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Local write: holding all tokens, we can grant exclusivity directly
  // (no broadcast needed, unlike the O-state L1_GETX path).
  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }


  //Transitions from locked states

  // While locked, incoming responses are redirected to the starving
  // requestor instead of being absorbed here.
  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  // L1 writebacks are likewise bounced to the starver, but the local filter
  // is still updated.
  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  // Our own persistent activation/deactivation unlocks the block.
  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  // Transient requests while locked are ignored (the persistent mechanism
  // will resolve the block); only the exclusive bit is cleared.
  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  // Local requests during a lock are still broadcast so they are queued at
  // the eventual owner.
  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  // S_L still holds data and at least one token, so local reads can hit.
  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // A persistent writer supersedes the read lock: give up our tokens.
  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  // Already locked for reading; further persistent reads need no action.
  transition(S_L, Persistent_GETS) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}