/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_token-cache.sm 1.10 05/01/19 15:41:25-06:00 beckmann@emperor11.cs.wisc.edu $
 *
 */

machine(L1Cache, "Token protocol") {

  MessageBuffer requestFromCache, network="To", virtual_network="1", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="0", ordered="false";
  MessageBuffer persistentFromCache, network="To", virtual_network="2", ordered="true";

  MessageBuffer requestToCache, network="From", virtual_network="1", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="0", ordered="false";
  MessageBuffer persistentToCache, network="From", virtual_network="2", ordered="true";
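
  // Note on virtual-network usage (as declared above): responses travel on
  // virtual network 0, transient requests on virtual network 1, and
  // persistent (starvation-avoidance) requests on the ordered virtual
  // network 2, presumably so that persistent activations and deactivations
  // are observed in a consistent order at every node.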


  // STATES
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    NP,   "NP",   desc="Not Present";
    I,    "I",    desc="Idle";
    S,    "S",    desc="Shared";
    O,    "O",    desc="Owned";
    M,    "M",    desc="Modified (dirty)";
    MM,   "MM",   desc="Modified (dirty and locally modified)";
    M_W,  "M^W",  desc="Modified (dirty), waiting";
    MM_W, "MM^W", desc="Modified (dirty and locally modified), waiting";

    // Transient States
    IM, "IM", desc="Issued GetX";
    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, "OM", desc="Issued GetX, received data";
    IS, "IS", desc="Issued GetS";

    // Locked states
    I_L,  "I^L",   desc="Invalid, Locked";
    S_L,  "S^L",   desc="Shared, Locked";
    IM_L, "IM^L",  desc="Invalid, Locked, trying to go to Modified";
    SM_L, "SM^L",  desc="Shared, Locked, trying to go to Modified";
    IS_L, "IS^L",  desc="Invalid, Locked, trying to go to Shared";
  }
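
  // Note: the *_W ("waiting") states hold a just-filled modified line for a
  // short use timeout (see o_scheduleUseTimeout and the Use_Timeout
  // transitions).  Transient requests arriving during this window are ignored
  // (see the M_W/MM_W Transient_GETX/GETS transitions), and the line reverts
  // to M/MM when the use timer fires.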

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load,            desc="Load request from the processor";
    Ifetch,          desc="I-fetch request from the processor";
    Store,           desc="Store request from the processor";
    L2_Replacement,  desc="L2 Replacement";
    L1_to_L2,        desc="L1 to L2 transfer";
    L2_to_L1D,       desc="L2 to L1-Data transfer";
    L2_to_L1I,       desc="L2 to L1-Instruction transfer";

    // Responses
    Data_Shared,             desc="Received a data message, we are now a sharer";
    Data_Shared_All_Tokens,  desc="Received a data message, we are now a sharer, we now have all the tokens";
    Data_Owner,              desc="Received a data message, we are now the owner";
    Data_Owner_All_Tokens,   desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack,                     desc="Received an ack message";
    Ack_All_Tokens,          desc="Received an ack message, we now have all the tokens";

    // Requests
    Transient_GETX,  desc="A GetX from another processor";
    Transient_GETS,  desc="A GetS from another processor";

    // Lock/Unlock
    Persistent_GETX,     desc="Another processor has priority to read/write";
    Persistent_GETS,     desc="Another processor has priority to read";
    Own_Lock_or_Unlock,  desc="This processor now has priority";

    // Triggers
    Request_Timeout,         desc="Reissue timer expired";
    Use_Timeout,             desc="Use timer expired";

  }

  // TYPES

  int getRetryThreshold();
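  // getRetryThreshold() (supplied externally) bounds how many times a
  // transient GETS/GETX may be reissued before a_issueRequest escalates to a
  // persistent request.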

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    DataBlock DataBlk,       desc="data for the block, required by CacheMemory";
    State CacheState,        desc="cache state";
    bool Dirty,              desc="Is the data dirty (different than memory)?";
    int Tokens,              desc="The number of tokens we're holding for the line";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState,                       desc="Transient state";
    int IssueCount,      default="0",     desc="The number of times we've issued a request for this line.";
    Address PC,                           desc="Program counter of request";
    AccessType AccessType,                desc="Type of request (used for profiling)";
    Time IssueTime,                       desc="Time the request was issued";
  }

  external_type(CacheMemory) {
    bool cacheAvail(Address);
    Address cacheProbe(Address);
    void allocate(Address);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  external_type(TimerTable, inport="yes") {
    bool isReady();
    Address readyAddress();
    void set(Address, int);
    void unset(Address);
    bool isSet(Address);
  }

  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";

  TBETable TBEs, template_hack="<L1Cache_TBE>";
  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
  CacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L2"', abstract_chip_ptr="true";
  PersistentTable persistentTable, constructor_hack="i";
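  // persistentTable tracks the currently active persistent requests for each
  // block; findSmallest() returns the highest-priority (starving) requestor,
  // to whom tokens and data are bounced while the block is locked.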
  TimerTable useTimerTable;
  TimerTable reissueTimerTable;

  int outstandingRequests, default="0";
  int outstandingPersistentRequests, default="0";
  void profile_outstanding_request(int outstanding);
  void profile_outstanding_persistent_request(int outstanding);

  int averageLatencyHysteresis, default="(8)"; // Constant that provides hysteresis for calculating the estimated average
  int averageLatencyCounter, default="(500 << (*(m_L1Cache_averageLatencyHysteresis_vec[i])))";
  // int averageLatencyCounter, default="(250)";

  int averageLatencyEstimate() {
    return averageLatencyCounter >> averageLatencyHysteresis;
  }

  void updateAverageLatencyEstimate(int latency) {
    assert(latency >= 0);

    // By subtracting the current average and then adding the most
    // recent sample, we calculate an estimate of the recent average.
    // If we simply used a running sum and divided by the total number
    // of entries, the estimate of the average would adapt very slowly
    // after the execution has run for a long time.
    averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
  }
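
  // In effect, averageLatencyCounter holds roughly (2^averageLatencyHysteresis)
  // times the estimated average, so each update behaves like an exponential
  // moving average with weight 1/2^hysteresis.  For example (with the default
  // hysteresis of 8 and the initial counter of 500 << 8 = 128000): a sample of
  // 300 gives counter := 128000 - 500 + 300 = 127800, and the new estimate is
  // 127800 >> 8 = 499.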

  Entry getCacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr];
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory[addr];
    } else {
      return L1IcacheMemory[addr];
    }
  }

  int getTokens(Address addr) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr].Tokens;
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory[addr].Tokens;
    } else if (L1IcacheMemory.isTagPresent(addr)) {
      return L1IcacheMemory[addr].Tokens;
    } else {
      return 0;
    }
  }

  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
    } else {
      return L1IcacheMemory.changePermission(addr, permission);
    }
  }

  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  }

  State getState(Address addr) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (isCacheTagPresent(addr)) {
      return getCacheEntry(addr).CacheState;
    } else if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
      // Not in cache, in persistent table, but this processor isn't highest priority
      return State:I_L;
    } else {
      return State:NP;
    }
  }

  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    assert(outstandingPersistentRequests >= 0);
    assert(outstandingRequests >= 0);

    if (useTimerTable.isSet(addr)) {
      assert((state == State:M_W) || (state == State:MM_W));
    } else {
      assert(state != State:M_W);
      assert(state != State:MM_W);
    }

    if (reissueTimerTable.isSet(addr)) {
      assert((state == State:IS) ||
             (state == State:IM) ||
             (state == State:SM) ||
             (state == State:OM) ||
             (state == State:IS_L) ||
             (state == State:IM_L) ||
             (state == State:SM_L));
    } else if (TBEs.isPresent(addr) && TBEs[addr].IssueCount < getRetryThreshold()) {
      // If the timer is not set, you had better have issued a persistent request
      assert(state != State:IS);
      assert(state != State:IM);
      assert(state != State:SM);
      assert(state != State:OM);
      assert(state != State:IS_L);
      assert(state != State:IM_L);
      assert(state != State:SM_L);
    }

    if (TBEs.isPresent(addr) && (TBEs[addr].IssueCount > getRetryThreshold())) {
      assert(reissueTimerTable.isSet(addr) == false);
    }

    if (TBEs.isPresent(addr)) {
      assert(state != State:I);
      assert(state != State:S);
      assert(state != State:O);
      assert(state != State:MM);
      assert(state != State:M);
      TBEs[addr].TBEState := state;
    }

    if (isCacheTagPresent(addr)) {
      // Make sure the token count is in range
      assert(getCacheEntry(addr).Tokens >= 0);
      assert(getCacheEntry(addr).Tokens <= max_tokens());

      if ((state == State:I_L) ||
          (state == State:IM_L) ||
          (state == State:IS_L)) {
        // Make sure we have no tokens in the "Invalid, locked" states
        assert(getCacheEntry(addr).Tokens == 0);

        // Make sure the line is locked
        assert(persistentTable.isLocked(addr));

        // But we shouldn't have highest priority for it
        assert(persistentTable.findSmallest(addr) != machineID);

      } else if ((state == State:S_L) ||
                 (state == State:SM_L)) {
        // Make sure we have only one token in the "Shared, locked" states
        assert(getCacheEntry(addr).Tokens == 1);

        // Make sure the line is locked...
        assert(persistentTable.isLocked(addr));

        // ...But we shouldn't have highest priority for it...
        assert(persistentTable.findSmallest(addr) != machineID);

        // ...And it must be a GETS request
        assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);

      } else {

        // If there is an entry in the persistent table of this block,
        // this processor needs to have an entry in the table for this
        // block, and that entry better be the smallest (highest
        // priority).  Otherwise, the state should have been one of
        // locked states

        if (persistentTable.isLocked(addr)) {
          assert(persistentTable.findSmallest(addr) == machineID);
        }
      }

      // in the M-like states (M, MM, M_W, MM_W) you have all the tokens
      if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
        assert(getCacheEntry(addr).Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(getCacheEntry(addr).Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S || state == State:SM) {
        assert(getCacheEntry(addr).Tokens > 0);
      }

      // You have at least half the tokens in O-like states
      if (state == State:O || state == State:OM) {
        assert(getCacheEntry(addr).Tokens >= 1); // Must have at least one token
        assert(getCacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
      }

      getCacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:MM ||
          state == State:MM_W) {
        changePermission(addr, AccessPermission:Read_Write);
      } else if ((state == State:S) ||
                 (state == State:O) ||
                 (state == State:M) ||
                 (state == State:M_W) ||
                 (state == State:SM) ||
                 (state == State:SM_L) ||
                 (state == State:OM)) {
        changePermission(addr, AccessPermission:Read_Only);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  AccessType cache_request_type_to_access_type(CacheRequestType type) {
    if ((type == CacheRequestType:LD) || (type == CacheRequestType:IFETCH)) {
      return AccessType:Read;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return AccessType:Write;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  // ** OUT_PORTS **
  out_port(persistentNetwork_out, PersistentMsg, persistentFromCache);
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  // ** IN_PORTS **

  // Use Timer
  in_port(useTimerTable_in, Address, useTimerTable) {
    if (useTimerTable_in.isReady()) {
      trigger(Event:Use_Timeout, useTimerTable.readyAddress());
    }
  }

  // Reissue Timer
  in_port(reissueTimerTable_in, Address, reissueTimerTable) {
    if (reissueTimerTable_in.isReady()) {
      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
    }
  }

  // Persistent Network
  in_port(persistentNetwork_in, PersistentMsg, persistentToCache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {

        // Apply the lockdown or unlockdown message to the table
        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {
          if (persistentTable.findSmallest(in_msg.Address) == machineID) {
            // Our Own Lock - this processor is highest priority
            trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
          } else {
            if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
              trigger(Event:Persistent_GETS, in_msg.Address);
            } else {
              trigger(Event:Persistent_GETX, in_msg.Address);
            }
          }
        } else {
          // Unlock case - no entries in the table
          trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
        }
      }
    }
  }


  // Request Network
  in_port(requestNetwork_in, RequestMsg, requestToCache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Transient_GETS, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
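        // If this response brings our token count up to max_tokens(), trigger
        // the *_All_Tokens variant of the event; otherwise trigger the plain
        // event.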

        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Address);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared_All_Tokens, in_msg.Address);
          } else {
            error("Unexpected message");
          }
        }
      }
    }
  }

  // Mandatory Queue
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg) {
        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
              trigger(Event:L1_to_L2, in_msg.Address);
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          }

          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 fetches the line.  We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
              // The L1 doesn't have the line, but there is space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:L2_to_L1I, in_msg.Address);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
              trigger(Event:L1_to_L2, in_msg.Address);
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          }

          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 fetches the line.  We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
              // The L1 doesn't have the line, but there is space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:L2_to_L1D, in_msg.Address);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue GETS or GETX request (transient or persistent)") {

    if (TBEs[address].IssueCount == 0) {
      // Update outstanding requests
      profile_outstanding_request(outstandingRequests);
      outstandingRequests := outstandingRequests + 1;
    }

    if (TBEs[address].IssueCount < getRetryThreshold()) {
      // Issue a normal request
      enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
        out_msg.Address := address;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(map_Address_to_Directory(address));

        if (TBEs[address].AccessType == AccessType:Read) {
          out_msg.Type := CoherenceRequestType:GETS;
        } else {
          out_msg.Type := CoherenceRequestType:GETX;
        }

        if (TBEs[address].IssueCount == 0) {
          out_msg.MessageSize := MessageSizeType:Request_Control;
        } else {
          out_msg.MessageSize := MessageSizeType:Reissue_Control;
        }
      }

      // Increment IssueCount
      TBEs[address].IssueCount := TBEs[address].IssueCount + 1;

      // Set a wakeup timer
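      // (reissue after roughly twice the estimated average request latency)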
      reissueTimerTable.set(address, 2*averageLatencyEstimate());

    } else {
      // Try to issue a Persistent Request
      if (persistentTable.okToIssueStarving(address)) {
        // Issue a persistent request
        enqueue(persistentNetwork_out, PersistentMsg, latency="ISSUE_LATENCY") {
          out_msg.Address := address;
          if (TBEs[address].AccessType == AccessType:Read) {
            out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
          } else {
            out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
          }
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.add(map_Address_to_Directory(address));
          out_msg.MessageSize := MessageSizeType:Persistent_Control;
        }
        persistentTable.markEntries(address);

        // Update outstanding requests
        profile_outstanding_persistent_request(outstandingPersistentRequests);
        outstandingPersistentRequests := outstandingPersistentRequests + 1;

        // Increment IssueCount
        TBEs[address].IssueCount := TBEs[address].IssueCount + 1;

        // Do not schedule a wakeup; a persistent request will always complete

      } else {
        // We'd like to issue a persistent request, but we are not allowed
        // to issue one right now.  Thus, we do not increment the
        // IssueCount.


        // Set a wakeup timer
        reissueTimerTable.set(address, 10);
      }
    }
  }

  action(b_bounceResponse, "b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.DestMachine := MachineType:Directory;
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    if (getCacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.DestMachine := MachineType:Directory;
        out_msg.Tokens := getCacheEntry(address).Tokens;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      getCacheEntry(address).Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DestMachine := MachineType:Directory;
      out_msg.Tokens := getCacheEntry(address).Tokens;
      out_msg.Dirty := getCacheEntry(address).Dirty;
      if (getCacheEntry(address).Dirty) {
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
        // NOTE: in a real system this would not send data.  We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
    getCacheEntry(address).Tokens := 0;
  }

  action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DestMachine := MachineType:L1Cache;
        out_msg.Tokens := 1;
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
    assert(getCacheEntry(address).Tokens >= 1);
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DestMachine := MachineType:L1Cache;
        assert(getCacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getCacheEntry(address).Tokens;
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getCacheEntry(address).Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
    if (getCacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.DestMachine := MachineType:L1Cache;
        assert(getCacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getCacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getCacheEntry(address).Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
    assert(getCacheEntry(address).Tokens > 0);
    enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.DestMachine := MachineType:L1Cache;
      assert(getCacheEntry(address).Tokens >= 1);
      out_msg.Tokens := getCacheEntry(address).Tokens;
      out_msg.DataBlk := getCacheEntry(address).DataBlk;
      out_msg.Dirty := getCacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getCacheEntry(address).Tokens := 0;
  }

  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
    assert(getCacheEntry(address).Tokens > 0);
    if (getCacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.DestMachine := MachineType:L1Cache;
        assert(getCacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getCacheEntry(address).Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getCacheEntry(address).Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all tokens but one to starver") {
    assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
    assert(getCacheEntry(address).Tokens > 0);
    if (getCacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.DestMachine := MachineType:L1Cache;
        assert(getCacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getCacheEntry(address).Tokens - 1;
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
      getCacheEntry(address).Tokens := 1;
    }
  }

  action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
    assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      assert(persistentTable.findSmallest(address) != machineID); // Make sure we never bounce tokens to ourself
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.DestMachine := MachineType:L1Cache;
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    sequencer.readCallback(address, getCacheEntry(address).DataBlk);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
    getCacheEntry(address).Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    TBEs[address].IssueCount := 0;
    peek(mandatoryQueue_in, CacheMsg) {
      TBEs[address].PC := in_msg.ProgramCounter;
      TBEs[address].AccessType := cache_request_type_to_access_type(in_msg.Type);
    }
    TBEs[address].IssueTime := get_time();
  }

  action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }

  action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
    useTimerTable.unset(address);
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }

  action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
    useTimerTable.set(address, 15);
  }

  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens != 0);
      getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    outstandingRequests := outstandingRequests - 1;
    if (TBEs[address].IssueCount > getRetryThreshold()) {
      outstandingPersistentRequests := outstandingPersistentRequests - 1;
      enqueue(persistentNetwork_out, PersistentMsg, latency="ISSUE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
    }

    // Update average latency
    updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(TBEs[address].IssueTime));

    // Profile
    profile_token_retry(address, TBEs[address].AccessType, TBEs[address].IssueCount);
    TBEs.deallocate(address);
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    if (getCacheEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DestMachine := MachineType:L1Cache;
          assert(getCacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getCacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getCacheEntry(address).Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (L1DcacheMemory.isTagPresent(address) == false) {
      L1DcacheMemory.allocate(address);
    }
  }

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (L1IcacheMemory.isTagPresent(address) == false) {
      L1IcacheMemory.allocate(address);
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address);
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L2cacheMemory[address] := L1DcacheMemory[address];
    } else {
      L2cacheMemory[address] := L1IcacheMemory[address];
    }
  }

  action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory[address] := L2cacheMemory[address];
    } else {
      L1IcacheMemory[address] := L2cacheMemory[address];
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      profile_miss(in_msg, id);
    }
  }

  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
    }
  }

  //  action(z_stall, "z", desc="Stall") {
  //  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L2_Replacement) {
    zz_recycleMandatoryQueue;
  }

  transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, Store) {
    zz_recycleMandatoryQueue;
  }

  transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }

  transition({IM, SM, OM, IS, I_L, IM_L, IS_L, S_L, SM_L}, {L1_to_L2, L2_to_L1D, L2_to_L1I}) {
    zz_recycleMandatoryQueue;
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM, M_W, MM_W}, L1_to_L2) {
    vv_allocateL2CacheBlock;
    ss_copyFromL1toL2;
    gg_deallocateL1CacheBlock;
  }

  transition({I, S, O, M, MM, M_W, MM_W}, L2_to_L1D) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1;
    rr_deallocateL2CacheBlock;
  }

  transition({I, S, O, M, MM, M_W, MM_W}, L2_to_L1I) {
    pp_allocateL1ICacheBlock;
    tt_copyFromL2toL1;
    rr_deallocateL2CacheBlock;
  }

  // Locks
  transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }

  // Transitions from NP
  transition(NP, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(NP, Ifetch, IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(NP, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_Owner_All_Tokens}) {
    b_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, {Transient_GETX, Transient_GETS}) {
    m_popRequestQueue;
  }

  transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, Transient_GETX) {
    t_sendAckWithCollectedTokens;
    m_popRequestQueue;
  }

  transition(I, Transient_GETS) {
    m_popRequestQueue;
  }

  transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Owner_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // Transitions from Shared
  transition({S, SM, S_L, SM_L}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    t_sendAckWithCollectedTokens;
    m_popRequestQueue;
  }

  transition(S, Transient_GETS) {
    m_popRequestQueue;
  }

  transition({S, S_L}, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S, Persistent_GETS, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(S_L, Persistent_GETS) {
    l_popPersistentQueue;
  }

  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Owner_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // Transitions from Owned
  transition({O, OM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    d_sendDataWithToken;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // Transitions from Modified
  transition({MM, MM_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition({MM, MM_W}, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(MM, {Transient_GETX, Transient_GETS}, I) {
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(MM_W, {Transient_GETX, Transient_GETS}) { // Ignore the request
    m_popRequestQueue;
  }

  // Implement the migratory sharing optimization, even for persistent requests
  transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  // Implement the migratory sharing optimization, even for persistent requests
  transition(MM_W, {Persistent_GETX, Persistent_GETS}, I_L) {
    s_deallocateTBE;
    ee_sendDataWithAllTokens;
    jj_unsetUseTimer;
    l_popPersistentQueue;
  }

  transition(MM_W, Use_Timeout, MM) {
    s_deallocateTBE;
    jj_unsetUseTimer;
  }

  // Transitions from Dirty Exclusive
  transition({M, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(M, Transient_GETX, I) {
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, Transient_GETS, O) {
    d_sendDataWithToken;
    m_popRequestQueue;
  }

  transition(M_W,{Transient_GETX, Transient_GETS}) { // Ignore the request
    m_popRequestQueue;
  }

  transition(M, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(M, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(M_W, Persistent_GETX, I_L) {
    s_deallocateTBE;
    ee_sendDataWithAllTokens;
    jj_unsetUseTimer;
    l_popPersistentQueue;
  }

  transition(M_W, Persistent_GETS, S_L) {
    s_deallocateTBE;
    ff_sendDataWithAllButOneTokens;
    jj_unsetUseTimer;
    l_popPersistentQueue;
  }

  transition(M_W, Use_Timeout, M) {
    s_deallocateTBE;
    jj_unsetUseTimer;
  }

  // Transient_GETX and Transient_GETS in transient states
  transition(OM, {Transient_GETX, Transient_GETS}) {
    m_popRequestQueue;  // Even if we have the data, we can pretend we don't have it yet.
  }

  transition(IS, Transient_GETX) {
    t_sendAckWithCollectedTokens;
    m_popRequestQueue;
  }

  transition(IS, Transient_GETS) {
    m_popRequestQueue;
  }

  transition(IS, {Persistent_GETX, Persistent_GETS}, IS_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

  transition(IM, {Persistent_GETX, Persistent_GETS}, IM_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

  transition({SM, SM_L}, Persistent_GETX, IM_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(SM, Persistent_GETS, SM_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(SM_L, Persistent_GETS) {
    l_popPersistentQueue;
  }

  transition(OM, Persistent_GETX, IM_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(OM, Persistent_GETS, SM_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  // Transitions from IM/SM

  transition({IM, SM}, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IM, Data_Shared, SM) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IM, Data_Owner, OM) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IM, Data_Owner_All_Tokens, MM_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    hh_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(SM, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(SM, Data_Owner, OM) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(SM, Data_Owner_All_Tokens, MM_W) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    hh_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition({IM, SM}, Transient_GETX, IM) {
    t_sendAckWithCollectedTokens;
    m_popRequestQueue;
  }

  transition({IM, SM}, Transient_GETS) {
    m_popRequestQueue;
  }

  transition({IM, SM}, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueRequest;
  }

  // Transitions from OM

  transition(OM, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(OM, Ack_All_Tokens, MM_W) {
    q_updateTokensFromResponse;
    hh_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(OM, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(OM, Data_Shared_All_Tokens, MM_W) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    hh_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(OM, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueRequest;
  }

  // Transitions from IS

  transition(IS, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IS, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_load_hit;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_load_hit;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS, Data_Owner_All_Tokens, M_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_load_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueRequest;
  }

  // Transitions from I_L

  transition(I_L, Load, IS_L) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I_L, Ifetch, IS_L) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I_L, Store, IM_L) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }


  // Transitions from S_L

  transition(S_L, Store, SM_L) {
    i_allocateTBE;
    a_issueRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  // Other transitions from *_L states

  transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETX}) {
    m_popRequestQueue;
  }

  transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, Data_Owner_All_Tokens) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition(IS_L, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueRequest;
  }

  transition({IM_L, SM_L}, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueRequest;
  }

  // Opportunistically complete the memory operation in the following
  // cases.  Note: these transitions could just use
  // g_bounceResponseToStarver, but if we have the data and tokens, we
  // might as well complete the memory request while we have the
  // chance (and then immediately forward on the data)

  transition(IM_L, Data_Owner_All_Tokens, I_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    hh_store_hit;
    ee_sendDataWithAllTokens;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(SM_L, Data_Owner_All_Tokens, S_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    hh_store_hit;
    ff_sendDataWithAllButOneTokens;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS_L, Data_Shared, I_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_load_hit;
    s_deallocateTBE;
    e_sendAckWithCollectedTokens;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS_L, {Data_Owner, Data_Owner_All_Tokens}, I_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_load_hit;
    ee_sendDataWithAllTokens;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  // Own_Lock_or_Unlock

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition(IM_L, Own_Lock_or_Unlock, IM) {
    l_popPersistentQueue;
  }

  transition(IS_L, Own_Lock_or_Unlock, IS) {
    l_popPersistentQueue;
  }

  transition(SM_L, Own_Lock_or_Unlock, SM) {
    l_popPersistentQueue;
  }
}