/* * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood * Copyright (c) 2009 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer; * redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution; * neither the name of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * AMD's contributions to the MOESI hammer protocol do not constitute an * endorsement of its similarity to any AMD products. * * Authors: Milo Martin * Brad Beckmann */ machine(L1Cache, "AMD Hammer-like protocol") : Sequencer * sequencer, CacheMemory * L1IcacheMemory, CacheMemory * L1DcacheMemory, CacheMemory * L2cacheMemory, int cache_response_latency = 10, int issue_latency = 2, int l2_cache_hit_latency = 10, bool no_mig_atomic = true { // NETWORK BUFFERS MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false"; MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false"; MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false"; MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false"; MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false"; // STATES enumeration(State, desc="Cache states", default="L1Cache_State_I") { // Base states I, desc="Idle"; S, desc="Shared"; O, desc="Owned"; M, desc="Modified (dirty)"; MM, desc="Modified (dirty and locally modified)"; // Transient States IM, "IM", desc="Issued GetX"; SM, "SM", desc="Issued GetX, we still have an old copy of the line"; OM, "OM", desc="Issued GetX, received data"; ISM, "ISM", desc="Issued GetX, received data, waiting for all acks"; M_W, "M^W", desc="Issued GetS, received exclusive data"; MM_W, "MM^W", desc="Issued GetX, received exclusive data"; IS, "IS", desc="Issued GetS"; SS, "SS", desc="Issued GetS, received data, waiting for all acks"; OI, "OI", desc="Issued PutO, waiting for ack"; MI, "MI", desc="Issued PutX, waiting for ack"; II, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack"; IT, "IT", desc="Invalid block transferring to L1"; ST, "ST", desc="S block transferring to L1"; OT, "OT", desc="O block transferring to L1"; MT, "MT", desc="M block transferring to L1"; MMT, "MMT", desc="MM block transferring to 
L1"; } // EVENTS enumeration(Event, desc="Cache events") { Load, desc="Load request from the processor"; Ifetch, desc="I-fetch request from the processor"; Store, desc="Store request from the processor"; L2_Replacement, desc="L2 Replacement"; L1_to_L2, desc="L1 to L2 transfer"; Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer"; Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer"; Complete_L2_to_L1, desc="L2 to L1 transfer completed"; // Requests Other_GETX, desc="A GetX from another processor"; Other_GETS, desc="A GetS from another processor"; Merged_GETS, desc="A Merged GetS from another processor"; Other_GETS_No_Mig, desc="A GetS from another processor"; NC_DMA_GETS, desc="special GetS when only DMA exists"; Invalidate, desc="Invalidate block"; // Responses Ack, desc="Received an ack message"; Shared_Ack, desc="Received an ack message, responder has a shared copy"; Data, desc="Received a data message"; Shared_Data, desc="Received a data message, responder has a shared copy"; Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us"; Writeback_Ack, desc="Writeback O.K. from directory"; Writeback_Nack, desc="Writeback not O.K. from directory"; // Triggers All_acks, desc="Received all required data and message acks"; All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy"; } // TYPES // STRUCTURE DEFINITIONS MessageBuffer mandatoryQueue, ordered="false"; // CacheEntry structure(Entry, desc="...", interface="AbstractCacheEntry") { State CacheState, desc="cache state"; bool Dirty, desc="Is the data dirty (different than memory)?"; DataBlock DataBlk, desc="data for the block"; bool FromL2, default="false", desc="block just moved from L2"; bool AtomicAccessed, default="false", desc="block just moved from L2"; } // TBE fields structure(TBE, desc="...") { State TBEState, desc="Transient state"; DataBlock DataBlk, desc="data for the block, required for concurrent writebacks"; bool Dirty, desc="Is the data dirty (different than memory)?"; int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for"; bool Sharers, desc="On a GetS, did we find any other sharers in the system"; MachineID LastResponder, desc="last machine to send a response for this request"; MachineID CurOwner, desc="current owner of the block, used for UnblockS responses"; Time InitialRequestTime, default="0", desc="time the initial requests was sent from the L1Cache"; Time ForwardRequestTime, default="0", desc="time the dir forwarded the request"; Time FirstResponseTime, default="0", desc="the time the first response was received"; } external_type(TBETable) { TBE lookup(Address); void allocate(Address); void deallocate(Address); bool isPresent(Address); } TBETable TBEs, template_hack=""; Entry getCacheEntry(Address addr), return_by_ref="yes" { if (L2cacheMemory.isTagPresent(addr)) { return static_cast(Entry, L2cacheMemory[addr]); } else if (L1DcacheMemory.isTagPresent(addr)) { return static_cast(Entry, L1DcacheMemory[addr]); } else { return static_cast(Entry, L1IcacheMemory[addr]); } } void changePermission(Address addr, AccessPermission permission) { if (L2cacheMemory.isTagPresent(addr)) { return L2cacheMemory.changePermission(addr, permission); } else if (L1DcacheMemory.isTagPresent(addr)) { return L1DcacheMemory.changePermission(addr, permission); } else { return L1IcacheMemory.changePermission(addr, permission); } } bool isCacheTagPresent(Address addr) { return (L2cacheMemory.isTagPresent(addr) || 
L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr)); } State getState(Address addr) { assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false); assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false); assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false); if(TBEs.isPresent(addr)) { return TBEs[addr].TBEState; } else if (isCacheTagPresent(addr)) { return getCacheEntry(addr).CacheState; } return State:I; } void setState(Address addr, State state) { assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false); assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false); assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false); if (TBEs.isPresent(addr)) { TBEs[addr].TBEState := state; } if (isCacheTagPresent(addr)) { getCacheEntry(addr).CacheState := state; // Set permission if ((state == State:MM) || (state == State:MM_W)) { changePermission(addr, AccessPermission:Read_Write); } else if (state == State:S || state == State:O || state == State:M || state == State:M_W || state == State:SM || state == State:ISM || state == State:OM || state == State:SS) { changePermission(addr, AccessPermission:Read_Only); } else { changePermission(addr, AccessPermission:Invalid); } } } Event mandatory_request_type_to_event(CacheRequestType type) { if (type == CacheRequestType:LD) { return Event:Load; } else if (type == CacheRequestType:IFETCH) { return Event:Ifetch; } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) { return Event:Store; } else { error("Invalid CacheRequestType"); } } GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) { if (machineIDToMachineType(sender) == MachineType:L1Cache) { // // NOTE direct local hits should not call this // return GenericMachineType:L1Cache_wCC; } else { return ConvertMachToGenericMach(machineIDToMachineType(sender)); } } GenericMachineType testAndClearLocalHit(Address addr) { if (getCacheEntry(addr).FromL2) { getCacheEntry(addr).FromL2 := false; return GenericMachineType:L2Cache; } else { return GenericMachineType:L1Cache; } } MessageBuffer triggerQueue, ordered="true"; // ** OUT_PORTS ** out_port(requestNetwork_out, RequestMsg, requestFromCache); out_port(responseNetwork_out, ResponseMsg, responseFromCache); out_port(unblockNetwork_out, ResponseMsg, unblockFromCache); out_port(triggerQueue_out, TriggerMsg, triggerQueue); // ** IN_PORTS ** // Trigger Queue in_port(triggerQueue_in, TriggerMsg, triggerQueue) { if (triggerQueue_in.isReady()) { peek(triggerQueue_in, TriggerMsg) { if (in_msg.Type == TriggerType:L2_to_L1) { trigger(Event:Complete_L2_to_L1, in_msg.Address); } else if (in_msg.Type == TriggerType:ALL_ACKS) { trigger(Event:All_acks, in_msg.Address); } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) { trigger(Event:All_acks_no_sharers, in_msg.Address); } else { error("Unexpected message"); } } } } // Nothing from the request network // Forward Network in_port(forwardToCache_in, RequestMsg, forwardToCache) { if (forwardToCache_in.isReady()) { peek(forwardToCache_in, RequestMsg, block_on="Address") { if (in_msg.Type == CoherenceRequestType:GETX) { trigger(Event:Other_GETX, in_msg.Address); } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) { trigger(Event:Merged_GETS, in_msg.Address); } else if (in_msg.Type == CoherenceRequestType:GETS) { if (machineCount(MachineType:L1Cache) > 1) { if 
(isCacheTagPresent(in_msg.Address)) { if (getCacheEntry(in_msg.Address).AtomicAccessed && no_mig_atomic) { trigger(Event:Other_GETS_No_Mig, in_msg.Address); } else { trigger(Event:Other_GETS, in_msg.Address); } } else { trigger(Event:Other_GETS, in_msg.Address); } } else { trigger(Event:NC_DMA_GETS, in_msg.Address); } } else if (in_msg.Type == CoherenceRequestType:INV) { trigger(Event:Invalidate, in_msg.Address); } else if (in_msg.Type == CoherenceRequestType:WB_ACK) { trigger(Event:Writeback_Ack, in_msg.Address); } else if (in_msg.Type == CoherenceRequestType:WB_NACK) { trigger(Event:Writeback_Nack, in_msg.Address); } else { error("Unexpected message"); } } } } // Response Network in_port(responseToCache_in, ResponseMsg, responseToCache) { if (responseToCache_in.isReady()) { peek(responseToCache_in, ResponseMsg, block_on="Address") { if (in_msg.Type == CoherenceResponseType:ACK) { trigger(Event:Ack, in_msg.Address); } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) { trigger(Event:Shared_Ack, in_msg.Address); } else if (in_msg.Type == CoherenceResponseType:DATA) { trigger(Event:Data, in_msg.Address); } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) { trigger(Event:Shared_Data, in_msg.Address); } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) { trigger(Event:Exclusive_Data, in_msg.Address); } else { error("Unexpected message"); } } } } // Nothing from the unblock network // Mandatory Queue in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") { if (mandatoryQueue_in.isReady()) { peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") { // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache if (in_msg.Type == CacheRequestType:IFETCH) { // ** INSTRUCTION ACCESS *** // Check to see if it is in the OTHER L1 if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) { // The block is in the wrong L1, try to write it to the L2 if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) { trigger(Event:L1_to_L2, in_msg.LineAddress); } else { trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress)); } } if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) { // The tag matches for the L1, so the L1 fetches the line. 
          // We know it can't be in the L2 due to exclusion
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
        } else {
          if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
            // L1 doesn't have the line, but we have space for it in the L1
            if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
              // L2 has it (maybe not with the right permissions)
              trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress);
            } else {
              // We have room, the L2 doesn't have it, so the L1 fetches the line
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
            }
          } else {
            // No room in the L1, so we need to make room
            if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
              // The L2 has room, so we move the line from the L1 to the L2
              trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
            } else {
              // The L2 does not have room, so we replace a line from the L2
              trigger(Event:L2_Replacement,
                      L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)));
            }
          }
        }
      } else {
        // *** DATA ACCESS ***

        // Check to see if it is in the OTHER L1
        if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
          // The block is in the wrong L1, try to write it to the L2
          if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
            trigger(Event:L1_to_L2, in_msg.LineAddress);
          } else {
            trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
          }
        }

        if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
          // The tag matches for the L1, so the L1 fetches the line.
          // We know it can't be in the L2 due to exclusion
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
        } else {
          if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
            // L1 doesn't have the line, but we have space for it in the L1
            if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
              // L2 has it (maybe not with the right permissions)
              trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress);
            } else {
              // We have room, the L2 doesn't have it, so the L1 fetches the line
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
            }
          } else {
            // No room in the L1, so we need to make room
            if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
              // The L2 has room, so we move the line from the L1 to the L2
              trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
            } else {
              // The L2 does not have room, so we replace a line from the L2
              trigger(Event:L2_Replacement,
                      L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)));
            }
          }
        }
      }
    }
  }
  }

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(c_sendExclusiveData,
"c", desc="Send exclusive data from cache to requestor") { peek(forwardToCache_in, RequestMsg) { enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE; out_msg.Sender := machineID; out_msg.Destination.add(in_msg.Requestor); out_msg.DataBlk := getCacheEntry(address).DataBlk; out_msg.Dirty := getCacheEntry(address).Dirty; if (in_msg.DirectedProbe) { out_msg.Acks := machineCount(MachineType:L1Cache); } else { out_msg.Acks := 2; } out_msg.MessageSize := MessageSizeType:Response_Data; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(d_issuePUT, "d", desc="Issue PUT") { enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) { out_msg.Address := address; out_msg.Type := CoherenceRequestType:PUT; out_msg.Requestor := machineID; out_msg.Destination.add(map_Address_to_Directory(address)); out_msg.MessageSize := MessageSizeType:Writeback_Control; } } action(e_sendData, "e", desc="Send data from cache to requestor") { peek(forwardToCache_in, RequestMsg) { enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:DATA; out_msg.Sender := machineID; out_msg.Destination.add(in_msg.Requestor); out_msg.DataBlk := getCacheEntry(address).DataBlk; out_msg.Dirty := getCacheEntry(address).Dirty; if (in_msg.DirectedProbe) { out_msg.Acks := machineCount(MachineType:L1Cache); } else { out_msg.Acks := 2; } out_msg.MessageSize := MessageSizeType:Response_Data; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") { peek(forwardToCache_in, RequestMsg) { enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:DATA_SHARED; out_msg.Sender := machineID; out_msg.Destination.add(in_msg.Requestor); out_msg.DataBlk := getCacheEntry(address).DataBlk; DEBUG_EXPR(out_msg.DataBlk); out_msg.Dirty := getCacheEntry(address).Dirty; if (in_msg.DirectedProbe) { out_msg.Acks := machineCount(MachineType:L1Cache); } else { out_msg.Acks := 2; } out_msg.MessageSize := MessageSizeType:Response_Data; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") { peek(forwardToCache_in, RequestMsg) { enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:DATA_SHARED; out_msg.Sender := machineID; out_msg.Destination := in_msg.MergedRequestors; out_msg.DataBlk := getCacheEntry(address).DataBlk; DEBUG_EXPR(out_msg.DataBlk); out_msg.Dirty := getCacheEntry(address).Dirty; out_msg.Acks := machineCount(MachineType:L1Cache); out_msg.MessageSize := MessageSizeType:Response_Data; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(f_sendAck, "f", desc="Send ack from cache to requestor") { peek(forwardToCache_in, RequestMsg) { enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:ACK; out_msg.Sender := machineID; out_msg.Destination.add(in_msg.Requestor); 
out_msg.Acks := 1; assert(in_msg.DirectedProbe == false); out_msg.MessageSize := MessageSizeType:Response_Control; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") { peek(forwardToCache_in, RequestMsg) { enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:ACK_SHARED; out_msg.Sender := machineID; out_msg.Destination.add(in_msg.Requestor); out_msg.Acks := 1; assert(in_msg.DirectedProbe == false); out_msg.MessageSize := MessageSizeType:Response_Control; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(g_sendUnblock, "g", desc="Send unblock to memory") { enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:UNBLOCK; out_msg.Sender := machineID; out_msg.Destination.add(map_Address_to_Directory(address)); out_msg.MessageSize := MessageSizeType:Unblock_Control; } } action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") { enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:UNBLOCKM; out_msg.Sender := machineID; out_msg.Destination.add(map_Address_to_Directory(address)); out_msg.MessageSize := MessageSizeType:Unblock_Control; } } action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") { enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:UNBLOCKS; out_msg.Sender := machineID; out_msg.CurOwner := TBEs[address].CurOwner; out_msg.Destination.add(map_Address_to_Directory(address)); out_msg.MessageSize := MessageSizeType:Unblock_Control; } } action(h_load_hit, "h", desc="Notify sequencer the load completed.") { DEBUG_EXPR(getCacheEntry(address).DataBlk); sequencer.readCallback(address, testAndClearLocalHit(address), getCacheEntry(address).DataBlk); } action(hx_external_load_hit, "hx", desc="load required external msgs") { DEBUG_EXPR(getCacheEntry(address).DataBlk); peek(responseToCache_in, ResponseMsg) { sequencer.readCallback(address, getNondirectHitMachType(in_msg.Address, in_msg.Sender), getCacheEntry(address).DataBlk, TBEs[address].InitialRequestTime, TBEs[address].ForwardRequestTime, TBEs[address].FirstResponseTime); } } action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") { DEBUG_EXPR(getCacheEntry(address).DataBlk); peek(mandatoryQueue_in, CacheMsg) { sequencer.writeCallback(address, testAndClearLocalHit(address), getCacheEntry(address).DataBlk); getCacheEntry(address).Dirty := true; if (in_msg.Type == CacheRequestType:ATOMIC) { getCacheEntry(address).AtomicAccessed := true; } } } action(sx_external_store_hit, "sx", desc="store required external msgs.") { DEBUG_EXPR(getCacheEntry(address).DataBlk); peek(responseToCache_in, ResponseMsg) { sequencer.writeCallback(address, getNondirectHitMachType(address, in_msg.Sender), getCacheEntry(address).DataBlk, TBEs[address].InitialRequestTime, TBEs[address].ForwardRequestTime, TBEs[address].FirstResponseTime); } getCacheEntry(address).Dirty := true; } action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") { DEBUG_EXPR(getCacheEntry(address).DataBlk); sequencer.writeCallback(address, 
                             getNondirectHitMachType(address, TBEs[address].LastResponder),
                             getCacheEntry(address).DataBlk,
                             TBEs[address].InitialRequestTime,
                             TBEs[address].ForwardRequestTime,
                             TBEs[address].FirstResponseTime);
    getCacheEntry(address).Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
    TBEs[address].Dirty := getCacheEntry(address).Dirty;
    TBEs[address].Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      DEBUG_EXPR(TBEs[address].NumPendingMsgs);
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
      DEBUG_EXPR(TBEs[address].NumPendingMsgs);
      TBEs[address].LastResponder := in_msg.Sender;
      if (TBEs[address].InitialRequestTime != zero_time() &&
          in_msg.InitialRequestTime != zero_time()) {
        assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        TBEs[address].InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (TBEs[address].ForwardRequestTime != zero_time() &&
          in_msg.ForwardRequestTime != zero_time()) {
        assert(TBEs[address].ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        TBEs[address].ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (TBEs[address].FirstResponseTime == zero_time()) {
        TBEs[address].FirstResponseTime := get_time();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      TBEs[address].CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Schedule the trigger that completes an L2 to L1 transfer") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    if (TBEs[address].NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (TBEs[address].Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks :=
machineCount(MachineType:L1Cache); } else { out_msg.Acks := 2; } out_msg.MessageSize := MessageSizeType:Response_Data; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") { peek(forwardToCache_in, RequestMsg) { enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Type := CoherenceResponseType:DATA; out_msg.Sender := machineID; out_msg.Destination := in_msg.MergedRequestors; DEBUG_EXPR(out_msg.Destination); out_msg.DataBlk := TBEs[address].DataBlk; out_msg.Dirty := TBEs[address].Dirty; out_msg.Acks := machineCount(MachineType:L1Cache); out_msg.MessageSize := MessageSizeType:Response_Data; out_msg.InitialRequestTime := in_msg.InitialRequestTime; out_msg.ForwardRequestTime := in_msg.ForwardRequestTime; } } } action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") { enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Sender := machineID; out_msg.Destination.add(map_Address_to_Directory(address)); out_msg.Dirty := TBEs[address].Dirty; if (TBEs[address].Dirty) { out_msg.Type := CoherenceResponseType:WB_DIRTY; out_msg.DataBlk := TBEs[address].DataBlk; out_msg.MessageSize := MessageSizeType:Writeback_Data; } else { out_msg.Type := CoherenceResponseType:WB_CLEAN; // NOTE: in a real system this would not send data. We send // data here only so we can check it at the memory out_msg.DataBlk := TBEs[address].DataBlk; out_msg.MessageSize := MessageSizeType:Writeback_Control; } } } action(r_setSharerBit, "r", desc="We saw other sharers") { TBEs[address].Sharers := true; } action(s_deallocateTBE, "s", desc="Deallocate TBE") { TBEs.deallocate(address); } action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") { enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) { out_msg.Address := address; out_msg.Sender := machineID; out_msg.Destination.add(map_Address_to_Directory(address)); out_msg.DataBlk := TBEs[address].DataBlk; out_msg.Dirty := TBEs[address].Dirty; if (TBEs[address].Dirty) { out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY; out_msg.DataBlk := TBEs[address].DataBlk; out_msg.MessageSize := MessageSizeType:Writeback_Data; } else { out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN; // NOTE: in a real system this would not send data. We send // data here only so we can check it at the memory out_msg.DataBlk := TBEs[address].DataBlk; out_msg.MessageSize := MessageSizeType:Writeback_Control; } } } action(u_writeDataToCache, "u", desc="Write data to cache") { peek(responseToCache_in, ResponseMsg) { getCacheEntry(address).DataBlk := in_msg.DataBlk; getCacheEntry(address).Dirty := in_msg.Dirty; } } action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") { peek(responseToCache_in, ResponseMsg) { DEBUG_EXPR(getCacheEntry(address).DataBlk); DEBUG_EXPR(in_msg.DataBlk); assert(getCacheEntry(address).DataBlk == in_msg.DataBlk); getCacheEntry(address).DataBlk := in_msg.DataBlk; getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty; } } action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. 
Sets the cache to invalid, allowing a replacement in parallel with a fetch.") { if (L1DcacheMemory.isTagPresent(address)) { L1DcacheMemory.deallocate(address); } else { L1IcacheMemory.deallocate(address); } } action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") { if (L1DcacheMemory.isTagPresent(address) == false) { L1DcacheMemory.allocate(address, new Entry); } } action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") { if (L1IcacheMemory.isTagPresent(address) == false) { L1IcacheMemory.allocate(address, new Entry); } } action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") { L2cacheMemory.allocate(address, new Entry); } action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") { L2cacheMemory.deallocate(address); } action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") { if (L1DcacheMemory.isTagPresent(address)) { static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty; static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk; } else { static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty; static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk; } } action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") { if (L1DcacheMemory.isTagPresent(address)) { static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty; static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk; static_cast(Entry, L1DcacheMemory[address]).FromL2 := true; } else { static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty; static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk; static_cast(Entry, L1IcacheMemory[address]).FromL2 := true; } } action(uu_profileMiss, "\u", desc="Profile the demand miss") { peek(mandatoryQueue_in, CacheMsg) { if (L1IcacheMemory.isTagPresent(address)) { L1IcacheMemory.profileMiss(in_msg); } else if (L1DcacheMemory.isTagPresent(address)) { L1DcacheMemory.profileMiss(in_msg); } if (L2cacheMemory.isTagPresent(address) == false) { L2cacheMemory.profileMiss(in_msg); } } } action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") { mandatoryQueue_in.recycle(); } //***************************************************** // TRANSITIONS //***************************************************** // Transitions for Load/Store/L2_Replacement from transient states transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) { zz_recycleMandatoryQueue; } transition({M_W, MM_W}, {L2_Replacement}) { zz_recycleMandatoryQueue; } transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) { zz_recycleMandatoryQueue; } transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) { zz_recycleMandatoryQueue; } transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) { // stall } // Transitions moving data between the L1 and L2 caches transition({I, S, O, M, MM}, L1_to_L2) { vv_allocateL2CacheBlock; ss_copyFromL1toL2; 
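    // The L1s and the L2 are kept exclusive (see the asserts in getState/setState):
    // a block is resident in at most one of L1I, L1D, or L2. This transition therefore
    // allocates the L2 entry, copies the block down, and then deallocates the L1 copy;
    // the Trigger_L2_to_L1D/I transitions below perform the reverse move through the
    // transient IT/ST/OT/MT/MMT states.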
// Not really needed for state I gg_deallocateL1CacheBlock; } transition(I, Trigger_L2_to_L1D, IT) { ii_allocateL1DCacheBlock; tt_copyFromL2toL1; // Not really needed for state I uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(S, Trigger_L2_to_L1D, ST) { ii_allocateL1DCacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(O, Trigger_L2_to_L1D, OT) { ii_allocateL1DCacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(M, Trigger_L2_to_L1D, MT) { ii_allocateL1DCacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(MM, Trigger_L2_to_L1D, MMT) { ii_allocateL1DCacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(I, Trigger_L2_to_L1I, IT) { jj_allocateL1ICacheBlock; tt_copyFromL2toL1; // Not really needed for state I uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(S, Trigger_L2_to_L1I, ST) { jj_allocateL1ICacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(O, Trigger_L2_to_L1I, OT) { jj_allocateL1ICacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(M, Trigger_L2_to_L1I, MT) { jj_allocateL1ICacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(MM, Trigger_L2_to_L1I, MMT) { jj_allocateL1ICacheBlock; tt_copyFromL2toL1; uu_profileMiss; rr_deallocateL2CacheBlock; zz_recycleMandatoryQueue; ll_L2toL1Transfer; } transition(IT, Complete_L2_to_L1, I) { j_popTriggerQueue; } transition(ST, Complete_L2_to_L1, S) { j_popTriggerQueue; } transition(OT, Complete_L2_to_L1, O) { j_popTriggerQueue; } transition(MT, Complete_L2_to_L1, M) { j_popTriggerQueue; } transition(MMT, Complete_L2_to_L1, MM) { j_popTriggerQueue; } // Transitions from Idle transition(I, Load, IS) { ii_allocateL1DCacheBlock; i_allocateTBE; a_issueGETS; uu_profileMiss; k_popMandatoryQueue; } transition(I, Ifetch, IS) { jj_allocateL1ICacheBlock; i_allocateTBE; a_issueGETS; uu_profileMiss; k_popMandatoryQueue; } transition(I, Store, IM) { ii_allocateL1DCacheBlock; i_allocateTBE; b_issueGETX; uu_profileMiss; k_popMandatoryQueue; } transition(I, L2_Replacement) { rr_deallocateL2CacheBlock; } transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) { f_sendAck; l_popForwardQueue; } // Transitions from Shared transition({S, SM, ISM}, {Load, Ifetch}) { h_load_hit; k_popMandatoryQueue; } transition(S, Store, SM) { i_allocateTBE; b_issueGETX; uu_profileMiss; k_popMandatoryQueue; } transition(S, L2_Replacement, I) { rr_deallocateL2CacheBlock; } transition(S, {Other_GETX, Invalidate}, I) { f_sendAck; l_popForwardQueue; } transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) { ff_sendAckShared; l_popForwardQueue; } // Transitions from Owned transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) { h_load_hit; k_popMandatoryQueue; } transition(O, Store, OM) { i_allocateTBE; b_issueGETX; p_decrementNumberOfMessagesByOne; uu_profileMiss; k_popMandatoryQueue; } transition(O, L2_Replacement, OI) { i_allocateTBE; d_issuePUT; rr_deallocateL2CacheBlock; } transition(O, {Other_GETX, Invalidate}, 
I) { e_sendData; l_popForwardQueue; } transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) { ee_sendDataShared; l_popForwardQueue; } transition(O, Merged_GETS) { em_sendDataSharedMultiple; l_popForwardQueue; } // Transitions from Modified transition(MM, {Load, Ifetch}) { h_load_hit; k_popMandatoryQueue; } transition(MM, Store) { hh_store_hit; k_popMandatoryQueue; } transition(MM, L2_Replacement, MI) { i_allocateTBE; d_issuePUT; rr_deallocateL2CacheBlock; } transition(MM, {Other_GETX, Invalidate}, I) { c_sendExclusiveData; l_popForwardQueue; } transition(MM, Other_GETS, I) { c_sendExclusiveData; l_popForwardQueue; } transition(MM, NC_DMA_GETS) { c_sendExclusiveData; l_popForwardQueue; } transition(MM, Other_GETS_No_Mig, O) { ee_sendDataShared; l_popForwardQueue; } transition(MM, Merged_GETS, O) { em_sendDataSharedMultiple; l_popForwardQueue; } // Transitions from Dirty Exclusive transition(M, {Load, Ifetch}) { h_load_hit; k_popMandatoryQueue; } transition(M, Store, MM) { hh_store_hit; k_popMandatoryQueue; } transition(M, L2_Replacement, MI) { i_allocateTBE; d_issuePUT; rr_deallocateL2CacheBlock; } transition(M, {Other_GETX, Invalidate}, I) { c_sendExclusiveData; l_popForwardQueue; } transition(M, {Other_GETS, Other_GETS_No_Mig}, O) { ee_sendDataShared; l_popForwardQueue; } transition(M, NC_DMA_GETS) { ee_sendDataShared; l_popForwardQueue; } transition(M, Merged_GETS, O) { em_sendDataSharedMultiple; l_popForwardQueue; } // Transitions from IM transition(IM, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) { f_sendAck; l_popForwardQueue; } transition(IM, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(IM, Data, ISM) { u_writeDataToCache; m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(IM, Exclusive_Data, MM_W) { u_writeDataToCache; m_decrementNumberOfMessages; o_checkForCompletion; sx_external_store_hit; n_popResponseQueue; } // Transitions from SM transition(SM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) { ff_sendAckShared; l_popForwardQueue; } transition(SM, {Other_GETX, Invalidate}, IM) { f_sendAck; l_popForwardQueue; } transition(SM, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(SM, Data, ISM) { v_writeDataToCacheVerify; m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } // Transitions from ISM transition(ISM, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(ISM, All_acks_no_sharers, MM) { sxt_trig_ext_store_hit; gm_sendUnblockM; s_deallocateTBE; j_popTriggerQueue; } // Transitions from OM transition(OM, {Other_GETX, Invalidate}, IM) { e_sendData; pp_incrementNumberOfMessagesByOne; l_popForwardQueue; } transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) { ee_sendDataShared; l_popForwardQueue; } transition(OM, Merged_GETS) { em_sendDataSharedMultiple; l_popForwardQueue; } transition(OM, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(OM, {All_acks, All_acks_no_sharers}, MM) { sxt_trig_ext_store_hit; gm_sendUnblockM; s_deallocateTBE; j_popTriggerQueue; } // Transitions from IS transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) { f_sendAck; l_popForwardQueue; } transition(IS, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(IS, Shared_Ack) { m_decrementNumberOfMessages; r_setSharerBit; o_checkForCompletion; n_popResponseQueue; } transition(IS, Data, 
SS) { u_writeDataToCache; m_decrementNumberOfMessages; o_checkForCompletion; hx_external_load_hit; uo_updateCurrentOwner; n_popResponseQueue; } transition(IS, Exclusive_Data, M_W) { u_writeDataToCache; m_decrementNumberOfMessages; o_checkForCompletion; hx_external_load_hit; n_popResponseQueue; } transition(IS, Shared_Data, SS) { u_writeDataToCache; r_setSharerBit; m_decrementNumberOfMessages; o_checkForCompletion; hx_external_load_hit; uo_updateCurrentOwner; n_popResponseQueue; } // Transitions from SS transition(SS, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(SS, Shared_Ack) { m_decrementNumberOfMessages; r_setSharerBit; o_checkForCompletion; n_popResponseQueue; } transition(SS, All_acks, S) { gs_sendUnblockS; s_deallocateTBE; j_popTriggerQueue; } transition(SS, All_acks_no_sharers, S) { // Note: The directory might still be the owner, so that is why we go to S gs_sendUnblockS; s_deallocateTBE; j_popTriggerQueue; } // Transitions from MM_W transition(MM_W, Store) { hh_store_hit; k_popMandatoryQueue; } transition(MM_W, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(MM_W, All_acks_no_sharers, MM) { gm_sendUnblockM; s_deallocateTBE; j_popTriggerQueue; } // Transitions from M_W transition(M_W, Store, MM_W) { hh_store_hit; k_popMandatoryQueue; } transition(M_W, Ack) { m_decrementNumberOfMessages; o_checkForCompletion; n_popResponseQueue; } transition(M_W, All_acks_no_sharers, M) { gm_sendUnblockM; s_deallocateTBE; j_popTriggerQueue; } // Transitions from OI/MI transition({OI, MI}, {Other_GETX, Invalidate}, II) { q_sendDataFromTBEToCache; l_popForwardQueue; } transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) { q_sendDataFromTBEToCache; l_popForwardQueue; } transition({OI, MI}, Merged_GETS, OI) { qm_sendDataFromTBEToCache; l_popForwardQueue; } transition(MI, Writeback_Ack, I) { t_sendExclusiveDataFromTBEToMemory; s_deallocateTBE; l_popForwardQueue; } transition(OI, Writeback_Ack, I) { qq_sendDataFromTBEToMemory; s_deallocateTBE; l_popForwardQueue; } // Transitions from II transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) { f_sendAck; l_popForwardQueue; } transition(II, Writeback_Ack, I) { g_sendUnblock; s_deallocateTBE; l_popForwardQueue; } transition(II, Writeback_Nack, I) { s_deallocateTBE; l_popForwardQueue; } }
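// Writeback-race summary (descriptive note on the OI/MI/II transitions above): once a
// PUT has been issued, the line's data lives only in the TBE. If a forwarded GETX or
// Invalidate wins the race, the data is supplied from the TBE and the state moves to II;
// the eventual Writeback_Ack is then answered with a plain unblock (no data), and a
// Writeback_Nack simply deallocates the TBE, since the data has already been handed to
// the racing requestor.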