author     Dan Gibson <gibson@cs.wisc.edu>    2009-05-11 10:38:45 -0700
committer  Dan Gibson <gibson@cs.wisc.edu>    2009-05-11 10:38:45 -0700
commit     d8c592a05d884560b3cbbe04d9e1ed9cf6575eaa (patch)
tree       6902f66ea067a5f2a63a6f149c6be0ddc6777337 /src
parent     6ceaffd7240993761785c0d2f5e4f92bd94fbf32 (diff)
download   gem5-d8c592a05d884560b3cbbe04d9e1ed9cf6575eaa.tar.xz
ruby: remove unnecessary code.
1) Removing files from the ruby build left some unresolved symbols. Those have been fixed.
2) Most of the dependencies on Simics data types and the Simics interface files have been removed.
3) Almost all mention of Opal is gone.
4) Huge chunks of LogTM are now gone.
5) Handling 1-4 left roughly hundreds of unresolved references, which were fixed, yielding a snowball effect (and the massive size of this delta).
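The recurring pattern in this delta is visible in the initvar.cc/initvar.hh hunks below: the Simics-registered get_attr/set_attr callbacks are dropped from the initvar_t constructor, and the dispatch functions that used to forward to them become stubs. A minimal C++ sketch of the resulting shape, condensed from those hunks (the attr_value_t/set_error_t definitions here are simplified placeholders, not the actual gem5 headers):

#include <iostream>

// Illustrative stand-ins for the Simics attribute types that
// FakeSimicsDataTypes.hh fakes; simplified, not the real definitions.
struct attr_value_t { int kind; long integer; };
enum set_error_t { Sim_Set_Ok, Sim_Set_Illegal_Value };

class initvar_t {
  public:
    // After this change the constructor no longer takes get_attr_t/set_attr_t
    // callbacks; only the allocate and generate-values hooks remain.
    initvar_t(void (*allocate_fn)(void), void (*generate_fn)(void))
        : m_allocate_f(allocate_fn), m_generate_values_f(generate_fn) {}

    // dispatch_get used to forward to a Simics-registered get_attr callback
    // and return an attr_value_t; it is now a stub returning an int.
    int dispatch_get(void *id, void *obj, attr_value_t *idx) {
        std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented." << std::endl;
        return 0;
    }

    // dispatch_set likewise no longer forwards to a Simics set_attr callback.
    set_error_t dispatch_set(void *id, void *obj,
                             attr_value_t *val, attr_value_t *idx) {
        std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented." << std::endl;
        return Sim_Set_Illegal_Value;
    }

  private:
    void (*m_allocate_f)(void);         // allocate_fn hook (kept)
    void (*m_generate_values_f)(void);  // my_generate_fn hook (kept)
};

The global initvar_dispatch_get/initvar_dispatch_set wrappers that forwarded to these callbacks are deleted outright at the end of initvar.cc.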
Diffstat (limited to 'src')
-rw-r--r--src/mem/gems_common/ioutil/FakeSimicsDataTypes.hh (renamed from src/mem/ruby/FakeSimicsDataTypes.hh)0
-rw-r--r--src/mem/gems_common/ioutil/confio.cc2
-rw-r--r--src/mem/gems_common/ioutil/initvar.cc40
-rw-r--r--src/mem/gems_common/ioutil/initvar.hh12
-rw-r--r--src/mem/protocol/LogTM.sm83
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm1800
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm2123
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-mem.sm166
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory-msg.sm153
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory.slicc7
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm250
-rw-r--r--src/mem/protocol/MESI_CMP_filter_directory_m.slicc7
-rw-r--r--src/mem/ruby/config/RubyConfig.cc2
-rw-r--r--src/mem/ruby/config/config.hh2
-rw-r--r--src/mem/ruby/config/rubyconfig.defaults2
-rw-r--r--src/mem/ruby/config/tester.defaults2
-rw-r--r--src/mem/ruby/init.cc244
-rw-r--r--src/mem/ruby/init.hh10
-rw-r--r--src/mem/ruby/interfaces/OpalInterface.cc446
-rw-r--r--src/mem/ruby/interfaces/OpalInterface.hh214
-rw-r--r--src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh3
-rw-r--r--src/mem/ruby/network/simple/Topology.cc3
-rw-r--r--src/mem/ruby/profiler/Profiler.cc1319
-rw-r--r--src/mem/ruby/profiler/Profiler.hh593
-rw-r--r--src/mem/ruby/recorder/CacheRecorder.cc36
-rw-r--r--src/mem/ruby/simics/commands.cc867
-rw-r--r--src/mem/ruby/simics/commands.hh106
-rw-r--r--src/mem/ruby/simics/interface.cc935
-rw-r--r--src/mem/ruby/simics/interface.hh152
-rw-r--r--src/mem/ruby/simics/simics_api_dummy.c105
-rw-r--r--src/mem/ruby/system/DirectoryMemory.cc10
-rw-r--r--src/mem/ruby/system/PerfectCacheMemory.hh1
-rw-r--r--src/mem/ruby/system/PersistentArbiter.hh1
-rw-r--r--src/mem/ruby/system/Sequencer.cc155
-rw-r--r--src/mem/ruby/system/System.cc10
-rw-r--r--src/mem/ruby/system/System.hh3
-rw-r--r--src/mem/ruby/tester/DeterministicDriver.cc4
-rw-r--r--src/mem/ruby/tester/RaceyDriver.cc4
-rw-r--r--src/mem/ruby/tester/SyntheticDriver.cc4
-rw-r--r--src/mem/ruby/tester/Tester.cc4
-rw-r--r--src/mem/ruby/tester/main.cc4
-rw-r--r--src/mem/ruby/tester/test_framework.cc7
42 files changed, 484 insertions, 9407 deletions
diff --git a/src/mem/ruby/FakeSimicsDataTypes.hh b/src/mem/gems_common/ioutil/FakeSimicsDataTypes.hh
index b6fcda95c..b6fcda95c 100644
--- a/src/mem/ruby/FakeSimicsDataTypes.hh
+++ b/src/mem/gems_common/ioutil/FakeSimicsDataTypes.hh
diff --git a/src/mem/gems_common/ioutil/confio.cc b/src/mem/gems_common/ioutil/confio.cc
index ff1e25f12..54a96527e 100644
--- a/src/mem/gems_common/ioutil/confio.cc
+++ b/src/mem/gems_common/ioutil/confio.cc
@@ -59,8 +59,6 @@ using namespace std;
// #endif
// };
-#include "FakeSimicsDataTypes.hh"
-
#include "confio.hh"
/*------------------------------------------------------------------------*/
diff --git a/src/mem/gems_common/ioutil/initvar.cc b/src/mem/gems_common/ioutil/initvar.cc
index 8a560176a..b6b7ff9e0 100644
--- a/src/mem/gems_common/ioutil/initvar.cc
+++ b/src/mem/gems_common/ioutil/initvar.cc
@@ -70,8 +70,6 @@ using namespace std;
// #endif
// };
-#include "FakeSimicsDataTypes.hh"
-
#include "Global.hh"
#include "confio.hh"
@@ -124,8 +122,7 @@ static set_error_t initvar_set_attr( void *ptr, void *obj,
initvar_t::initvar_t( const char *name, const char *relativeIncludePath,
const char *initializingString,
void (*allocate_fn)(void),
- void (*my_generate_fn)(void),
- get_attr_t my_get_attr, set_attr_t my_set_attr )
+ void (*my_generate_fn)(void) )
{
m_is_init = false;
m_name = (char *) malloc( sizeof(char)*(strlen( name ) + 2) );
@@ -135,8 +132,6 @@ initvar_t::initvar_t( const char *name, const char *relativeIncludePath,
strcpy( m_rel_include_path, relativeIncludePath );
m_allocate_f = allocate_fn;
m_generate_values_f = my_generate_fn;
- m_my_get_attr = my_get_attr;
- m_my_set_attr = my_set_attr;
initvar_t::m_inst = this;
init_config_reader( initializingString );
@@ -247,7 +242,7 @@ void initvar_t::checkInitialization( void )
}
//**************************************************************************
-attr_value_t initvar_t::dispatch_get( void *id, void *obj,
+int initvar_t::dispatch_get( void *id, void *obj,
attr_value_t *idx )
{
const char *command = (const char *) id;
@@ -256,13 +251,11 @@ attr_value_t initvar_t::dispatch_get( void *id, void *obj,
DEBUG_OUT(" : you must initialize %s with a configuration file first.\n", m_name);
DEBUG_OUT(" : use the command \'%s0.init\'\n", m_name);
- attr_value_t ret;
- ret.kind = Sim_Val_Invalid;
- ret.u.integer = 0;
- return ret;
+ return 0;
}
- return ((*m_my_get_attr)(id, obj, idx));
+ std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented." << std::endl;
+ return 0;
}
@@ -331,7 +324,9 @@ set_error_t initvar_t::dispatch_set( void *id, void *obj,
return Sim_Set_Illegal_Value;
}
- return (*m_my_set_attr)( id, obj, val, idx );
+
+ std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented." << std::endl;
+ return Sim_Set_Illegal_Value;
}
/*------------------------------------------------------------------------*/
@@ -588,22 +583,3 @@ const char *initvar_t::get_config_name( void )
return m_config_filename;
}
-/*------------------------------------------------------------------------*/
-/* Global functions */
-/*------------------------------------------------------------------------*/
-
-//**************************************************************************
-attr_value_t initvar_dispatch_get( void *id, void *obj,
- attr_value_t *idx )
-{
- initvar_t *init_obj = initvar_t::m_inst;
- return (init_obj->dispatch_get( id, obj, idx ));
-}
-
-//**************************************************************************
-set_error_t initvar_dispatch_set( void *id, void *obj,
- attr_value_t *val, attr_value_t *idx )
-{
- initvar_t *init_obj = initvar_t::m_inst;
- return (init_obj->dispatch_set( id, obj, val, idx ));
-}
diff --git a/src/mem/gems_common/ioutil/initvar.hh b/src/mem/gems_common/ioutil/initvar.hh
index d88f80c32..f872e1cd8 100644
--- a/src/mem/gems_common/ioutil/initvar.hh
+++ b/src/mem/gems_common/ioutil/initvar.hh
@@ -70,8 +70,8 @@ public:
initvar_t( const char *name, const char *relativeIncludePath,
const char *initializingString,
void (*allocate_fn)(void),
- void (*my_generate_fn)(void),
- get_attr_t my_get_attr, set_attr_t my_set_attr );
+ void (*my_generate_fn)(void)
+ );
/**
* Destructor: frees object.
@@ -96,8 +96,8 @@ public:
const char *get_config_name( void );
/// calls through to the get_attr function, if object is initialized
- attr_value_t dispatch_get( void *id, void *obj,
- attr_value_t *idx );
+ int dispatch_get( void *id, void *obj,
+ attr_value_t *idx );
/** adds initialization attributes, calls through to the set_attr function,
* if object is initialized.
@@ -144,10 +144,6 @@ protected:
/// a pointer to the generate values function
void (*m_generate_values_f)(void);
- /// a pointer to the session get function
- get_attr_t m_my_get_attr;
- /// a pointer to the session set function
- set_attr_t m_my_set_attr;
};
diff --git a/src/mem/protocol/LogTM.sm b/src/mem/protocol/LogTM.sm
deleted file mode 100644
index 02c6656ac..000000000
--- a/src/mem/protocol/LogTM.sm
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- This file has been modified by Kevin Moore and Dan Nussbaum of the
- Scalable Systems Research Group at Sun Microsystems Laboratories
- (http://research.sun.com/scalable/) to support the Adaptive
- Transactional Memory Test Platform (ATMTP).
-
- Please send email to atmtp-interest@sun.com with feedback, questions, or
- to request future announcements about ATMTP.
-
- ----------------------------------------------------------------------
-
- File modification date: 2008-02-23
-
- ----------------------------------------------------------------------
-*/
-
-external_type(PartialAddressFilter, desc="Bloom filter for tracking transaction locks."){
- bool isRead(Address);
- bool isWrite(Address);
-
- void addEntry(Address, bool);
- void clear();
-}
-
-external_type(TransactionInterfaceManager) {
- bool shouldNackLoad(Address, uint64, MachineID);
- bool shouldNackStore(Address, uint64, MachineID);
- bool checkReadWriteSignatures(Address);
- bool checkWriteSignatures(Address);
-
- void notifySendNack(Address, uint64, MachineID);
- void notifyReceiveNack(int, Address, uint64, uint64, MachineID);
- void notifyReceiveNackFinal(int, Address);
-
- uint64 getTimestamp(int);
- uint64 getOldestTimestamp();
-
- bool existGlobalLoadConflict(int, Address);
- bool existGlobalStoreConflict(int, Address);
-
- void profileTransactionMiss(int, bool);
-
- void xactReplacement(Address);
-
- /* DEPRECATED */
- bool existLoadConflict(Address);
- bool existStoreConflict(Address);
- bool isInReadFilterSummary(Address);
- bool isInWriteFilterSummary(Address);
- bool isTokenOwner(int);
- void setAbortFlag(int, Address);
- void setEnemyProcessor(int, MachineID);
- bool isRemoteOlder(uint64);
-
-}
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm
deleted file mode 100644
index 468cf3c1c..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-L1cache.sm
+++ /dev/null
@@ -1,1800 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- This file has been modified by Kevin Moore and Dan Nussbaum of the
- Scalable Systems Research Group at Sun Microsystems Laboratories
- (http://research.sun.com/scalable/) to support the Adaptive
- Transactional Memory Test Platform (ATMTP).
-
- Please send email to atmtp-interest@sun.com with feedback, questions, or
- to request future announcements about ATMTP.
-
- ----------------------------------------------------------------------
-
- File modification date: 2008-02-23
-
- ----------------------------------------------------------------------
-*/
-
-/*
- * $Id$
- *
- */
-
-
-machine(L1Cache, "MESI Directory L1 Cache CMP") {
-
- // NODE L1 CACHE
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
- // a local L1 -> this L2 bank
- MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
- MessageBuffer unblockFromL1Cache, network="To", virtual_network="4", ordered="false";
-
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
- // a L2 bank -> this L1
- MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
-
- // STATES
- enumeration(State, desc="Cache states", default="L1Cache_State_I") {
- // Base states
- NP, desc="Not present in either cache";
- I, desc="a L1 cache entry Idle";
- S, desc="a L1 cache entry Shared";
- E, desc="a L1 cache entry Exclusive";
- M, desc="a L1 cache entry Modified", format="!b";
-
- // Transient States
- IS, desc="L1 idle, issued GETS, have not seen response yet";
- IM, desc="L1 idle, issued GETX, have not seen response yet";
- SM, desc="L1 idle, issued GETX, have not seen response yet";
- IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
- IS_S, desc="L1 idle, issued GETS, L2 sent us data but responses from filters have not arrived";
- IS_E, desc="L1 idle, issued GETS, L2 sent us exclusive data, but responses from filters have not arrived";
- IM_M, desc="L1 idle, issued GETX, L2 sent us data, but responses from filters have not arrived";
-
- M_I, desc="L1 replacing, waiting for ACK";
- E_I, desc="L1 replacing, waiting for ACK";
-
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // L1 events
- Load, desc="Load request from the home processor";
- Ifetch, desc="I-fetch request from the home processor";
- Store, desc="Store request from the home processor";
-
- Replace, desc="lower level cache replaced this line, also need to invalidate to maintain inclusion";
- Inv, desc="Invalidate request from L2 bank";
- Inv_X, desc="Invalidate request from L2 bank, trans CONFLICT";
-
- // internal generated request
- L1_Replacement, desc="L1 Replacement", format="!r";
- L1_Replacement_XACT, desc="L1 Replacement of trans. data", format="!r";
-
- // other requests
- Fwd_GETX, desc="GETX from other processor";
- Fwd_GETS, desc="GETS from other processor";
- Fwd_GET_INSTR, desc="GET_INSTR from other processor";
-
- //Data, desc="Data for processor";
- L2_Data, desc="Data for processor, from L2";
- L2_Data_all_Acks, desc="Data for processor, from L2, all acks";
- L2_Exclusive_Data, desc="Exlusive Data for processor, from L2";
- L2_Exclusive_Data_all_Acks, desc="Exlusive Data for processor, from L2, all acks";
- DataS_fromL1, desc="data for GETS request, need to unblock directory";
- Data_all_Acks, desc="Data for processor, all acks";
-
- Ack, desc="Ack for processor";
- Ack_all, desc="Last ack for processor";
-
- WB_Ack, desc="Ack for replacement";
-
- // Transactional responses/requests
- Nack, desc="Nack for processor";
- Nack_all, desc="Last Nack for processor";
- Check_Write_Filter, desc="Check the write filter";
- Check_Read_Write_Filter, desc="Check the read and write filters";
-
- //Fwd_GETS_T, desc="A GetS from another processor, part of a trans, but not a conflict";
- Fwd_GETS_X, desc="A GetS from another processor, trans CONFLICT";
- Fwd_GETX_X, desc="A GetS from another processor, trans CONFLICT";
- Fwd_GET_INSTR_X, desc="A GetInstr from another processor, trans CONFLICT";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
- State CacheState, desc="cache state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Address Address, desc="Line address for this TBE";
- Address PhysicalAddress, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="data is dirty";
- bool isPrefetch, desc="Set if this was caused by a prefetch";
- int pendingAcks, default="0", desc="number of pending acks";
- int ThreadID, default="0", desc="SMT thread issuing the request";
-
- bool RemoveLastOwnerFromDir, default="false", desc="The forwarded data was being replaced";
- MachineID LastOwnerID, desc="What component forwarded (last owned) the data"; // For debugging
-
- // for Transactional Memory
- uint64 Timestamp, default="0", desc="Timestamp of request";
- bool nack, default="false", desc="has this request been nacked?";
- NetDest Nackers, desc="The nodes which sent a NACK to us";
- }
-
- external_type(CacheMemory) {
- bool cacheAvail(Address);
- Address cacheProbe(Address);
- void allocate(Address);
- void deallocate(Address);
- Entry lookup(Address);
- void changePermission(Address, AccessPermission);
- bool isTagPresent(Address);
- }
-
- external_type(TBETable) {
- TBE lookup(Address);
- void allocate(Address);
- void deallocate(Address);
- bool isPresent(Address);
- }
-
- TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
-
- CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
- CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
-
-
- MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
-
- Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
- TransactionInterfaceManager xact_mgr, abstract_chip_ptr="true", constructor_hack="i";
-
- // triggerQueue used to indicate when all acks/nacks have been received
- MessageBuffer triggerQueue, ordered="false";
-
- int cache_state_to_int(State state);
-
- // inclusive cache returns L1 entries only
- Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory[addr];
- } else {
- return L1IcacheMemory[addr];
- }
- }
-
- void changeL1Permission(Address addr, AccessPermission permission) {
- if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory.changePermission(addr, permission);
- } else if(L1IcacheMemory.isTagPresent(addr)) {
- return L1IcacheMemory.changePermission(addr, permission);
- } else {
- error("cannot change permission, L1 block not present");
- }
- }
-
- bool isL1CacheTagPresent(Address addr) {
- return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
- }
-
- State getState(Address addr) {
- if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
- DEBUG_EXPR(id);
- DEBUG_EXPR(addr);
- }
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
-
- if(L1_TBEs.isPresent(addr)) {
- return L1_TBEs[addr].TBEState;
- } else if (isL1CacheTagPresent(addr)) {
- return getL1CacheEntry(addr).CacheState;
- }
- return State:NP;
- }
-
-
- // For detecting read/write conflicts on requests from remote processors
- bool shouldNackLoad(Address addr, uint64 remote_timestamp, MachineID remote_id){
- return xact_mgr.shouldNackLoad(addr, remote_timestamp, remote_id);
- }
-
- bool shouldNackStore(Address addr, uint64 remote_timestamp, MachineID remote_id){
- return xact_mgr.shouldNackStore(addr, remote_timestamp, remote_id);
- }
-
- // For querying read/write signatures on current processor
- bool checkReadWriteSignatures(Address addr){
- return xact_mgr.checkReadWriteSignatures(addr);
- }
-
- bool checkWriteSignatures(Address addr){
- return xact_mgr.checkWriteSignatures(addr);
- }
-
- void setState(Address addr, State state) {
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
-
- // MUST CHANGE
- if(L1_TBEs.isPresent(addr)) {
- L1_TBEs[addr].TBEState := state;
- }
-
- if (isL1CacheTagPresent(addr)) {
- getL1CacheEntry(addr).CacheState := state;
-
- // Set permission
- if (state == State:I) {
- changeL1Permission(addr, AccessPermission:Invalid);
- } else if (state == State:S || state == State:E) {
- changeL1Permission(addr, AccessPermission:Read_Only);
- } else if (state == State:M) {
- changeL1Permission(addr, AccessPermission:Read_Write);
- } else {
- changeL1Permission(addr, AccessPermission:Busy);
- }
- }
- }
-
- Event mandatory_request_type_to_event(CacheRequestType type) {
- if (type == CacheRequestType:LD) {
- return Event:Load;
- } else if (type == CacheRequestType:LD_XACT) {
- return Event:Load;
- } else if (type == CacheRequestType:IFETCH) {
- return Event:Ifetch;
- } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
- return Event:Store;
- } else if((type == CacheRequestType:ST_XACT) || (type == CacheRequestType:LDX_XACT) ) {
- return Event:Store;
- } else {
- error("Invalid CacheRequestType");
- }
- }
-
-
- void printRequest(CacheMsg in_msg){
- DEBUG_EXPR("Regquest msg: ");
- DEBUG_EXPR(machineID);
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.PhysicalAddress);
- DEBUG_EXPR(in_msg.Type);
- DEBUG_EXPR(in_msg.ProgramCounter);
- DEBUG_EXPR(in_msg.AccessMode);
- DEBUG_EXPR(in_msg.Size);
- DEBUG_EXPR(in_msg.Prefetch);
- DEBUG_EXPR(in_msg.Version);
- DEBUG_EXPR(in_msg.LogicalAddress);
- DEBUG_EXPR(in_msg.ThreadID);
- DEBUG_EXPR(in_msg.Timestamp);
- DEBUG_EXPR(in_msg.ExposedAction);
- }
-
- out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
- out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
- out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
- peek(triggerQueue_in, TriggerMsg) {
- if (in_msg.Type == TriggerType:ALL_ACKS) {
- if (L1_TBEs[in_msg.Address].nack == true){
- trigger(Event:Nack_all, in_msg.Address);
- } else {
- trigger(Event:Ack_all, in_msg.Address);
- }
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Response IntraChip L1 Network - response msg to this L1 cache
- in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
- if (responseIntraChipL1Network_in.isReady()) {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if(in_msg.Type == CoherenceResponseType:L2_DATA_EXCLUSIVE) {
- if( in_msg.AckCount == 0 ){
- trigger(Event:L2_Exclusive_Data_all_Acks, in_msg.Address);
- }
- else{
- trigger(Event:L2_Exclusive_Data, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceResponseType:L2_DATA) {
- if( in_msg.AckCount == 0 ){
- trigger(Event:L2_Data_all_Acks, in_msg.Address);
- }
- else{
- trigger(Event:L2_Data, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceResponseType:DATA) {
- if ( (getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
- machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache ) {
-
- trigger(Event:DataS_fromL1, in_msg.Address);
- } else if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
- trigger(Event:Data_all_Acks, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:NACK) {
- trigger(Event:Nack, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.Address);
- } else {
- error("Invalid L1 response type");
- }
- }
- }
- }
-
- // Request InterChip network - request from this L1 cache to the shared L2
- in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
- if(requestIntraChipL1Network_in.isReady()) {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:INV) {
- // check whether we have a inter-proc conflict
- if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == false){
- trigger(Event:Inv, in_msg.Address);
- }
- else{
- // there's a conflict
- trigger(Event:Inv_X, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceRequestType:INV_ESCAPE) {
- // we cannot NACK this
- trigger(Event:Inv, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
- // check whether we have a conflict
- if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- trigger(Event:Fwd_GETX_X, in_msg.Address);
- }
- else{
- // else no conflict
- // upgrade transforms to GETX due to race
- trigger(Event:Fwd_GETX, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceRequestType:GETX_ESCAPE) {
- // no need for filter checks
- trigger(Event:Fwd_GETX, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- // check whether we have a conflict
- if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- trigger(Event:Fwd_GETS_X, in_msg.Address);
- }
- else{
- // else no conflict
- trigger(Event:Fwd_GETS, in_msg.Address);
- }
- } else if(in_msg.Type == CoherenceRequestType:GETS_ESCAPE) {
- // no need for filter checks
- trigger(Event:Fwd_GETS, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
- if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- trigger(Event:Fwd_GET_INSTR_X, in_msg.Address);
- }
- else{
- // else no conflict
- trigger(Event:Fwd_GET_INSTR, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceRequestType:GET_INSTR_ESCAPE) {
- // no need for filter checks
- trigger(Event:Fwd_GET_INSTR, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:REPLACE) {
- trigger(Event:Replace, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:CHECK_WRITE_FILTER) {
- trigger(Event:Check_Write_Filter, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:CHECK_READ_WRITE_FILTER) {
- trigger(Event:Check_Read_Write_Filter, in_msg.Address);
- } else {
- error("Invalid forwarded request type");
- }
- }
- }
- }
-
- // Mandatory Queue betweens Node's CPU and it's L1 caches
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg) {
-
- // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
-
- if (in_msg.Type == CacheRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
-
- // Check to see if it is in the OTHER L1
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
- // check whether block is transactional
- if (checkReadWriteSignatures(in_msg.Address) == true){
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement_XACT, in_msg.Address);
- }
- else{
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
- }
- }
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
- // The tag matches for the L1, so the L1 asks the L2 for it.
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
- // L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- // check whether block is transactional
- if(checkReadWriteSignatures( L1IcacheMemory.cacheProbe(in_msg.Address) ) == true){
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement_XACT, L1IcacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
- }
- }
- }
- } else {
- // *** DATA ACCESS ***
-
- // Check to see if it is in the OTHER L1
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
- // check whether block is transactional
- if(checkReadWriteSignatures(in_msg.Address) == true){
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement_XACT, in_msg.Address);
- }
- else{
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
- }
- }
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
- // The tag matches for the L1, so the L1 ask the L2 for it
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
- // L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
- printRequest(in_msg);
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
- } else {
- // check whether block is transactional
- if(checkReadWriteSignatures( L1DcacheMemory.cacheProbe(in_msg.Address) ) == true){
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement_XACT, L1DcacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- // No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
- }
- }
- }
- }
- }
- }
- }
-
- // ACTIONS
- action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- if(in_msg.ExposedAction){
- out_msg.Type := CoherenceRequestType:GETS_ESCAPE;
- }
- else{
- out_msg.Type := CoherenceRequestType:GETS;
- }
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
- action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- if(in_msg.ExposedAction){
- out_msg.Type := CoherenceRequestType:GET_INSTR_ESCAPE;
- }
- else{
- out_msg.Type := CoherenceRequestType:GET_INSTR;
- }
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
-
- action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- if(in_msg.ExposedAction){
- out_msg.Type := CoherenceRequestType:GETX_ESCAPE;
- }
- else{
- out_msg.Type := CoherenceRequestType:GETX;
- }
- out_msg.Requestor := machineID;
- DEBUG_EXPR(machineID);
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
- action(c_issueUPGRADE, "c", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:UPGRADE;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- DEBUG_EXPR(address);
- DEBUG_EXPR(out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- // either return transactional timestamp or current time
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- }
- }
- }
-
- /****************************BEGIN Transactional Actions*************************/
- // send a NACK to requestor - the equivalent of a NACKed data response
- // Note we don't have to track the ackCount here because we only send data NACKs when
- // we are exclusive with the data. Otherwise the L2 will source the data (and set the ackCount
- // appropriately)
- action(e_sendNackToRequestor, "en", desc="send nack to requestor (could be L2 or L1)") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- // ackCount is by default 0
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- }
-
- // send a NACK when L2 wants us to invalidate ourselves
- action(fi_sendInvNack, "fin", desc="send data to the L2 cache") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- }
-
- // for when we want to check our Write filters
- action(a_checkWriteFilter, "awf", desc="Check our write filter for conflicts") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- // For correct conflict detection, should call shouldNackLoad() NOT
- // checkWriteSignatures()
- if(shouldNackLoad(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor) == true){
- // conflict - send a NACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- else{
- // no conflict - send ACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
- }
-
- // for when we want to check our Read + Write filters
- action(a_checkReadWriteFilter, "arwf", desc="Check our write filter for conflicts") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- // For correct conflict detection, we should call shouldNackStore() NOT
- // checkReadWriteSignatures()
- if(shouldNackStore(in_msg.PhysicalAddress, in_msg.Timestamp,in_msg.Requestor ) == true){
- // conflict - send a NACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // send oldest timestamp (or current time if no thread in transaction)
- out_msg.Timestamp := xact_mgr.getOldestTimestamp();
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(out_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- // also inform driver about sending NACK
- xact_mgr.notifySendNack(in_msg.PhysicalAddress, in_msg.Timestamp, in_msg.Requestor);
- }
- else{
- // no conflict - send ACK
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
- }
-
- action(r_notifyReceiveNack, "nrn", desc="Notify the driver when a nack is received"){
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- xact_mgr.notifyReceiveNack(L1_TBEs[address].ThreadID, in_msg.PhysicalAddress, L1_TBEs[address].Timestamp, in_msg.Timestamp, in_msg.Sender);
- }
- }
-
- // Used to driver to take abort or retry action
- action(r_notifyReceiveNackFinal, "nrnf", desc="Notify the driver when the final nack is received"){
- xact_mgr.notifyReceiveNackFinal(L1_TBEs[address].ThreadID, L1_TBEs[address].PhysicalAddress);
- }
-
- // this version uses physical address stored in TBE
-
- action(x_tbeSetPrefetch, "xp", desc="Set the prefetch bit in the TBE."){
- peek(mandatoryQueue_in, CacheMsg) {
- if(in_msg.Prefetch == PrefetchBit:No){
- L1_TBEs[address].isPrefetch := false;
- }
- else{
- assert(in_msg.Prefetch == PrefetchBit:Yes);
- L1_TBEs[address].isPrefetch := true;
- }
- }
- }
-
- action(x_tbeSetPhysicalAddress, "ia", desc="Sets the physical address field of the TBE"){
- peek(mandatoryQueue_in, CacheMsg) {
- L1_TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
- L1_TBEs[address].ThreadID := in_msg.ThreadID;
- L1_TBEs[address].Timestamp := in_msg.Timestamp;
- }
- }
-
- // Send unblock cancel to L2 (for nacked requests that blocked directory)
- action(jj_sendUnblockCancel, "\jc", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK_CANCEL;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along list of NACKers
- out_msg.Nackers := L1_TBEs[address].Nackers;
- }
- }
-
- //same as ACK case, but sets the NACK flag for TBE entry
- action(q_updateNackCount, "qn", desc="Update ack count") {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- // mark this request as having been NACKed
- L1_TBEs[address].nack := true;
- APPEND_TRANSITION_COMMENT(" before pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
- L1_TBEs[address].Nackers.add(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
-
- APPEND_TRANSITION_COMMENT(" after pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- APPEND_TRANSITION_COMMENT(" sender: ");
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- if (L1_TBEs[address].pendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := TriggerType:ALL_ACKS;
- APPEND_TRANSITION_COMMENT(" Triggering All_Acks");
- }
- }
- }
- }
-
- action(q_profileOverflow, "po", desc="profile the overflowed block"){
- profileOverflow(address, machineID);
- }
-
- action(qq_xactReplacement, "\q", desc="replaced a transactional block"){
- xact_mgr.xactReplacement(address);
- }
-
- action(p_profileRequest, "pcc", desc="Profile request msg") {
- peek(mandatoryQueue_in, CacheMsg) {
- APPEND_TRANSITION_COMMENT(" request: Timestamp: ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" PA: ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" Type: ");
- APPEND_TRANSITION_COMMENT(in_msg.Type);
- APPEND_TRANSITION_COMMENT(" VPC: ");
- APPEND_TRANSITION_COMMENT(in_msg.ProgramCounter);
- APPEND_TRANSITION_COMMENT(" Mode: ");
- APPEND_TRANSITION_COMMENT(in_msg.AccessMode);
- APPEND_TRANSITION_COMMENT(" PF: ");
- APPEND_TRANSITION_COMMENT(in_msg.Prefetch);
- APPEND_TRANSITION_COMMENT(" VA: ");
- APPEND_TRANSITION_COMMENT(in_msg.LogicalAddress);
- APPEND_TRANSITION_COMMENT(" Thread: ");
- APPEND_TRANSITION_COMMENT(in_msg.ThreadID);
- APPEND_TRANSITION_COMMENT(" Exposed: ");
- APPEND_TRANSITION_COMMENT(in_msg.ExposedAction);
- }
- }
-
- /********************************END Transactional Actions************************/
-
- action(d_sendDataToRequestor, "d", desc="send data to requestor") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := L1_TBEs[address].DataBlk;
- out_msg.Dirty := L1_TBEs[address].Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.RemoveLastOwnerFromDir := true;
- out_msg.LastOwnerID := machineID;
- }
- }
- }
-
- action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := L1_TBEs[address].DataBlk;
- out_msg.Dirty := L1_TBEs[address].Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
- action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := L1_TBEs[address].DataBlk;
- out_msg.Dirty := L1_TBEs[address].Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
- action(fi_sendInvAck, "fi", desc="send data to the L2 cache") {
- peek(requestIntraChipL1Network_in, RequestMsg) {
- enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
-
- action(g_issuePUTX, "g", desc="send data to the L2 cache") {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:PUTX;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- if (getL1CacheEntry(address).Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(g_issuePUTS, "gs", desc="send clean data to the L2 cache") {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:PUTS;
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- if (getL1CacheEntry(address).Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- // used to determine whether to set sticky-M or sticky-S state in directory (M or SS in L2)
- action(g_issuePUTXorPUTS, "gxs", desc="send data to the L2 cache") {
- enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
- out_msg.Address := address;
- if(checkWriteSignatures(address) == true){
- // we should set sticky-M
- out_msg.Type := CoherenceRequestType:PUTX;
- }
- else{
- // we should set sticky-S
- out_msg.Type := CoherenceRequestType:PUTS;
- }
- out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
- out_msg.Dirty := getL1CacheEntry(address).Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- if (getL1CacheEntry(address).Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // inform L2 whether request was transactional
- //out_msg.Transactional := L1_TBEs[address].Trans;
- out_msg.Transactional := checkReadWriteSignatures(address);
-
- out_msg.RemoveLastOwnerFromDir := L1_TBEs[address].RemoveLastOwnerFromDir;
- out_msg.LastOwnerID := L1_TBEs[address].LastOwnerID;
- }
- }
-
- action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // inform L2 whether request was transactional
- // out_msg.Transactional := L1_TBEs[address].Trans;
- out_msg.Transactional := checkReadWriteSignatures(address);
-
- out_msg.RemoveLastOwnerFromDir := L1_TBEs[address].RemoveLastOwnerFromDir;
- out_msg.LastOwnerID := L1_TBEs[address].LastOwnerID;
- }
- }
-
-
-
- action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
- DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
- sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
- }
-
- action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
- DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
- sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
- getL1CacheEntry(address).Dirty := true;
- }
-
- action(h_load_conflict, "hc", desc="Notify sequencer of conflict on load") {
- sequencer.readConflictCallback(address);
- }
-
- action(hh_store_conflict, "\hc", desc="If not prefetch, notify sequencer that store completed.") {
- sequencer.writeConflictCallback(address);
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
- check_allocate(L1_TBEs);
- L1_TBEs.allocate(address);
- L1_TBEs[address].isPrefetch := false;
- L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
- L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
- }
-
- action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
- }
-
- action(j_popTriggerQueue, "jp", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
- }
-
- action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
- }
-
- action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- L1_TBEs.deallocate(address);
- }
-
- action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
- getL1CacheEntry(address).Dirty := in_msg.Dirty;
- if (machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
- L1_TBEs[address].RemoveLastOwnerFromDir := in_msg.RemoveLastOwnerFromDir;
- L1_TBEs[address].LastOwnerID := in_msg.LastOwnerID;
- }
- }
- }
-
- action(q_updateAckCount, "q", desc="Update ack count") {
- peek(responseIntraChipL1Network_in, ResponseMsg) {
- APPEND_TRANSITION_COMMENT(" before pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" after pendingAcks: ");
- APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
- APPEND_TRANSITION_COMMENT(" sender: ");
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- if (L1_TBEs[address].pendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := TriggerType:ALL_ACKS;
- APPEND_TRANSITION_COMMENT(" Triggering All_Acks");
- }
- }
- }
- }
-
- action(z_stall, "z", desc="Stall") {
- }
-
- action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory.deallocate(address);
- } else {
- L1IcacheMemory.deallocate(address);
- }
- }
-
- action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (L1DcacheMemory.isTagPresent(address) == false) {
- L1DcacheMemory.allocate(address);
- // reset trans bit
- }
- }
-
- action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (L1IcacheMemory.isTagPresent(address) == false) {
- L1IcacheMemory.allocate(address);
- // reset trans bit
- }
- }
-
- action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
- requestIntraChipL1Network_in.recycle();
- }
-
- action(z_recycleMandatoryQueue, "\z", desc="recycle L1 request queue") {
- mandatoryQueue_in.recycle();
- }
-
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
- profile_L1Cache_miss(in_msg, id);
- }
- }
-
- action(uuu_profileTransactionLoadMiss, "\uu", desc="Profile Miss") {
- xact_mgr.profileTransactionMiss(L1_TBEs[address].ThreadID, true);
- }
-
- action(uuu_profileTransactionStoreMiss, "\uuu", desc="Profile Miss") {
- xact_mgr.profileTransactionMiss(L1_TBEs[address].ThreadID, false);
- }
-
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // For filter responses
- transition({NP, I, S, E, M, IS, IM, SM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, Check_Write_Filter){
- a_checkWriteFilter;
- l_popRequestQueue;
- }
-
- transition({NP, I, S, E, M, IS, IM, SM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, Check_Read_Write_Filter){
- a_checkReadWriteFilter;
- l_popRequestQueue;
- }
-
- // Transitions for Load/Store/Replacement/WriteBack from transient states
- transition({IS, IM, IS_I, IS_S, IS_E, IM_M, M_I, E_I}, {Load, Ifetch, Store, L1_Replacement, L1_Replacement_XACT}) {
- z_recycleMandatoryQueue;
- }
-
- // Transitions from Idle
- transition({NP,I}, {L1_Replacement, L1_Replacement_XACT}) {
- ff_deallocateL1CacheBlock;
- }
-
- transition({NP,I}, Load, IS) {
- p_profileRequest;
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- x_tbeSetPrefetch;
- x_tbeSetPhysicalAddress;
- a_issueGETS;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, Ifetch, IS) {
- p_profileRequest;
- pp_allocateL1ICacheBlock;
- i_allocateTBE;
- x_tbeSetPhysicalAddress;
- ai_issueGETINSTR;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, Store, IM) {
- p_profileRequest;
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- x_tbeSetPrefetch;
- x_tbeSetPhysicalAddress;
- b_issueGETX;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP, I}, Inv) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
-  // Transactional invalidates to blocks in NP or I target transactional blocks
-  // that have been silently replaced.
-  // FALSE POSITIVE - we can't tell whether the block was never in our read/write set or was silently replaced
- transition({NP, I}, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements. This happens due to our silent replacements.
- transition({NP, I}, Replace) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transitions from Shared
- transition(S, {Load,Ifetch}) {
- p_profileRequest;
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(S, Store, IM) {
- p_profileRequest;
- i_allocateTBE;
- x_tbeSetPrefetch;
- x_tbeSetPhysicalAddress;
- b_issueGETX;
- uu_profileMiss;
- k_popMandatoryQueue;
- }
-
- transition(S, L1_Replacement, I) {
- ff_deallocateL1CacheBlock;
- }
-
- transition(S, L1_Replacement_XACT, I) {
- q_profileOverflow;
- qq_xactReplacement;
- ff_deallocateL1CacheBlock;
- }
-
- transition(S, Inv, I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(S, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements.
- transition(S, Replace, I){
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transitions from Exclusive
-
- transition(E, {Load, Ifetch}) {
- p_profileRequest;
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(E, Store, M) {
- p_profileRequest;
- hh_store_hit;
- k_popMandatoryQueue;
- }
-
- transition(E, L1_Replacement, M_I) {
- // The data is clean
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateL1CacheBlock;
- }
-
-  // We can't go to M_I here because we need to maintain transactional read isolation on this line, and M_I allows GETS and GETXs to
-  // be serviced. For correctness we need to make sure we are marked as a transactional reader (if we never read transactionally
-  // written data back exclusively) or as a transactional writer.
- transition(E, L1_Replacement_XACT, E_I) {
- q_profileOverflow;
- qq_xactReplacement;
- // The data is clean
- i_allocateTBE;
- g_issuePUTXorPUTS; // send data and hold, but do not release on forwarded requests
- ff_deallocateL1CacheBlock;
- }
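The two replacement transitions above differ only in where the evicting L1 parks: a plain replacement of E (or M, below) data goes to M_I and may hand the line to a forwarded requestor, while a transactional replacement goes to E_I so read/write isolation is preserved until the L2 acknowledges the PUT. A small sketch of that choice (the enum and function names are illustrative):

    // Illustrative summary of the L1_Replacement / L1_Replacement_XACT
    // transitions from E and M: transactional data must stay isolated, so it
    // is written back through E_I (forwarded requests are NACKed) instead of
    // M_I (forwarded requests can be serviced from the TBE).
    enum class WritebackState { M_I, E_I };

    WritebackState writebackStateFor(bool transactionalReplacement) {
        return transactionalReplacement ? WritebackState::E_I
                                        : WritebackState::M_I;
    }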
-
- transition(E, Inv, I) {
- // don't send data
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(E, Inv_X){
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements
- transition(E, Replace, I) {
- // don't send data
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(E, Fwd_GETX, I) {
- d_sendDataToRequestor;
- l_popRequestQueue;
- }
-
- transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popRequestQueue;
- }
-
- // If we see Fwd_GETS_X this is a FALSE POSITIVE, since we never
- // modified this block
- transition(E, {Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}){
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- // Transitions from Modified
- transition(M, {Load, Ifetch}) {
- p_profileRequest;
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(M, Store) {
- p_profileRequest;
- hh_store_hit;
- k_popMandatoryQueue;
- }
-
- transition(M, L1_Replacement, M_I) {
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateL1CacheBlock;
- }
-
-  // In order to prevent releasing isolation of transactional data (either written or just read) we need to
-  // mark ourselves as a transactional reader (e.g. SS state in L2) or transactional writer (e.g. M state in L2). We need to transition to the same E_I
-  // state as for transactional replacements from E state, and ignore all requests.
- transition(M, L1_Replacement_XACT, E_I) {
- q_profileOverflow;
- qq_xactReplacement;
- i_allocateTBE;
- g_issuePUTXorPUTS; // send data, but do not release on forwarded requests
- ff_deallocateL1CacheBlock;
- }
-
- transition({M_I, E_I}, WB_Ack, I) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(M, Inv, I) {
- f_sendDataToL2;
- l_popRequestQueue;
- }
-
- // for L2 replacement
- transition(M, Replace, I) {
- f_sendDataToL2;
- l_popRequestQueue;
- }
-
- transition(M, Inv_X){
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- transition(E_I, Inv) {
- // ack requestor's GETX, but wait for WB_Ack from L2
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // maintain isolation on M or E replacements
-  // took out M_I, since L2 transitions to M upon PUTX, and we should no longer receive invalidates
- transition(E_I, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // allow L2 to get data while we replace
- transition({M_I, E_I}, Replace, I) {
- ft_sendDataToL2_fromTBE;
- s_deallocateTBE;
- l_popRequestQueue;
- }
-
- transition(M, Fwd_GETX, I) {
- d_sendDataToRequestor;
- l_popRequestQueue;
- }
-
- transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popRequestQueue;
- }
-
- transition(M, {Fwd_GETX_X, Fwd_GETS_X, Fwd_GET_INSTR_X}) {
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- // for simplicity we ignore all other requests while we wait for L2 to receive the clean data. Otherwise we will incorrectly transfer
- // ownership and not mark ourselves as a transactional sharer in the L2 directory
- transition(E_I, {Fwd_GETX, Fwd_GETS, Fwd_GET_INSTR, Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}) {
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- transition(M_I, Fwd_GETX, I) {
- dt_sendDataToRequestor_fromTBE;
- s_deallocateTBE;
- l_popRequestQueue;
- }
-
- transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
- dt_sendDataToRequestor_fromTBE;
- d2t_sendDataToL2_fromTBE;
- s_deallocateTBE;
- l_popRequestQueue;
- }
-
- // don't release isolation on forwarded conflicting requests
- transition(M_I, {Fwd_GETS_X, Fwd_GETX_X, Fwd_GET_INSTR_X}) {
- // send NACK instead of data
- e_sendNackToRequestor;
- l_popRequestQueue;
- }
-
- // Transitions from IS
- transition({IS, IS_I}, Inv, IS_I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Only possible when L2 sends us data in SS state. No conflict is possible, so no need to unblock L2
- transition(IS, L2_Data_all_Acks, S) {
- u_writeDataToL1Cache;
- // unblock L2 because it blocks on GETS
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // Made the L2 block on GETS requests, so we are guaranteed to have no races with GETX
-  // We only get into this transition if the writer had to retry its GETX request that invalidated us, and L2 went back to SS
- transition(IS_I, L2_Data_all_Acks, S) {
- u_writeDataToL1Cache;
- // unblock L2 because it blocks on GETS
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // for L2 replacements
- transition({IS, IS_I}, Replace, IS_I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // These transitions are for when L2 sends us data, because it has exclusive copy, but L1 filter responses have not arrived
- transition({IS, IS_I}, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition({IS, IS_I}, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- // IS_I also allowed because L2 Inv beat our GETS request, and now L2 is in NP state, ready to service our GETS.
- transition({IS, IS_I}, L2_Data, IS_S) {
- u_writeDataToL1Cache;
- // This message carries the inverse of the ack count
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_S, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_S, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_S, Ack_all, S){
- // tell L2 we succeeded
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry request from I
- transition(IS_S, Nack_all, I){
- ff_deallocateL1CacheBlock;
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // tell L2 we failed
- jj_sendUnblockCancel;
- h_load_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // L2 is trying to give us exclusive data
-  // we can go to E because L2 is guaranteed to have the only copy (i.e. no races from other L1s are possible)
- transition({IS, IS_I}, L2_Exclusive_Data, IS_E) {
- u_writeDataToL1Cache;
- // This message carries the inverse of the ack count
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition({IS, IS_I}, L2_Exclusive_Data_all_Acks, E){
- u_writeDataToL1Cache;
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_E, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_E, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IS_E, Ack_all, E){
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry request from I
- transition(IS_E, Nack_all, I){
- ff_deallocateL1CacheBlock;
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // need to tell L2 we failed
- jj_sendUnblockCancel;
- h_load_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
-  // Normal case - when the L2 doesn't have the line exclusively, but an L1 has the line.
-  // We got NACKed. Try again in state I.
- // IMPORTANT: filters are NOT checked when L2 is in SS, because nobody has modified the line.
- // For this transition we only receive NACKs from the exclusive writer
- transition({IS, IS_I}, Nack_all, I) {
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // L2 is blocked when L1 is exclusive
- jj_sendUnblockCancel;
- h_load_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- transition({IS, IS_I}, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- transition(IS, DataS_fromL1, S) {
- u_writeDataToL1Cache;
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // This occurs when there is a race between our GETS and another L1's GETX, and the GETX wins
-  // The L2 is now blocked because our request was forwarded to the exclusive L1 (i.e. MT_IIB)
- transition(IS_I, DataS_fromL1, S) {
- u_writeDataToL1Cache;
- j_sendUnblock;
- uuu_profileTransactionLoadMiss;
- h_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // Transitions from IM
- transition({IM, SM}, Inv, IM) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition({IM, SM}, Inv_X) {
- fi_sendInvNack;
- l_popRequestQueue;
- }
-
- // for L2 replacements
- transition({IM, SM}, Replace, IM) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // only possible when L1 exclusive sends us the line
- transition(IM, Data_all_Acks, M) {
- u_writeDataToL1Cache;
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // L2 is trying to give us data
-  // Don't go to SM because we do not want an S copy on failure. This might cause conflicts for older writers that
- // nacked us.
- transition(IM, L2_Data, IM_M) {
- u_writeDataToL1Cache;
- // This message carries the inverse of the ack count
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IM, L2_Data_all_Acks, M){
- u_writeDataToL1Cache;
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IM_M, Ack){
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IM_M, Nack) {
- r_notifyReceiveNack;
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IM_M, Ack_all, M){
- // tell L2 we succeeded
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry request from I
- transition(IM_M, Nack_all, I){
- ff_deallocateL1CacheBlock;
- // This is also the final NACK
- r_notifyReceiveNackFinal;
- // need to tell L2 we failed
- jj_sendUnblockCancel;
- hh_store_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // transitions from SM
- transition({SM, IM}, Ack) {
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- // instead of Data we receive Nacks
- transition({SM, IM}, Nack) {
- r_notifyReceiveNack;
- // mark this request as being NACKed
- q_updateNackCount;
- o_popIncomingResponseQueue;
- }
-
- transition(SM, Ack_all, M) {
- jj_sendExclusiveUnblock;
- uuu_profileTransactionStoreMiss;
- hh_store_hit;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry in state S
- transition(SM, Nack_all, S){
- // This is the final nack
- r_notifyReceiveNackFinal;
- // unblock the L2
- jj_sendUnblockCancel;
- hh_store_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
- // retry in state I
- transition(IM, Nack_all, I){
- // This is the final NACK
- r_notifyReceiveNackFinal;
- // unblock the L2
- jj_sendUnblockCancel;
- hh_store_conflict;
- s_deallocateTBE;
- j_popTriggerQueue;
- }
-
-}
-
-
-
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm
deleted file mode 100644
index 9085ae33f..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-L2cache.sm
+++ /dev/null
@@ -1,2123 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
- *
- */
-
-machine(L2Cache, "MESI Directory L2 Cache CMP") {
-
- // L2 BANK QUEUES
- // From local bank of L2 cache TO the network
- MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="2", ordered="false"; // this L2 bank -> Memory
- MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> a local L1
- MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false"; // this L2 bank -> a local L1 || Memory
-
- // FROM the network to this local bank of L2 cache
- MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false"; // a local L1 -> this L2 bank
- MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false"; // a local L1 || Memory -> this L2 bank
- MessageBuffer unblockToL2Cache, network="From", virtual_network="4", ordered="false"; // a local L1 || Memory -> this L2 bank
-
- // STATES
- enumeration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
- // Base states
- NP, desc="Not present in either cache";
- SS, desc="L2 cache entry Shared, also present in one or more L1s";
- M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
- MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
-
- // L2 replacement
- M_I, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
- MT_I, desc="L2 cache replacing, getting data from exclusive";
- MCT_I, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
- I_I, desc="L2 replacing clean data, need to inv sharers and then drop data";
- S_I, desc="L2 replacing dirty data, collecting acks from L1s";
-
- // Transient States for fetching data from memory
- ISS, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
- IS, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
- IM, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";
-
- // Blocking states
- SS_MB, desc="Blocked for L1_GETX from SS";
- SS_SSB, desc="Blocked for L1_GETS from SS";
- MT_MB, desc="Blocked for L1_GETX from MT";
- M_MB, desc="Blocked for L1_GETX from M";
- ISS_MB, desc="Blocked for L1_GETS or L1_GETX from NP, received Mem Data";
- IS_SSB, desc="Blocked for L1_GET_INSTR from NP, received Mem Data";
- M_SSB, desc="Blocked for L1_GET_INSTR from M";
-
- MT_IIB, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
- MT_IB, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
- MT_SB, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
-
- // for resolving PUTX/PUTS races
- PB_MT, desc="Going to MT, got data and unblock, waiting for PUT";
- PB_SS, desc="Going to SS, got unblock, waiting for PUT";
- PB_MT_IB, desc="Blocked from MT, got unblock, waiting for data and PUT";
-
- }
-
- // EVENTS
- enumeration(Event, desc="L2 Cache events") {
- // L2 events
-
- // events initiated by the local L1s
-    L1_GET_INSTR,            desc="an L1I GET INSTR request for a block mapped to us";
-    L1_GET_INSTR_ESCAPE,     desc="an L1I GET INSTR request in an escape action for a block mapped to us";
-    L1_GETS,                 desc="an L1D GETS request for a block mapped to us";
-    L1_GETS_ESCAPE,          desc="an L1D GETS request in an escape action for a block mapped to us";
-    L1_GETX,                 desc="an L1D GETX request for a block mapped to us";
-    L1_GETX_ESCAPE,          desc="an L1D GETX request in an escape action for a block mapped to us";
-    L1_UPGRADE,              desc="an L1D UPGRADE request for a block mapped to us";
-
- L1_PUTX, desc="L1 replacing data";
- L1_PUTX_old, desc="L1 replacing data, but no longer sharer";
- L1_PUTS, desc="L1 replacing clean data";
- L1_PUTS_old, desc="L1 replacing clean data, but no longer sharer";
- L1_PUT_PENDING, desc="L1 PUT msg pending (recycled)";
-
- Fwd_L1_GETX, desc="L1 did not have data, so we supply";
- Fwd_L1_GETS, desc="L1 did not have data, so we supply";
- Fwd_L1_GET_INSTR, desc="L1 did not have data, so we supply";
-
- // events initiated by this L2
- L2_Replacement, desc="L2 Replacement", format="!r";
- L2_Replacement_XACT, desc="L2 Replacement of trans. data", format="!r";
- L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";
- L2_Replacement_clean_XACT, desc="L2 Replacement of trans. data, but data is clean", format="!r";
-
- // events from memory controller
- Mem_Data, desc="data from memory", format="!r";
- Mem_Ack, desc="ack from memory", format="!r";
-
- // M->S data writeback
- WB_Data, desc="data from L1";
- WB_Data_clean, desc="clean data from L1";
- Ack, desc="writeback ack";
- Ack_all, desc="writeback ack";
- // For transactional memory
- Nack, desc="filter indicates conflict";
- Nack_all, desc="all filters have responded, at least one conflict";
-
- Unblock, desc="Unblock from L1 requestor";
- Unblock_Cancel, desc="Unblock from L1 requestor (FOR XACT MEMORY)";
- Exclusive_Unblock, desc="Unblock from L1 requestor";
-
- Unblock_WaitPUTold, desc="Unblock from L1 requestor, last requestor was replacing so wait for PUT msg";
- Exclusive_Unblock_WaitPUTold, desc="Unblock from L1 requestor, last requestor was replacing so wait for PUT msg";
-
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- NetDest Sharers, desc="tracks the L1 shares on-chip";
- MachineID Exclusive, desc="Exclusive holder of block";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
-
- bool Trans, desc="dummy bit for debugging";
- bool Read, desc="LogTM R bit";
- bool Write, desc="LogTM W bit";
- bool L2Miss, desc="Was this block sourced from memory";
-    int L1PutsPending, default="0", desc="how many PUTX/PUTS are pending for this entry (being recycled)";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Address Address, desc="Line address for this TBE";
- Address PhysicalAddress, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="Data is Dirty";
-
- NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
- MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
- bool isPrefetch, desc="Set if this was caused by a prefetch";
-
- int pendingAcks, desc="number of pending acks for invalidates during writeback";
- bool nack, default="false", desc="has this request been NACKed?";
- }
-
- external_type(CacheMemory) {
- bool cacheAvail(Address);
- Address cacheProbe(Address);
- void allocate(Address);
- void deallocate(Address);
- Entry lookup(Address);
- void changePermission(Address, AccessPermission);
- bool isTagPresent(Address);
- void setMRU(Address);
- }
-
- external_type(TBETable) {
- TBE lookup(Address);
- void allocate(Address);
- void deallocate(Address);
- bool isPresent(Address);
- }
-
- TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
-
- CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
-
- // inclusive cache, returns L2 entries only
- Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
- return L2cacheMemory[addr];
- }
-
- void changeL2Permission(Address addr, AccessPermission permission) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return L2cacheMemory.changePermission(addr, permission);
- }
- }
-
- string getCoherenceRequestTypeStr(CoherenceRequestType type) {
- return CoherenceRequestType_to_string(type);
- }
-
- bool isL2CacheTagPresent(Address addr) {
- return (L2cacheMemory.isTagPresent(addr));
- }
-
- bool isOneSharerLeft(Address addr, MachineID requestor) {
- assert(L2cacheMemory[addr].Sharers.isElement(requestor));
- return (L2cacheMemory[addr].Sharers.count() == 1);
- }
-
- bool isSharer(Address addr, MachineID requestor) {
- if (L2cacheMemory.isTagPresent(addr)) {
- return L2cacheMemory[addr].Sharers.isElement(requestor);
- } else {
- return false;
- }
- }
-
- void addSharer(Address addr, MachineID requestor) {
- DEBUG_EXPR(machineID);
- DEBUG_EXPR(requestor);
- DEBUG_EXPR(addr);
- assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID);
- L2cacheMemory[addr].Sharers.add(requestor);
- }
-
- State getState(Address addr) {
- if(L2_TBEs.isPresent(addr)) {
- return L2_TBEs[addr].TBEState;
- } else if (isL2CacheTagPresent(addr)) {
- return getL2CacheEntry(addr).CacheState;
- }
- return State:NP;
- }
-
- string getStateStr(Address addr) {
- return L2Cache_State_to_string(getState(addr));
- }
-
- // when is this called
- void setState(Address addr, State state) {
-
- // MUST CHANGE
- if (L2_TBEs.isPresent(addr)) {
- L2_TBEs[addr].TBEState := state;
- }
-
- if (isL2CacheTagPresent(addr)) {
- getL2CacheEntry(addr).CacheState := state;
-
- // Set permission
- if (state == State:SS ) {
- changeL2Permission(addr, AccessPermission:Read_Only);
- } else if (state == State:M) {
- changeL2Permission(addr, AccessPermission:Read_Write);
- } else if (state == State:MT) {
- changeL2Permission(addr, AccessPermission:Stale);
- } else {
- changeL2Permission(addr, AccessPermission:Busy);
- }
- }
- }
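setState above keeps the cache permission consistent with the protocol state: SS is readable, M is read/write at the L2, MT marks the L2 copy stale because an L1 owns the valid data, and every other (transient) state is treated as busy. A compact C++ restatement of that mapping (the enums mirror only the cases the code distinguishes):

    // Permission mapping performed by setState above.
    enum class L2State { SS, M, MT, Other /* NP and all transient states */ };
    enum class AccessPermission { Read_Only, Read_Write, Stale, Busy };

    AccessPermission permissionFor(L2State s) {
        switch (s) {
        case L2State::SS: return AccessPermission::Read_Only;  // shared in one or more L1s
        case L2State::M:  return AccessPermission::Read_Write; // L2 holds the only valid copy
        case L2State::MT: return AccessPermission::Stale;      // an L1 owns the valid copy
        default:          return AccessPermission::Busy;       // transient states
        }
    }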
-
- Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
- if (L2cacheMemory.isTagPresent(addr)){ /* Present */
- if(getL2CacheEntry(addr).L1PutsPending > 0 && /* At least one PUT pending */
- (getL2CacheEntry(addr).CacheState == State:SS || getL2CacheEntry(addr).CacheState == State:MT || getL2CacheEntry(addr).CacheState == State:M )) { /* Base state */
-
- /* Only allow PUTX/PUTS to go on */
- if (type != CoherenceRequestType:PUTX &&
- type != CoherenceRequestType:PUTS) {
- return Event:L1_PUT_PENDING; // Don't serve any req until the wb is serviced
- }
- }
- }
- if(type == CoherenceRequestType:GETS) {
- return Event:L1_GETS;
- } else if(type == CoherenceRequestType:GETS_ESCAPE) {
- return Event:L1_GETS_ESCAPE;
- } else if(type == CoherenceRequestType:GET_INSTR) {
- return Event:L1_GET_INSTR;
- } else if(type == CoherenceRequestType:GET_INSTR_ESCAPE) {
- return Event:L1_GET_INSTR_ESCAPE;
- } else if (type == CoherenceRequestType:GETX) {
- return Event:L1_GETX;
- } else if(type == CoherenceRequestType:GETX_ESCAPE) {
- return Event:L1_GETX_ESCAPE;
- } else if (type == CoherenceRequestType:UPGRADE) {
- if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).Sharers.isElement(requestor) ) {
- return Event:L1_UPGRADE;
- } else {
- return Event:L1_GETX;
- }
- } else if (type == CoherenceRequestType:PUTX) {
- if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).L1PutsPending > 0) {
- getL2CacheEntry(addr).L1PutsPending := getL2CacheEntry(addr).L1PutsPending - 1;
- DEBUG_EXPR("PUTX PutSPending ");
- DEBUG_EXPR(getL2CacheEntry(addr).L1PutsPending);
- }
- if (isSharer(addr, requestor)) {
- return Event:L1_PUTX;
- } else {
- return Event:L1_PUTX_old;
- }
- } else if (type == CoherenceRequestType:PUTS) {
- if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).L1PutsPending > 0) {
- getL2CacheEntry(addr).L1PutsPending := getL2CacheEntry(addr).L1PutsPending - 1;
- DEBUG_EXPR("PUTS PutSPending ");
- DEBUG_EXPR(getL2CacheEntry(addr).L1PutsPending);
- }
- if (isSharer(addr, requestor)) {
- return Event:L1_PUTS;
- } else {
- return Event:L1_PUTS_old;
- }
- } else {
- DEBUG_EXPR(addr);
- DEBUG_EXPR(type);
- error("Invalid L1 forwarded request type");
- }
- }
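L1Cache_request_type_to_event above serializes requests behind outstanding writebacks and splits PUT and UPGRADE requests by whether the requestor is still a recorded sharer: while a PUTX/PUTS for the line is pending and the entry is in a base state (SS, MT or M), anything that is not itself a PUT becomes L1_PUT_PENDING and is recycled. A condensed sketch of that decision order (escape variants and the pending-counter decrement are omitted; the boolean parameters stand in for the directory lookups):

    enum class ReqType { GETS, GETX, GET_INSTR, UPGRADE, PUTX, PUTS };
    enum class Event   { L1_GETS, L1_GETX, L1_GET_INSTR, L1_UPGRADE,
                         L1_PUTX, L1_PUTX_old, L1_PUTS, L1_PUTS_old,
                         L1_PUT_PENDING };

    // Condensed decision order of L1Cache_request_type_to_event: 'baseState'
    // means the L2 entry is in SS, MT or M; 'isSharer' asks whether the
    // requestor is still in the entry's sharer list.
    Event decodeL1Request(ReqType type, bool tagPresent, int putsPending,
                          bool baseState, bool isSharer) {
        // Gate: while an older PUTX/PUTS is pending, only PUTs may proceed.
        if (tagPresent && putsPending > 0 && baseState &&
            type != ReqType::PUTX && type != ReqType::PUTS)
            return Event::L1_PUT_PENDING;            // request gets recycled

        switch (type) {
        case ReqType::GETS:      return Event::L1_GETS;
        case ReqType::GET_INSTR: return Event::L1_GET_INSTR;
        case ReqType::GETX:      return Event::L1_GETX;
        case ReqType::UPGRADE:   // an UPGRADE from a non-sharer degrades to a GETX
            return (tagPresent && isSharer) ? Event::L1_UPGRADE : Event::L1_GETX;
        case ReqType::PUTX:      // stale PUTs (requestor no longer a sharer) are *_old
            return isSharer ? Event::L1_PUTX : Event::L1_PUTX_old;
        case ReqType::PUTS:
            return isSharer ? Event::L1_PUTS : Event::L1_PUTS_old;
        }
        return Event::L1_GETS;   // unreachable
    }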
-
- // ** OUT_PORTS **
-
- out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
- out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
- out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
-
-
- // Response IntraChip L2 Network - response msg to this particular L2 bank
- in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
- if (responseIntraChipL2Network_in.isReady()) {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
-        // test whether it's from a local L1 or an off-chip source
- assert(in_msg.Destination.isElement(machineID));
- if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
- if(in_msg.Type == CoherenceResponseType:DATA) {
- if (in_msg.Dirty) {
- trigger(Event:WB_Data, in_msg.Address);
- } else {
- trigger(Event:WB_Data_clean, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
- // check whether any previous responses have been NACKs
- if(L2_TBEs[in_msg.Address].nack == false) {
- trigger(Event:Ack_all, in_msg.Address);
- }
- else {
- // at least one nack received
- trigger(Event:Nack_all, in_msg.Address);
- }
- } else {
- trigger(Event:Ack, in_msg.Address);
- }
- // for NACKs
- } else if (in_msg.Type == CoherenceResponseType:NACK) {
- if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
- trigger(Event:Nack_all, in_msg.Address);
- } else {
- trigger(Event:Nack, in_msg.Address);
- }
- } else {
- error("unknown message type");
- }
-
- } else { // external message
- if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Mem_Data, in_msg.Address); // L2 now has data and all off-chip acks
- } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
- trigger(Event:Mem_Ack, in_msg.Address); // L2 now has data and all off-chip acks
- } else {
- error("unknown message type");
- }
- }
- }
- } // if not ready, do nothing
- }
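The response port above promotes the ACK or NACK that brings pendingAcks to zero into an Ack_all or Nack_all event, and a single NACK recorded earlier in the TBE is enough to turn the final event into Nack_all. A small model of that final-response decision (the nack flag is the TBE field declared above; it is assumed to be set elsewhere in the controller when a Nack event is handled, which is not shown in this hunk):

    enum class RespType { ACK, NACK };
    enum class L2Event  { Ack, Ack_all, Nack, Nack_all };

    struct L2TbeModel {
        int  pendingAcks;     // responses still outstanding
        bool nack = false;    // remembers any earlier NACK for this request
    };

    // Mirrors the ACK/NACK arm of the response in_port above: the response
    // that brings pendingAcks to zero is promoted to Ack_all / Nack_all, and
    // one earlier NACK turns the final event into Nack_all.
    L2Event classifyResponse(const L2TbeModel &tbe, RespType type, int ackCount) {
        bool last = (tbe.pendingAcks - ackCount) == 0;
        if (type == RespType::ACK)
            return last ? (tbe.nack ? L2Event::Nack_all : L2Event::Ack_all)
                        : L2Event::Ack;
        return last ? L2Event::Nack_all : L2Event::Nack;   // NACK
    }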
-
- // L1 Request
- in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
- if(L1RequestIntraChipL2Network_in.isReady()) {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- /*
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(id);
- DEBUG_EXPR(getState(in_msg.Address));
- DEBUG_EXPR(in_msg.Requestor);
- DEBUG_EXPR(in_msg.Type);
- DEBUG_EXPR(in_msg.Destination);
- */
- assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
- assert(in_msg.Destination.isElement(machineID));
- if (L2cacheMemory.isTagPresent(in_msg.Address)) {
- // The L2 contains the block, so proceeded with handling the request
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
- } else {
- if (L2cacheMemory.cacheAvail(in_msg.Address)) {
-          // L2 doesn't have the line, but we have space for it in the L2
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
- } else {
- // No room in the L2, so we need to make room before handling the request
- if (L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Dirty ) {
- // check whether block is transactional
- if(L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Trans == true){
- trigger(Event:L2_Replacement_XACT, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- } else {
- // check whether block is transactional
- if(L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Trans == true){
- trigger(Event:L2_Replacement_clean_XACT, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- else{
- trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
- }
- }
- }
- }
- }
- }
- }
-
- in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
- if(L1unblockNetwork_in.isReady()) {
- peek(L1unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
- if (in_msg.RemoveLastOwnerFromDir == true) {
- if (isSharer(in_msg.Address,in_msg.LastOwnerID)) {
- trigger(Event:Exclusive_Unblock_WaitPUTold, in_msg.Address);
- }
- else { // PUT arrived, requestor already removed from dir
- trigger(Event:Exclusive_Unblock, in_msg.Address);
- }
- }
- else {
- trigger(Event:Exclusive_Unblock, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- if (in_msg.RemoveLastOwnerFromDir == true) {
- if (isSharer(in_msg.Address,in_msg.LastOwnerID)) {
- trigger(Event:Unblock_WaitPUTold, in_msg.Address);
- }
- else { // PUT arrived, requestor already removed from dir
- trigger(Event:Unblock, in_msg.Address);
- }
- }
- else {
- trigger(Event:Unblock, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_CANCEL) {
- trigger(Event:Unblock_Cancel, in_msg.Address);
- } else {
- error("unknown unblock message");
- }
- }
- }
- }
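The unblock port above is where the PUTX/PUTS race behind the PB_* states is resolved: if the unblocking L1 reports that the previous owner was replacing (RemoveLastOwnerFromDir) and that owner is still in our sharer list, the L2 must keep waiting for the straggling PUT. A compact restatement (the boolean parameters stand in for the message fields and the directory lookup):

    enum class UnblockType  { UNBLOCK, EXCLUSIVE_UNBLOCK, UNBLOCK_CANCEL };
    enum class UnblockEvent { Unblock, Unblock_WaitPUTold,
                              Exclusive_Unblock, Exclusive_Unblock_WaitPUTold,
                              Unblock_Cancel };

    // Mirrors the unblock in_port above: when the unblocker reports that the
    // last owner was replacing and that owner is still in our sharer list,
    // we must wait for its PUT before leaving the blocking state.
    UnblockEvent classifyUnblock(UnblockType type, bool removeLastOwnerFromDir,
                                 bool lastOwnerStillSharer) {
        bool waitPut = removeLastOwnerFromDir && lastOwnerStillSharer;
        switch (type) {
        case UnblockType::EXCLUSIVE_UNBLOCK:
            return waitPut ? UnblockEvent::Exclusive_Unblock_WaitPUTold
                           : UnblockEvent::Exclusive_Unblock;
        case UnblockType::UNBLOCK:
            return waitPut ? UnblockEvent::Unblock_WaitPUTold
                           : UnblockEvent::Unblock;
        case UnblockType::UNBLOCK_CANCEL:
            return UnblockEvent::Unblock_Cancel;
        }
        return UnblockEvent::Unblock;   // unreachable
    }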
-
- // ACTIONS
-
- action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
- }
-
- action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(L2cacheMemory[address].Exclusive);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- // also pass along timestamp
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- action(c_exclusiveReplacement, "c", desc="Send data to memory") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.DataBlk := L2_TBEs[address].DataBlk;
- out_msg.Dirty := L2_TBEs[address].Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
-
- //************Transactional memory actions **************
- //broadcast a write filter lookup request to all L1s except for the requestor
- action(a_checkL1WriteFiltersExceptRequestor, "wr", desc="Broadcast a Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := getLocalL1IDs(machineID);
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- //broadcast a read + write filter lookup request to all L1s except for the requestor
- action(a_checkL1ReadWriteFiltersExceptRequestor, "rwr", desc="Broadcast a Read + Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1 forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := getLocalL1IDs(machineID);
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- // These are to send out filter checks to those NACKers in our sharers or exclusive ptr list
- action(a_checkNackerL1WriteFiltersExceptRequestor, "wrn", desc="Broadcast a Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- // check if the L2 miss bit is set - if it is, send check filter requests to those in our Sharers list only
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("L2 Miss: No need to check L1 write filter ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- else{
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- assert(getL2CacheEntry(address).Sharers.count() > 0);
- out_msg.Destination := getL2CacheEntry(address).Sharers;
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- }
- else{
- // This is a read request, so check whether we have a writer
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- // we have a writer, and it is not us
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- else{
- APPEND_TRANSITION_COMMENT("L1 replacement: No need to check L1 write filter");
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" exclusive: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- // we should not have any sharers
- assert( getL2CacheEntry(address).Sharers.count() == 0 );
- }
- }
- }
- }
-
- action(a_checkNackerL1ReadWriteFiltersExceptRequestor, "wrrn", desc="Broadcast a Read + Write Filter lookup request"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- // check if the L2 miss bit is set - if it is, send check filter requests to those in our Sharers list only
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("L2 Miss: No need to check L1 read/write filter");
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- else{
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- assert(getL2CacheEntry(address).Sharers.count() > 0);
- out_msg.Destination := getL2CacheEntry(address).Sharers;
- // don't broadcast to requestor
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- }
- else{
- // This is a write request, so check whether we have readers not including us or a writer that is not us
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- // we have a writer, and it is not us
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- else if(getL2CacheEntry(address).Sharers.count() > 0){
- // this should never happen - since we allow silent S replacements but we always track exclusive L1
- assert(false);
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter - we are only reader");
- }
- else{
- // reader(s) exist that is not us
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:CHECK_READ_WRITE_FILTER;
- // make L1s forward responses to requestor
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := getL2CacheEntry(address).Sharers;
- // don't check our own filter
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // also pass along timestamp of requestor
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
- }
- else{
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter");
- }
- }
- }
- }
-
- // send data but force L1 requestor to wait for filter responses
-  action(f_sendDataToGetSRequestor, "f", desc="Send data from cache to requestor") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // wait for the filter responses from other L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- }
-
- // send exclusive data
-  action(f_sendExclusiveDataToGetSRequestor, "fx", desc="Send data from cache to requestor") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // wait for the filter responses from other L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- }
-
-  action(f_sendDataToGetXRequestor, "fxx", desc="Send data from cache to requestor") {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := L2_TBEs[address].PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // internal nodes
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // wait for the filter responses from other L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- }
-
-  action(f_sendDataToRequestor, "fd", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // different ack counts for different situations
- if(in_msg.Type == CoherenceRequestType:GET_INSTR_ESCAPE || in_msg.Type == CoherenceRequestType:GETX_ESCAPE){
- // no acks needed
- out_msg.AckCount := 0;
- }
- else{
-
- // ORIGINAL
- if( false ) {
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
-
- else{
- // NEW***
-            // differentiate between read and write requests
- if(in_msg.Type == CoherenceRequestType:GET_INSTR){
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("We are only sharer");
- out_msg.AckCount := 0;
- }
- else{
- // wait for ACKs from the other NACKers
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT("Nackers exist");
- }
- }
- else{
- // This is a read request, so check whether we have a writer
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- // we have a writer and it is not us
- out_msg.AckCount := 0 - 1;
-
- APPEND_TRANSITION_COMMENT(" Writer exists ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- }
- else{
- // we should have no sharers!
- assert(getL2CacheEntry(address).Sharers.count() == 0);
- assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
-
- APPEND_TRANSITION_COMMENT(" Sharers or we are writer exist, ok to read ");
- APPEND_TRANSITION_COMMENT(" sharers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- APPEND_TRANSITION_COMMENT(" exclusive: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- out_msg.AckCount := 0;
- }
- }
- }
- else if(in_msg.Type == CoherenceRequestType:GETX){
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT(" L2Miss and we are only sharer ");
- out_msg.AckCount := 0;
- }
- else{
- // nackers exist
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT("Nackers exist");
- }
- }
- else{
- // This is a write request, so check whether we have readers not including us or a writer that is not us
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- // we have a writer and it is not us
- out_msg.AckCount := 0 - 1;
-
- APPEND_TRANSITION_COMMENT(" Writer exists ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
-
- }
- else if(getL2CacheEntry(address).Sharers.count() > 0){
- // this shouldn't be possible - we always track exclusive owner, but allow silent S replacements
- assert(false);
-
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter - we are only reader");
- out_msg.AckCount := 0;
- }
- else{
- // reader(s) exist that is not us
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT(" Readers exist ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- }
- }
- else{
- // we should always have no sharers!
- assert(getL2CacheEntry(address).Sharers.count() == 0);
- assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
-
- out_msg.AckCount := 0;
-
- APPEND_TRANSITION_COMMENT(" sharers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- APPEND_TRANSITION_COMMENT(" exclusive: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- APPEND_TRANSITION_COMMENT(" L1 replacement: No need to check L1 read/write filter");
- }
- }
- } // for GETX
- else{
- // unknown request type
- APPEND_TRANSITION_COMMENT(in_msg.Type);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- assert(false);
- }
- }
- } // for original vs new code
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" AckCount: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- }
- }
- }
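For all its branching, f_sendDataToRequestor above reduces to a few rules for how many filter responses the requestor must collect, encoded as a negative AckCount on the data message. A condensed sketch of those rules, assuming as the code does that silent S replacements are allowed but the exclusive owner is always tracked (the unreachable sharers-present branch under GETX is dropped):

    #include <algorithm>

    // Condensed AckCount rules of f_sendDataToRequestor above (negative values
    // = number of filter responses the requestor must collect before
    // proceeding):
    //  - escape-action requests never wait for filter checks;
    //  - if the line was sourced from memory (L2Miss), the potential nackers
    //    are the other sharers on our list;
    //  - otherwise the only potential nacker is an exclusive owner that is
    //    not the requestor.
    int dataAckCount(bool escapeRequest, bool l2Miss, int sharerCount,
                     bool requestorIsSharer, bool requestorIsExclusive) {
        if (escapeRequest)
            return 0;                                  // no acks needed
        if (l2Miss) {
            int others = sharerCount - (requestorIsSharer ? 1 : 0);
            return -std::max(others, 0);               // wait for the other sharers
        }
        if (sharerCount == 0 && !requestorIsExclusive)
            return -1;                                 // one exclusive writer to check
        return 0;                                      // requestor already owns the line
    }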
-
-  action(f_sendExclusiveDataToRequestor, "fdx", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- // different ack counts depending on situation
- // IMPORTANT: assuming data sent exclusively for GETS request
- if(in_msg.Type == CoherenceRequestType:GETS_ESCAPE){
- // no acks needed
- out_msg.AckCount := 0;
- }
- else{
-
- // ORIGINAL :
- if( false ){
- // request filter checks from all L1s
- out_msg.AckCount := 0 - (numberOfL1CachePerChip() - 1);
- }
- else{
- // NEW***
- if(getL2CacheEntry(address).L2Miss == true){
- // check whether we are the only sharer on the list. If so, no need to broadcast.
- if(isSharer(address, in_msg.Requestor) == true && isOneSharerLeft(address, in_msg.Requestor) == true){
- // no filter check needed
- APPEND_TRANSITION_COMMENT("We are only sharer");
- out_msg.AckCount := 0;
- }
- else{
- // wait for ACKs from the other NACKers
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if(isSharer(address, in_msg.Requestor)){
- // don't include us
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT("Nackers exist");
- }
- }
- else{
- // This is a read request, so check whether we have a writer
- if(getL2CacheEntry(address).Sharers.count() == 0 && getL2CacheEntry(address).Exclusive != in_msg.Requestor){
- // we have a writer and it is not us
- out_msg.AckCount := 0 - 1;
-
- APPEND_TRANSITION_COMMENT(" Writer exists ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Exclusive);
- }
- else{
- // we should always have no sharers!
- APPEND_TRANSITION_COMMENT(address);
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" sharers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
-
- DEBUG_EXPR(address);
- DEBUG_EXPR(" requestor: ");
- DEBUG_EXPR(in_msg.Requestor);
- DEBUG_EXPR(" sharers: ");
- DEBUG_EXPR(getL2CacheEntry(address).Sharers);
-
- assert(getL2CacheEntry(address).Sharers.count() == 0);
- assert(getL2CacheEntry(address).Exclusive == in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" Sharers exist or we are writer, ok to read ");
- out_msg.AckCount := 0;
- }
- }
-        } // for original vs new code
- }
-
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" AckCount: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- }
- }
- }
-
- // send an accumulated ACK to requestor when we don't care about checking filters (for escape actions)
- action(f_sendAccumulatedAckToRequestor, "faa", desc="Send ACKs to requestor") {
- // special case: don't send ACK if uniprocessor, since we don't need it (just send data)
- if((numberOfL1CachePerChip() - 1) > 0){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // count all L1s except requestor
- out_msg.AckCount := numberOfL1CachePerChip() - 1;
- APPEND_TRANSITION_COMMENT(" Total L1s: ");
- APPEND_TRANSITION_COMMENT(numberOfL1CachePerChip());
- APPEND_TRANSITION_COMMENT(" Total ACKS: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
- }
-
- // special INV used when we receive an escape action request. Sharers cannot NACK this invalidate.
- action(fwm_sendFwdInvEscapeToSharersMinusRequestor, "fwme", desc="invalidate sharers for request, requestor is sharer") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="1") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:INV_ESCAPE;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := L2cacheMemory[address].Sharers;
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- //also pass along timestamp
- out_msg.Timestamp := in_msg.Timestamp;
- }
- }
- }
-
- action(f_profileRequestor, "prq", desc="Profiles the requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT(" requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- }
-
- // marks the L2 block as transactional if request was transactional
- action(f_markBlockTransIfTrans, "\mbt", desc="Mark an L2 block as transactional") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- if(in_msg.Transactional == true){
- L2cacheMemory[address].Trans := true;
- }
- }
- }
-
- action(q_profileOverflow, "po", desc="profile the overflowed block"){
- profileOverflow(address, machineID);
- }
-
- action(p_profileRequest, "pcc", desc="Profile request msg") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT(" request: Timestamp: ");
- APPEND_TRANSITION_COMMENT(in_msg.Timestamp);
- APPEND_TRANSITION_COMMENT(" Requestor: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" Dest: ");
- APPEND_TRANSITION_COMMENT(in_msg.Destination);
- APPEND_TRANSITION_COMMENT(" PA: ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- APPEND_TRANSITION_COMMENT(" Type: ");
- APPEND_TRANSITION_COMMENT(in_msg.Type);
- APPEND_TRANSITION_COMMENT(" Mode: ");
- APPEND_TRANSITION_COMMENT(in_msg.AccessMode);
- APPEND_TRANSITION_COMMENT(" PF: ");
- APPEND_TRANSITION_COMMENT(in_msg.Prefetch);
- }
- }
-
- //********************************END***************************
-
-  action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
- if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- APPEND_TRANSITION_COMMENT(" AckCount: ");
- APPEND_TRANSITION_COMMENT(out_msg.AckCount);
- }
- }
- }
-
- // use DATA instead of L2_DATA because L1 doesn't need to wait for acks from L1 filters in this case
-  action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to requestor") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:L2_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
- out_msg.Dirty := getL2CacheEntry(address).Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- // no ACKS needed because no possible conflicts
- out_msg.AckCount := 0;
- }
- }
- }
-
- action(f_sendInvToSharers, "fsi", desc="invalidate sharers for L2 replacement") {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:REPLACE;
- out_msg.Requestor := machineID;
- out_msg.Destination := L2cacheMemory[address].Sharers;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := L2cacheMemory[address].Sharers;
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- //also pass along timestamp
- out_msg.Timestamp := in_msg.Timestamp;
- APPEND_TRANSITION_COMMENT(" Sharers: ");
- APPEND_TRANSITION_COMMENT(L2cacheMemory[address].Sharers);
- }
- }
- }
-
- // OTHER ACTIONS
- action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
- check_allocate(L2_TBEs);
- L2_TBEs.allocate(address);
- L2_TBEs[address].L1_GetS_IDs.clear();
- L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
- L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
- L2_TBEs[address].pendingAcks := getL2CacheEntry(address).Sharers.count();
- }
-
- action(i_setTBEPhysicalAddress, "ia", desc="Sets the physical address field of the TBE"){
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2_TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
- }
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
- L2_TBEs.deallocate(address);
- }
-
- action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
- profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
- }
-
- action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
- profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
- }
-
-
- action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
- profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
- }
-
-
- action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
- getL2CacheEntry(address).Dirty := in_msg.Dirty;
- // reset the L2 miss bit
- getL2CacheEntry(address).L2Miss := false;
- }
- }
-
- // Sets the L2Miss bit in the L2 entry - indicates data was sourced from memory
- action(m_markL2MissBit, "mi", desc="Set the entry's L2 Miss bit") {
- getL2CacheEntry(address).L2Miss := true;
- }
-
- action(m_copyNackersIntoSharers, "mn", desc="Copy the NACKers list into our sharers list") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Nackers.count() > 0);
- getL2CacheEntry(address).Sharers.clear();
- // only need to copy into sharers list if we are in special state of "multicast" filter checks
- if(getL2CacheEntry(address).L2Miss == true){
- getL2CacheEntry(address).Sharers := in_msg.Nackers;
- APPEND_TRANSITION_COMMENT(" Unblocker: ");
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" Nackers: ");
- APPEND_TRANSITION_COMMENT(getL2CacheEntry(address).Sharers);
- }
- }
- }
-
-  action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from request queue to cache") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
- getL2CacheEntry(address).Dirty := in_msg.Dirty;
- // reset the L2 miss bit
- getL2CacheEntry(address).L2Miss := false;
- }
- }
-
- action(q_updateAck, "q", desc="update pending ack count") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" p: ");
- APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
- }
- }
-
-  // For transactional memory: used when a NACK is received instead of an ACK
- action(q_updateNack, "qn", desc="update pending ack count") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- // set flag indicating we have seen NACK
- L2_TBEs[address].nack := true;
- L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" p: ");
- APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
- }
- }
-
- action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
- peek(responseIntraChipL2Network_in, ResponseMsg) {
- L2_TBEs[address].DataBlk := in_msg.DataBlk;
- L2_TBEs[address].Dirty := in_msg.Dirty;
- }
- }
-
-
- action(z_stall, "z", desc="Stall") {
- }
-
-
- action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
- }
- }
-
- action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
- }
- }
-
- action(set_setMRU, "\set", desc="set the MRU entry") {
- L2cacheMemory.setMRU(address);
- }
-
- action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
- if (L2cacheMemory.isTagPresent(address) == false) {
- L2cacheMemory.allocate(address);
- }
- }
-
- action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cacheMemory.deallocate(address);
- }
-
- action(t_sendWBAck, "t", desc="Send writeback ACK") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:WB_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // upgrader doesn't get ack from itself, hence the + 1
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
- APPEND_TRANSITION_COMMENT(" ");
- APPEND_TRANSITION_COMMENT(in_msg.PhysicalAddress);
- }
- }
- }
-
- // same as above, but send NACK instead of ACK
- action(ts_sendInvNackToUpgrader, "tsn", desc="Send NACK to upgrader") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_TAG_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:NACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // upgrader doesn't get ack from itself, hence the + 1
- out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
- }
- }
- }
-
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.Requestor));
- }
- }
-
- action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- // profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
- }
- }
-
-
-
- action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- addSharer(address, in_msg.Requestor);
- APPEND_TRANSITION_COMMENT( getL2CacheEntry(address).Sharers );
- }
- }
-
- action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- addSharer(address, in_msg.Sender);
- if (in_msg.RemoveLastOwnerFromDir == true) {
- // We do this to solve some races with PUTX
- APPEND_TRANSITION_COMMENT("Last owner removed, it was ");
- APPEND_TRANSITION_COMMENT(in_msg.LastOwnerID);
- L2cacheMemory[address].Sharers.remove(in_msg.LastOwnerID);
- assert(in_msg.LastOwnerID == L2cacheMemory[address].Exclusive);
- }
- }
- }
-
-
- action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2cacheMemory[address].Sharers.remove(in_msg.Requestor);
- }
- }
-
- action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
- L2cacheMemory[address].Sharers.clear();
- }
-
- action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- if (in_msg.RemoveLastOwnerFromDir == true) {
- // We do this to solve some races with PUTX
- APPEND_TRANSITION_COMMENT(" Last owner removed, it was ");
- APPEND_TRANSITION_COMMENT(in_msg.LastOwnerID);
- assert(in_msg.LastOwnerID == L2cacheMemory[address].Exclusive);
- }
- L2cacheMemory[address].Sharers.clear();
- L2cacheMemory[address].Exclusive := in_msg.Sender;
- addSharer(address, in_msg.Sender);
- }
- }
-
- action(zz_recycleL1RequestQueue, "zz", desc="recycle L1 request queue") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:PUTX || in_msg.Type == CoherenceRequestType:PUTS) {
- if (L2cacheMemory.isTagPresent(in_msg.Address)) {
- getL2CacheEntry(in_msg.Address).L1PutsPending := getL2CacheEntry(in_msg.Address).L1PutsPending + 1;
- DEBUG_EXPR("RECYCLE PutSPending ");
- DEBUG_EXPR(getL2CacheEntry(in_msg.Address).L1PutsPending);
- DEBUG_EXPR(in_msg.Type);
- DEBUG_EXPR(in_msg.Requestor);
- }
- }
- }
- L1RequestIntraChipL2Network_in.recycle();
- }
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- /* Recycle while waiting for PUT */
- transition({PB_MT, PB_MT_IB, PB_SS}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_GETS_ESCAPE, L1_GETX_ESCAPE, L1_GET_INSTR_ESCAPE, L2_Replacement, L2_Replacement_clean, L2_Replacement_XACT, L2_Replacement_clean_XACT}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition({IM, IS, ISS, SS_MB, M_MB, ISS_MB, IS_SSB, MT_MB, MT_IIB, MT_IB, MT_SB, M_SSB, SS_SSB},
- {L2_Replacement, L2_Replacement_clean, L2_Replacement_XACT, L2_Replacement_clean_XACT}) {
- zz_recycleL1RequestQueue;
- }
-
- transition({SS_MB, M_MB, ISS_MB, IS_SSB, MT_MB, MT_IIB, MT_IB, MT_SB, M_SSB, SS_SSB},
- {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_GETS_ESCAPE, L1_GETX_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition({NP, SS, M, M_I, MT_I, MCT_I, I_I, S_I, ISS, IS, IM, /*SS_MB,*/ SS_SSB, /* MT_MB, M_MB, ISS_MB,*/ IS_SSB, M_SSB, /*MT_IIB, */MT_IB/*, MT_SB*/}, {L1_PUTX,L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // occurs when L2 replacement raced with L1 replacement, and L2 finished its replacement first
- transition({NP, M_I, MCT_I, I_I, S_I, IS, ISS, IM, SS, M, MT, IS_SSB, MT_IB, M_SSB, SS_SSB}, {L1_PUTX_old, L1_PUTS_old}){
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // PUT from current (last) exclusive owner, that was replacing the line when it received Fwd req
- transition(MT_I, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
-  transition({SS, M, MT}, {L1_PUT_PENDING}) { // an L1 PUT msg is pending for the block; don't accept new requests until the PUT is processed
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- //===============================================
- // BASE STATE - I
-
- // Transitions from I (Idle)
-
-  // When the L2 doesn't have the block, we need to broadcast to all L1s to check the appropriate filter(s)
- transition(NP, L1_GETS, ISS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // will mark as exclusive when we get unblocked with success
- //nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // for correctness we need to query both read + write filters
- a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- // no need to check filters, send accumulated ACK to requestor
- transition(NP, L1_GETS_ESCAPE, ISS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // will mark as exclusive when we get unblocked with success
- //nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // send accumulated ACK
- f_sendAccumulatedAckToRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- transition(NP, L1_GET_INSTR, IS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // for correctness query the read + write filters
- a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
-  // no need to query filters, send accumulated ACK to requestor
- transition(NP, L1_GET_INSTR_ESCAPE, IS) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- // send accumulated ACK
- f_sendAccumulatedAckToRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- transition(NP, L1_GETX, IM) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- xx_recordGetXL1ID;
- a_issueFetchToMemory;
- // also query the L1 write and read filters
- a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- // don't check filters
- transition(NP, L1_GETX_ESCAPE, IM) {
- p_profileRequest;
- f_profileRequestor;
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // nn_addSharer;
- i_allocateTBE;
- i_setTBEPhysicalAddress;
- xx_recordGetXL1ID;
- a_issueFetchToMemory;
- // send accumulated ACK to requestor
- f_sendAccumulatedAckToRequestor;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
-
- // transitions from IS/IM
-
- // force L1s to respond success or failure
- transition(ISS, Mem_Data, ISS_MB){
- m_writeDataToCache;
- m_markL2MissBit;
- // send exclusive data but force L1 to wait for filter responses
- f_sendExclusiveDataToGetSRequestor;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IS, Mem_Data, IS_SSB){
- m_writeDataToCache;
- m_markL2MissBit;
- // send data but force L1 to wait for filter responses
- f_sendDataToGetSRequestor;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IM, Mem_Data, ISS_MB){
- m_writeDataToCache;
- m_markL2MissBit;
- // send data but force L1 to wait for filter responses
- f_sendDataToGetXRequestor;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // disallow grouping of requestors. There is a correctness problem if we check the wrong
- // filters as indicated by the original requestor.
- transition({IS, ISS}, {L1_GETX, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // transitions from SS
- transition(SS, {L1_GETS, L1_GET_INSTR, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}, SS_SSB) {
- p_profileRequest;
- f_profileRequestor;
- ds_sendSharedDataToRequestor;
- nn_addSharer;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // For isolation the L1 filters might return NACKs to the requestor
- transition(SS, L1_GETX, SS_MB) {
- p_profileRequest;
- f_profileRequestor;
- d_sendDataToRequestor;
- fwm_sendFwdInvToSharersMinusRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // send special INV to sharers - they have to invalidate
- transition(SS, L1_GETX_ESCAPE, SS_MB) {
- p_profileRequest;
- f_profileRequestor;
- d_sendDataToRequestor;
- fwm_sendFwdInvEscapeToSharersMinusRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // For isolation the L1 filters might return NACKs to the requestor
- transition(SS, L1_UPGRADE, SS_MB) {
- f_profileRequestor;
- fwm_sendFwdInvToSharersMinusRequestor;
- ts_sendInvAckToUpgrader;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(SS, L2_Replacement_clean, I_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SS, L2_Replacement_clean_XACT, I_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SS, L2_Replacement, S_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SS, L2_Replacement_XACT, S_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- // Transitions from M
-
- // send data, but force L1 to wait for filter responses
- transition(M, L1_GETS, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendExclusiveDataToRequestor;
- // selective filter checks, but need to check both read+write in case nackers put NP block into M state
- a_checkNackerL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // don't care about filters
- transition(M, L1_GETS_ESCAPE, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendExclusiveDataToRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(M, L1_GET_INSTR, M_SSB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- // NEW - selective filter checks, but need to check both read+write in case nackers put NP block into M state
- a_checkNackerL1ReadWriteFiltersExceptRequestor;
- // This should always be _after_ f_sendDataToRequestor and a_checkNackerL1WriteFiltersExceptRequestor, since they
- // explicitly look at the sharers list!
- nn_addSharer;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // don't care about filters
- transition(M, L1_GET_INSTR_ESCAPE, M_SSB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- nn_addSharer;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(M, L1_GETX, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- // selective filter checks
- a_checkNackerL1ReadWriteFiltersExceptRequestor;
- // issue filter checks
- //a_checkL1ReadWriteFiltersExceptRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- // don't care about filters
- transition(M, L1_GETX_ESCAPE, M_MB) {
- p_profileRequest;
- f_profileRequestor;
- f_sendDataToRequestor;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(M, L2_Replacement, M_I) {
- i_allocateTBE;
- c_exclusiveReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement_clean, M_I) {
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement_XACT, M_I) {
- q_profileOverflow;
- i_allocateTBE;
- c_exclusiveReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement_clean_XACT, M_I) {
- q_profileOverflow;
- rr_deallocateL2CacheBlock;
- }
-
-
- // transitions from MT
- transition(MT, {L1_GETX, L1_GETX_ESCAPE}, MT_MB) {
- p_profileRequest;
- f_profileRequestor;
- b_forwardRequestToExclusive;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
-
- transition(MT, {L1_GETS, L1_GET_INSTR, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}, MT_IIB) {
- p_profileRequest;
- f_profileRequestor;
- b_forwardRequestToExclusive;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(MT, L2_Replacement, MT_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L2_Replacement_clean, MCT_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L2_Replacement_XACT, MT_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L2_Replacement_clean_XACT, MCT_I) {
- q_profileOverflow;
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L1_PUTX, M) {
- f_profileRequestor;
-    // this doesn't affect the exclusive ptr
- ll_clearSharers;
- mr_writeDataToCacheFromRequest;
- t_sendWBAck;
- jj_popL1RequestQueue;
- }
-
-  // This handles a transactional read line in E state being replaced from the L1. We need to maintain isolation on it
-  // in the event of a future transactional store from another proc, so we keep this transactional sharer on the list
- transition(MT, L1_PUTS, SS) {
- f_profileRequestor;
- ll_clearSharers;
- // maintain transactional read isolation
- nn_addSharer;
- mr_writeDataToCacheFromRequest;
- t_sendWBAck;
- jj_popL1RequestQueue;
- }
-
- // transitions from blocking states
- transition(SS_MB, Unblock_Cancel, SS) {
- k_popUnblockQueue;
- }
-
- transition(M_SSB, Unblock_Cancel, M) {
- ll_clearSharers;
- // copy NACKers list from unblock message to our sharers list
- m_copyNackersIntoSharers;
- k_popUnblockQueue;
- }
-
- transition(MT_MB, Unblock_Cancel, MT) {
- k_popUnblockQueue;
- }
-
- transition(MT_IB, Unblock_Cancel, MT) {
- k_popUnblockQueue;
- }
-
- transition(MT_IIB, Unblock_Cancel, MT){
- k_popUnblockQueue;
- }
-
- // L2 just got the data from memory, but we have Nackers. We can let nacked block reside in M, but GETS request needs to check read+write
- // signatures to avoid atomicity violations.
- transition({ISS_MB, IS_SSB}, Unblock_Cancel, M){
- //rr_deallocateL2CacheBlock;
- // copy NACKers list from unblock message to our sharers list
- m_copyNackersIntoSharers;
- k_popUnblockQueue;
- }
-
- transition(M_MB, Unblock_Cancel, M) {
- // copy NACKers list from unblock message to our sharers list
- m_copyNackersIntoSharers;
- k_popUnblockQueue;
- }
-
- transition(SS_MB, Exclusive_Unblock, MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from next exclusive surpassed its own ExclusiveUnblock
- // Perceived as PUTX_old because the directory is outdated
- transition(SS_MB, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // PUT from current (old) exclusive, can't do anything with it in this state
- // Don't know whether exclusive was replacing or not, so wait to see what Unblock says
- transition(SS_MB, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // Next exclusive informs that last owner was replacing the line when it received Fwd req
- // Thus, expect a PUTX_old from previous owner
- transition(SS_MB, Exclusive_Unblock_WaitPUTold, PB_MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition(PB_MT, {L1_PUTX_old, L1_PUTS_old}, MT) { // OK, PUT_old received, go to MT
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // PUT from current (next) exclusive, so recycle
- // Expecting PUT_old, won't take in new PUT until previous PUT arrives
- transition(PB_MT, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // L2 blocks on GETS requests in SS state
- transition(SS_SSB, Unblock, SS) {
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition({M_SSB, IS_SSB}, Unblock, SS) {
- // we already added the sharer when we received original request
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition({M_MB, MT_MB, ISS_MB}, Exclusive_Unblock, MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition({M_MB, MT_MB, ISS_MB}, Exclusive_Unblock_WaitPUTold, PB_MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from (not yet) next exclusive surpassed its own ExclusiveUnblock
- // thus became PUTX_old (since directory is not up-to-date)
- transition({M_MB, MT_MB, ISS_MB}, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // PUT from current (previous) owner: recycle until unblock arrives
- // We don't know whether replacing cache is waiting for WB_Ack or it was replacing when fwd arrived
- transition({M_MB, MT_MB, ISS_MB}, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // L1 requestor received data from exclusive L1, but writeback data from exclusive L1 hasn't arrived yet
- transition(MT_IIB, Unblock, MT_IB) {
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from current (previous) owner: recycle
- // We don't know whether replacing cache is waiting for WB_Ack or it was replacing when fwd arrived
- transition(MT_IIB, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- }
-
- // PUT from (not yet) next exclusive, but unblock hasn't arrived yet, so it became PUT_old: recycle
- transition(MT_IIB, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(MT_IIB, Unblock_WaitPUTold, PB_MT_IB) { // Now arrives Unblock, wait for PUT and WB_Data
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // L1 requestor has not received data from exclusive L1, but we received writeback data from exclusive L1
- transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- }
-
- // PUT_old from previous owner, that was replacing when it received Fwd req
- transition(PB_MT_IB, {L1_PUTX_old, L1_PUTS_old}, MT_IB) { // Go to MT_IB, and wait for WB_Data
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- transition(PB_MT_IB, {L1_PUTX, L1_PUTS}) { // Waiting for PUT_old, don't take new PUT in
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // WB_data from previous owner, we already received unblock, just wait for PUT_old to go to SS
- transition(PB_MT_IB, {WB_Data, WB_Data_clean}, PB_SS) { // Received Unblock, now arrives WB_Data, wait for PUT
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- }
-
- transition(PB_SS, {L1_PUTX_old, L1_PUTS_old}, SS) { // Received Unblock and WB_Data, now arrives PUT, go to SS
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // PUT from new exclusive owner, while waiting for PUT from previous exclusive owner: recycle
- transition(PB_SS, {L1_PUTX, L1_PUTS}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(MT_SB, Unblock, SS) {
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- transition(MT_SB, Unblock_WaitPUTold, PB_SS) { // Received WB_Data, now arriving Unblock, wait for PUT
- nnu_addSharerFromUnblock;
- // mark block as trans if needed
- f_markBlockTransIfTrans;
- k_popUnblockQueue;
- }
-
- // PUT from (not yet) new exclusive owner, before we receive Unblock from it (became PUT_old because directory is not up-to-date)
- transition(MT_SB, {L1_PUTX_old, L1_PUTS_old}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- // PUT from current (last) exclusive owner, that was replacing the line when it received Fwd req
- transition(MT_SB, {L1_PUTX, L1_PUTS}) {
- kk_removeRequestSharer; // When Unblock arrives, it'll trigger Unblock, not Unblock_WaitPUTold
- f_profileRequestor;
- jj_popL1RequestQueue;
- }
-
- // writeback states
- transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR, L1_GETX_ESCAPE, L1_GETS_ESCAPE, L1_GET_INSTR_ESCAPE}) {
- f_profileRequestor;
- zz_recycleL1RequestQueue;
- }
-
- transition(I_I, Ack) {
- q_updateAck;
- o_popIncomingResponseQueue;
- }
-
- transition(I_I, Ack_all, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition({MT_I, MCT_I}, WB_Data, M_I) {
- qq_writeDataToTBE;
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(MCT_I, WB_Data_clean, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- // L1 never changed Dirty data
- transition(MT_I, Ack_all, M_I) {
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- // clean data that L1 exclusive never wrote
- transition(MCT_I, Ack_all, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(MT_I, WB_Data_clean, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(S_I, Ack) {
- q_updateAck;
- o_popIncomingResponseQueue;
- }
-
- transition(S_I, Ack_all, M_I) {
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(M_I, Mem_Ack, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-}
-
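A note on the ack bookkeeping removed above: in the deleted L2 controller, i_allocateTBE seeds pendingAcks with the number of probed sharers, q_updateAck and q_updateNack subtract each response's AckCount (with q_updateNack also recording that a NACK was seen), and the Ack_all transitions presumably fire once that count reaches zero. A minimal standalone C++ sketch of that convention follows; the TBE struct and its method names here are hypothetical stand-ins, not gem5 code.

#include <cassert>
#include <iostream>

// Hypothetical stand-in for the SLICC TBE entry used by the deleted controller.
struct TBE {
    int  pendingAcks = 0;   // seeded with Sharers.count(), as in i_allocateTBE
    bool nack        = false;

    void onAck(int ackCount)  { pendingAcks -= ackCount; }              // mirrors q_updateAck
    void onNack(int ackCount) { nack = true; pendingAcks -= ackCount; } // mirrors q_updateNack
    bool allAcksReceived() const { return pendingAcks == 0; }           // would trigger Ack_all
};

int main() {
    TBE tbe;
    tbe.pendingAcks = 3;   // three sharers were probed
    tbe.onAck(1);
    tbe.onNack(1);         // one sharer refused, preserving transactional isolation
    tbe.onAck(1);
    assert(tbe.allAcksReceived());
    std::cout << (tbe.nack ? "at least one NACK seen" : "all clean ACKs") << std::endl;
    return 0;
}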
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-mem.sm b/src/mem/protocol/MESI_CMP_filter_directory-mem.sm
deleted file mode 100644
index 1fcd234fe..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-mem.sm
+++ /dev/null
@@ -1,166 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
- */
-
-
-machine(Directory, "Token protocol") {
-
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
- MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
- MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
-
- // STATES
- enumeration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, desc="Owner";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
-    Fetch, desc="A memory fetch arrives";
-    Data, desc="writeback data arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...") {
- DataBlock DataBlk, desc="data for the block";
- }
-
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- }
-
-
- // ** OBJECTS **
-
- DirectoryMemory directory, constructor_hack="i";
-
- State getState(Address addr) {
- return State:I;
- }
-
- void setState(Address addr, State state) {
- }
-
- // ** OUT_PORTS **
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
-
- // ** IN_PORTS **
-
- in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady()) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:Fetch, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:Fetch, in_msg.Address);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady()) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
- action(a_sendAck, "a", desc="Send ack to L2") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Sender);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
- }
-
-  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue();
- }
-
- action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
- }
- }
-
- // TRANSITIONS
-
- transition(I, Fetch) {
- d_sendData;
- j_popIncomingRequestQueue;
- }
-
- transition(I, Data) {
- m_writeDataToMemory;
- a_sendAck;
- k_popIncomingResponseQueue;
- }
-}
diff --git a/src/mem/protocol/MESI_CMP_filter_directory-msg.sm b/src/mem/protocol/MESI_CMP_filter_directory-msg.sm
deleted file mode 100644
index a888e2450..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory-msg.sm
+++ /dev/null
@@ -1,153 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MSI_MOSI_CMP_directory-msg.sm 1.5 05/01/19 15:48:37-06:00 mikem@royal16.cs.wisc.edu $
- *
- */
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- GETX, desc="Get eXclusive";
- GETX_ESCAPE, desc="Get eXclusive, while in escape action";
- UPGRADE, desc="UPGRADE to exclusive";
- GETS, desc="Get Shared";
- GETS_ESCAPE, desc="Get Shared, while in escape action";
- GET_INSTR, desc="Get Instruction";
- GET_INSTR_ESCAPE, desc="Get Instruction, while in escape action";
- INV, desc="INValidate, could be NACKed";
- INV_ESCAPE, desc="INValidate, cannot be NACKed";
- PUTX, desc="replacement message, for writeback to lower caches";
- PUTS, desc="clean replacement message, for writeback to lower caches";
- REPLACE, desc="replacement message, from lowest cache";
- CHECK_WRITE_FILTER, desc="check write filter message";
- CHECK_READ_WRITE_FILTER, desc="check both read and write filters message";
-}
-
-// CoherenceResponseType
-enumeration(CoherenceResponseType, desc="...") {
- MEMORY_ACK, desc="Ack from memory controller";
- DATA, desc="Data";
- DATA_EXCLUSIVE, desc="Data";
- L2_DATA, desc="data from L2, in shared mode";
- L2_DATA_EXCLUSIVE, desc="data from L2, in exclusive mode";
- MEMORY_DATA, desc="Data";
- ACK, desc="Generic invalidate ack";
- NACK, desc="NACK used to maintain transactional isolation";
- WB_ACK, desc="writeback ack";
- UNBLOCK, desc="unblock";
- EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
- UNBLOCK_CANCEL, desc="unblock when trans. request fails";
-}
-
-// RequestMsg
-structure(RequestMsg, desc="...", interface="NetworkMessage") {
- Address Address, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- AccessModeType AccessMode, desc="user/supervisor access type";
- MachineID Requestor , desc="What component request";
- NetDest Destination, desc="What components receive the request, includes MachineType and num";
- MessageSizeType MessageSize, desc="size category of the message";
- DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
- bool Dirty, default="false", desc="Dirty bit";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
- uint64 Timestamp, desc="TLR-like Timestamp";
-}
-
-// ResponseMsg
-structure(ResponseMsg, desc="...", interface="NetworkMessage") {
- Address Address, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
- MachineID Sender, desc="What component sent the data";
- NetDest Destination, desc="Node to whom the data is sent";
- DataBlock DataBlk, desc="Data for the cache line";
- bool Dirty, default="false", desc="Dirty bit";
- int AckCount, default="0", desc="number of acks in this message";
- MessageSizeType MessageSize, desc="size category of the message";
- uint64 Timestamp, desc="TLR-like Timestamp";
- NetDest Nackers, desc="The nodes which sent NACKs to requestor";
- bool Transactional, desc="Whether this address was transactional";
- bool RemoveLastOwnerFromDir, desc="To solve some races with PUTX/GETS";
- MachineID LastOwnerID, desc="What component sent the data";
-}
-
-// TriggerType
-enumeration(TriggerType, desc="...") {
- ALL_ACKS, desc="When all acks/nacks have been received";
-}
-
-// TriggerMsg
-structure(TriggerMsg, desc="...", interface="Message") {
- Address Address, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- TriggerType Type, desc="Type of trigger";
-}
-
-/*
- GETX, desc="Get eXclusive";
- UPGRADE, desc="UPGRADE to exclusive";
- GETS, desc="Get Shared";
- GET_INSTR, desc="Get Instruction";
- INV, desc="INValidate";
- PUTX, desc="replacement message, for writeback to lower caches";
- REPLACE, desc="replacement message, from lowest cache";
- CHECK_WRITE_FILTER, desc="check write filter message";
- CHECK_READ_WRITE_FILTER, desc="check both read and write filters message";
-*/
-
-GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:PUTX) {
- return GenericRequestType:PUTX;
- } else if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETS_ESCAPE) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GET_INSTR) {
- return GenericRequestType:GET_INSTR;
- } else if(type == CoherenceRequestType:GET_INSTR_ESCAPE) {
- return GenericRequestType:GET_INSTR;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:GETX_ESCAPE) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:UPGRADE) {
- return GenericRequestType:UPGRADE;
- } else if(type == CoherenceRequestType:INV) {
- return GenericRequestType:INV;
- } else if( type == CoherenceRequestType:REPLACE) {
- return GenericRequestType:REPLACEMENT;
- } else {
- DEBUG_EXPR(type);
- error("invalid CoherenceRequestType");
- }
-}
-
-
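The convertToGenericType() helper deleted above collapses the *_ESCAPE request variants onto their base types, so the profiler treats escape-action requests as ordinary GETS/GETX/GET_INSTR. A standalone C++ rendering of the same mapping follows; the enum declarations are hypothetical and only mirror the deleted SLICC names.

#include <iostream>
#include <stdexcept>

enum class CoherenceRequestType { GETX, GETX_ESCAPE, UPGRADE, GETS, GETS_ESCAPE,
                                  GET_INSTR, GET_INSTR_ESCAPE, INV, INV_ESCAPE,
                                  PUTX, PUTS, REPLACE,
                                  CHECK_WRITE_FILTER, CHECK_READ_WRITE_FILTER };
enum class GenericRequestType { GETX, UPGRADE, GETS, GET_INSTR, INV, PUTX, REPLACEMENT };

GenericRequestType convertToGenericType(CoherenceRequestType t) {
    switch (t) {
    case CoherenceRequestType::PUTX:             return GenericRequestType::PUTX;
    case CoherenceRequestType::GETS:
    case CoherenceRequestType::GETS_ESCAPE:      return GenericRequestType::GETS;
    case CoherenceRequestType::GET_INSTR:
    case CoherenceRequestType::GET_INSTR_ESCAPE: return GenericRequestType::GET_INSTR;
    case CoherenceRequestType::GETX:
    case CoherenceRequestType::GETX_ESCAPE:      return GenericRequestType::GETX;
    case CoherenceRequestType::UPGRADE:          return GenericRequestType::UPGRADE;
    case CoherenceRequestType::INV:              return GenericRequestType::INV;
    case CoherenceRequestType::REPLACE:          return GenericRequestType::REPLACEMENT;
    default: throw std::invalid_argument("invalid CoherenceRequestType");
    }
}

int main() {
    // GETS_ESCAPE and GETS map to the same generic type (prints 1).
    std::cout << (convertToGenericType(CoherenceRequestType::GETS_ESCAPE) ==
                  convertToGenericType(CoherenceRequestType::GETS)) << std::endl;
    return 0;
}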
diff --git a/src/mem/protocol/MESI_CMP_filter_directory.slicc b/src/mem/protocol/MESI_CMP_filter_directory.slicc
deleted file mode 100644
index 715da5795..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory.slicc
+++ /dev/null
@@ -1,7 +0,0 @@
-../protocols/LogTM.sm
-../protocols/MESI_CMP_filter_directory-msg.sm
-../protocols/MESI_CMP_filter_directory-L2cache.sm
-../protocols/MESI_CMP_filter_directory-L1cache.sm
-../protocols/MESI_CMP_filter_directory-mem.sm
-../protocols/standard_CMP-protocol.sm
-
diff --git a/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm b/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm
deleted file mode 100644
index 2f8818489..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory_m-mem.sm
+++ /dev/null
@@ -1,250 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
- */
-
-// This file is copied from Yasuko Watanabe's prefetch / memory protocol
-// Copied here by aep 12/14/07
-
-
-machine(Directory, "MESI_CMP_filter_directory protocol") {
-
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
- MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
- MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
-
- // STATES
- enumeration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, desc="Owner";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- Fetch, desc="A memory fetch arrives";
- Data, desc="writeback data arrives";
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...") {
- DataBlock DataBlk, desc="data for the block";
- }
-
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- }
-
- // to simulate detailed DRAM
- external_type(MemoryControl, inport="yes", outport="yes") {
-
- }
-
-
- // ** OBJECTS **
-
- DirectoryMemory directory, constructor_hack="i";
- MemoryControl memBuffer, constructor_hack="i";
-
- State getState(Address addr) {
- return State:I;
- }
-
- void setState(Address addr, State state) {
- }
-
- bool isGETRequest(CoherenceRequestType type) {
- return (type == CoherenceRequestType:GETS) ||
- (type == CoherenceRequestType:GET_INSTR) ||
- (type == CoherenceRequestType:GETX);
- }
-
-
- // ** OUT_PORTS **
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
- // ** IN_PORTS **
-
- in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady()) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg);
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady()) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
- if (memQueue_in.isReady()) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.Address);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
-
-
- // Actions
- action(a_sendAck, "a", desc="Send ack to L2") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
- }
-
-  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue();
- }
-
- action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
-
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
- action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Sender;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := in_msg.Prefetch;
-
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
- action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
- }
- }
-
- // TRANSITIONS
-
- transition(I, Fetch) {
- //d_sendData;
- qf_queueMemoryFetchRequest;
- j_popIncomingRequestQueue;
- }
-
- transition(I, Data) {
- m_writeDataToMemory;
- //a_sendAck;
- qw_queueMemoryWBRequest;
- k_popIncomingResponseQueue;
- }
-
- transition(I, Memory_Data) {
- d_sendData;
- l_popMemQueue;
- }
-
- transition(I, Memory_Ack) {
- a_sendAck;
- l_popMemQueue;
- }
-}
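The two deleted directory variants differ mainly in how memory latency is modeled: MESI_CMP_filter_directory-mem.sm answers a Fetch directly with a fixed MEMORY_LATENCY enqueue, while MESI_CMP_filter_directory_m-mem.sm pushes the request through a MemoryControl buffer and only replies when the Memory_Data / Memory_Ack events come back from the simulated DRAM. A toy C++ sketch of that contrast; replyTimeFixed and MemoryControlModel are placeholder names, not gem5 classes.

#include <cstdint>
#include <iostream>
#include <queue>

struct Request { std::uint64_t addr; int requestor; };

// Fixed-latency directory: the reply is scheduled a constant delay after the request.
std::uint64_t replyTimeFixed(std::uint64_t now, std::uint64_t memoryLatency) {
    return now + memoryLatency;
}

// Memory-controller-backed directory: the reply time is whatever the DRAM model reports.
struct MemoryControlModel {
    std::queue<std::uint64_t> completions;   // stand-in for a detailed DRAM timing model
    void enqueue(const Request&, std::uint64_t completesAt) { completions.push(completesAt); }
    std::uint64_t nextCompletion() { auto t = completions.front(); completions.pop(); return t; }
};

int main() {
    Request r{0x1000, 3};
    std::cout << "fixed latency reply at cycle " << replyTimeFixed(100, 80) << "\n";
    MemoryControlModel mem;
    mem.enqueue(r, 215);   // the DRAM model decides when the access completes
    std::cout << "modeled DRAM reply at cycle " << mem.nextCompletion() << "\n";
    return 0;
}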
diff --git a/src/mem/protocol/MESI_CMP_filter_directory_m.slicc b/src/mem/protocol/MESI_CMP_filter_directory_m.slicc
deleted file mode 100644
index 43c9d4019..000000000
--- a/src/mem/protocol/MESI_CMP_filter_directory_m.slicc
+++ /dev/null
@@ -1,7 +0,0 @@
-../protocols/LogTM.sm
-../protocols/MESI_CMP_filter_directory-msg.sm
-../protocols/MESI_CMP_filter_directory-L2cache.sm
-../protocols/MESI_CMP_filter_directory-L1cache.sm
-../protocols/MESI_CMP_filter_directory_m-mem.sm
-../protocols/standard_CMP-protocol.sm
-
diff --git a/src/mem/ruby/config/RubyConfig.cc b/src/mem/ruby/config/RubyConfig.cc
index 3d615ac02..0e94efb46 100644
--- a/src/mem/ruby/config/RubyConfig.cc
+++ b/src/mem/ruby/config/RubyConfig.cc
@@ -39,7 +39,6 @@
#include "RubyConfig.hh"
#include "protocol_name.hh"
#include "util.hh"
-#include "interface.hh"
#include "Protocol.hh"
#define CHECK_POWER_OF_2(N) { if (!is_power_of_2(N)) { ERROR_MSG(#N " must be a power of 2."); }}
@@ -179,7 +178,6 @@ void RubyConfig::printConfiguration(ostream& out) {
out << "------------------" << endl;
out << "protocol: " << CURRENT_PROTOCOL << endl;
- SIMICS_print_version(out);
out << "compiled_at: " << __TIME__ << ", " << __DATE__ << endl;
out << "RUBY_DEBUG: " << bool_to_string(RUBY_DEBUG) << endl;
diff --git a/src/mem/ruby/config/config.hh b/src/mem/ruby/config/config.hh
index f853fb72b..3c3d87ef5 100644
--- a/src/mem/ruby/config/config.hh
+++ b/src/mem/ruby/config/config.hh
@@ -134,7 +134,7 @@ PARAM_BOOL( REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH );
// CACHE & MEMORY PARAMETERS
// *********************************************
-PARAM_BOOL( g_SIMICS );
+PARAM_BOOL( g_SIMULATING );
PARAM( L1_CACHE_ASSOC );
PARAM( L1_CACHE_NUM_SETS_BITS );
diff --git a/src/mem/ruby/config/rubyconfig.defaults b/src/mem/ruby/config/rubyconfig.defaults
index 3b86b4645..52b6603fb 100644
--- a/src/mem/ruby/config/rubyconfig.defaults
+++ b/src/mem/ruby/config/rubyconfig.defaults
@@ -36,7 +36,7 @@
//
g_RANDOM_SEED: 1
-g_SIMICS: true
+g_SIMULATING: true
g_DEADLOCK_THRESHOLD: 500000
diff --git a/src/mem/ruby/config/tester.defaults b/src/mem/ruby/config/tester.defaults
index ea83a1443..c9e963bda 100644
--- a/src/mem/ruby/config/tester.defaults
+++ b/src/mem/ruby/config/tester.defaults
@@ -6,7 +6,7 @@
// Please: - Add new variables only to rubyconfig.defaults file.
// - Change them here only when necessary.
-g_SIMICS: false
+g_SIMULATING: false
DATA_BLOCK: true
RANDOMIZATION: true
g_SYNTHETIC_DRIVER: true
diff --git a/src/mem/ruby/init.cc b/src/mem/ruby/init.cc
index 534b1c286..a2e874df8 100644
--- a/src/mem/ruby/init.cc
+++ b/src/mem/ruby/init.cc
@@ -42,30 +42,13 @@
#include "Debug.hh"
#include "Profiler.hh"
#include "Tester.hh"
-#include "OpalInterface.hh"
#include "init.hh"
-#include "interface.hh"
-
-#ifdef CONTIGUOUS_ADDRESSES
-#include "ContiguousAddressTranslator.hh"
-
-/* Declared in interface.C */
-extern ContiguousAddressTranslator * g_p_ca_translator;
-
-#endif // #ifdef CONTIGUOUS_ADDRESSES
using namespace std;
#include <string>
#include <map>
#include <stdlib.h>
-// Maurice
-// extern "C" {
-// #include "simics/api.hh"
-// };
-
-#include "FakeSimicsDataTypes.hh"
-
#include "confio.hh"
#include "initvar.hh"
@@ -73,210 +56,65 @@ using namespace std;
// The defaults are stored in the variable global_default_param
#include "default_param.hh"
-attr_value_t ruby_session_get( void *id, void *obj,
- attr_value_t *idx ) {
- attr_value_t ret;
-
- // all session attributes default to return invalid
- ret.kind = Sim_Val_Invalid;
- return ret;
-}
-
-set_error_t ruby_session_set( void *id, void *obj,
- attr_value_t *val, attr_value_t *idx ) {
- const char *command = (const char *) id;
- // Add new ruby commands to this function
-
-#if 0 // Eventually add these commands back in
- if (!strcmp(command, "dump-stats" ) ) {
- char* filename = (char*) val->u.string;
- if(strcmp(filename, "")){
- ruby_dump_stats(filename);
- } else {
- ruby_dump_stats(NULL);
- }
- return Sim_Set_Ok;
- } else if (!strcmp(command, "dump-short-stats" ) ) {
- char* filename = (char*) val->u.string;
- if(strcmp(filename, "")){
- ruby_dump_short_stats(filename);
- } else {
- ruby_dump_short_stats(NULL);
- }
- return Sim_Set_Ok;
- } else if (!strcmp(command, "periodic-stats-file" ) ) {
- char* filename = (char*) val->u.string;
- ruby_set_periodic_stats_file(filename);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "periodic-stats-interval" ) ) {
- int interval = val->u.integer;
- ruby_set_periodic_stats_interval(interval);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "clear-stats" ) ) {
- ruby_clear_stats();
- return Sim_Set_Ok;
- } else if (!strcmp(command, "debug-verb" ) ) {
- char* new_verbosity = (char*) val->u.string;
- ruby_change_debug_verbosity(new_verbosity);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "debug-filter" ) ) {
- char* new_debug_filter = (char*) val->u.string;
- ruby_change_debug_filter(new_debug_filter);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "debug-output-file" ) ) {
- char* new_filename = (char*) val->u.string;
- ruby_set_debug_output_file(new_filename);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "debug-start-time" ) ) {
- char* new_start_time = (char*) val->u.string;
- ruby_set_debug_start_time(new_start_time);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "load-caches" ) ) {
- char* filename = (char*) val->u.string;
- ruby_load_caches(filename);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "save-caches" ) ) {
- char* filename = (char*) val->u.string;
- ruby_save_caches(filename);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "dump-cache" ) ) {
- int cpuNumber = val->u.integer;
- ruby_dump_cache(cpuNumber);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "dump-cache-data" ) ) {
- int cpuNumber = val->u.list.vector[0].u.integer;
- char *filename = (char*) val->u.list.vector[1].u.string;
- ruby_dump_cache_data( cpuNumber, filename );
- return Sim_Set_Ok;
- } else if (!strcmp(command, "tracer-output-file" ) ) {
- char* new_filename = (char*) val->u.string;
- ruby_set_tracer_output_file(new_filename);
- return Sim_Set_Ok;
- } else if (!strcmp(command, "xact-visualizer-file" ) ) {
- char* new_filename = (char*) val->u.string;
- ruby_xact_visualizer_file(new_filename);
- return Sim_Set_Ok;
- }
- fprintf( stderr, "error: unrecognized command: %s\n", command );
-#endif
- return Sim_Set_Illegal_Value;
-}
-
-static initvar_t *ruby_initvar_obj = NULL;
+static initvar_t *ruby_initvar_obj = NULL;
//***************************************************************************
static void init_generate_values( void )
{
- /* update generated values, based on input configuration */
+ /* update generated values, based on input configuration */
}
//***************************************************************************
void init_variables( void )
{
- // allocate the "variable initialization" package
- ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
- global_default_param,
- &init_simulator,
- &init_generate_values,
- &ruby_session_get,
- &ruby_session_set );
+ // allocate the "variable initialization" package
+ ruby_initvar_obj = new initvar_t( "ruby", "../../../ruby/",
+ global_default_param,
+ &init_simulator,
+ &init_generate_values );
}
void init_simulator()
{
- // Set things to NULL to make sure we don't de-reference them
- // without a seg. fault.
- g_system_ptr = NULL;
- g_debug_ptr = NULL;
- g_eventQueue_ptr = NULL;
+ // Set things to NULL to make sure we don't de-reference them
+ // without a seg. fault.
+ g_system_ptr = NULL;
+ g_debug_ptr = NULL;
+ g_eventQueue_ptr = NULL;
- cout << "Ruby Timing Mode" << endl;
+ cout << "Ruby Timing Mode" << endl;
- if (g_SIMICS) {
- // LUKE - if we don't set the default SMT threads in condor scripts,
- // set it now
- if(g_NUM_SMT_THREADS == 0){
- g_NUM_SMT_THREADS = 1;
- }
- if(g_NUM_PROCESSORS == 0){
- //only set to default if value not set in condor scripts
- // Account for SMT systems also
- g_NUM_PROCESSORS = SIMICS_number_processors()/g_NUM_SMT_THREADS;
- }
- }
+ RubyConfig::init();
- RubyConfig::init();
+ g_debug_ptr = new Debug( DEBUG_FILTER_STRING,
+ DEBUG_VERBOSITY_STRING,
+ DEBUG_START_TIME,
+ DEBUG_OUTPUT_FILENAME );
- g_debug_ptr = new Debug( DEBUG_FILTER_STRING,
- DEBUG_VERBOSITY_STRING,
- DEBUG_START_TIME,
- DEBUG_OUTPUT_FILENAME );
+ cout << "Creating event queue..." << endl;
+ g_eventQueue_ptr = new RubyEventQueue;
+ cout << "Creating event queue done" << endl;
- cout << "Creating event queue..." << endl;
- g_eventQueue_ptr = new RubyEventQueue;
- cout << "Creating event queue done" << endl;
+ cout << "Creating system..." << endl;
+ cout << " Processors: " << RubyConfig::numberOfProcessors() << endl;
- cout << "Creating system..." << endl;
- cout << " Processors: " << RubyConfig::numberOfProcessors() << endl;
+ g_system_ptr = new RubySystem;
+ cout << "Creating system done" << endl;
- g_system_ptr = new RubySystem;
- cout << "Creating system done" << endl;
-
- // if opal is loaded, its static interface object (inst) will be non-null,
- // and the opal object needs to be notified that ruby is now loaded.
- // "1" indicates a load and should be replaced with an enumerated type.
- if (OpalInterface::inst != NULL) {
- OpalInterface::inst->notify( 1 );
- }
-
-#ifdef CONTIGUOUS_ADDRESSES
- if(g_SIMICS) {
- cout << "Establishing Contiguous Address Space Mappings..." << flush;
- g_p_ca_translator = new ContiguousAddressTranslator();
- assert(g_p_ca_translator!=NULL);
- if(g_p_ca_translator->AddressesAreContiguous()) {
- cout << "Physical Memory Addresses are already contiguous." << endl;
- delete g_p_ca_translator;
- g_p_ca_translator = NULL;
- } else {
- cout << "Done." << endl;
- }
- } else {
- g_p_ca_translator = NULL;
- }
-#endif // #ifdef CONTIGUOUS_ADDRESSES
-
- cout << "Ruby initialization complete" << endl;
-}
-
-void init_opal_interface( mf_ruby_api_t *api )
-{
- OpalInterface::installInterface( api );
-}
-
-int init_use_snoop()
-{
- if (g_SIMICS) {
- // The "snoop interface" defined by simics allows ruby to see store
- // data values (from simics). If DATA_BLOCK is defined, we are tracking
- // data, so we need to install the snoop interface.
- return ((DATA_BLOCK == true) || (XACT_MEMORY));
- } else {
- return (0);
- }
+ cout << "Ruby initialization complete" << endl;
}
void destroy_simulator()
{
- cout << "Deleting system..." << endl;
- delete g_system_ptr;
- cout << "Deleting system done" << endl;
+ cout << "Deleting system..." << endl;
+ delete g_system_ptr;
+ cout << "Deleting system done" << endl;
- cout << "Deleting event queue..." << endl;
- delete g_eventQueue_ptr;
- cout << "Deleting event queue done" << endl;
+ cout << "Deleting event queue..." << endl;
+ delete g_eventQueue_ptr;
+ cout << "Deleting event queue done" << endl;
- delete g_debug_ptr;
+ delete g_debug_ptr;
}
/*-------------------------------------------------------------------------+
@@ -286,19 +124,23 @@ void destroy_simulator()
extern "C"
int OnLoadRuby() {
- init_variables();
- return 0;
+ init_variables();
+ return 0;
}
extern "C"
int OnInitRuby() {
- init_simulator();
- return 0;
+ init_simulator();
+ return 0;
}
extern "C"
int OnUnloadRuby() {
- destroy_simulator();
- return 0;
+ destroy_simulator();
+ return 0;
}
+/* Temporary placement for the tester entry point. */
+void tester_main(int argc, char **argv) {
+ std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented." << std::endl;
+}
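
With the Simics session get/set callbacks gone, Ruby's lifecycle reduces to the three extern "C" entry points kept above. Below is a minimal sketch of a host program driving that lifecycle; it assumes the Ruby objects defining these symbols are linked in.

// Minimal sketch of a host driving the trimmed-down init.cc lifecycle.
extern "C" {
    int OnLoadRuby();    // calls init_variables(): set up the initvar package
    int OnInitRuby();    // calls init_simulator(): debug, event queue, RubySystem
    int OnUnloadRuby();  // calls destroy_simulator(): tear everything down
}

int main() {
    if (OnLoadRuby() != 0) return 1;   // read configuration defaults
    if (OnInitRuby() != 0) return 1;   // construct the simulated memory system
    // ... drive the simulation here ...
    return OnUnloadRuby();             // delete system, event queue, debug
}
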
diff --git a/src/mem/ruby/init.hh b/src/mem/ruby/init.hh
index 36d975b3e..8fec5a7c8 100644
--- a/src/mem/ruby/init.hh
+++ b/src/mem/ruby/init.hh
@@ -39,18 +39,8 @@
#ifndef INIT_H
#define INIT_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
extern void init_variables();
extern void init_simulator();
-extern void init_opal_interface( mf_ruby_api_t *api );
-extern int init_use_snoop();
extern void destroy_simulator();
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
#endif //INIT_H
diff --git a/src/mem/ruby/interfaces/OpalInterface.cc b/src/mem/ruby/interfaces/OpalInterface.cc
deleted file mode 100644
index 362c7bcb6..000000000
--- a/src/mem/ruby/interfaces/OpalInterface.cc
+++ /dev/null
@@ -1,446 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-
-#include "OpalInterface.hh"
-#include "interface.hh"
-#include "System.hh"
-#include "SubBlock.hh"
-#include "mf_api.hh"
-#include "Chip.hh"
-#include "RubyConfig.hh"
-//#include "XactIsolationChecker.hh" //gem5:Arka for decomissioning ruby/log_tm
-// #include "TransactionInterfaceManager.hh"
-//#include "TransactionVersionManager.hh" //gem5:Arka for decomissioning ruby/log_tm
-#include "Sequencer.hh"
-
-/*------------------------------------------------------------------------*/
-/* Forward declarations */
-/*------------------------------------------------------------------------*/
-
-static CacheRequestType get_request_type( OpalMemop_t opaltype );
-static OpalMemop_t get_opal_request_type( CacheRequestType type );
-
-/// The static opalinterface instance
-OpalInterface *OpalInterface::inst = NULL;
-
-/*------------------------------------------------------------------------*/
-/* Constructor(s) / destructor */
-/*------------------------------------------------------------------------*/
-
-//**************************************************************************
-OpalInterface::OpalInterface(System* sys_ptr) {
- clearStats();
- ASSERT( inst == NULL );
- inst = this;
- m_opal_intf = NULL;
-}
-
-/*------------------------------------------------------------------------*/
-/* Public methods */
-/*------------------------------------------------------------------------*/
-
-//**************************************************************************
-void OpalInterface::printConfig(ostream& out) const {
- out << "Opal_ruby_multiplier: " << OPAL_RUBY_MULTIPLIER << endl;
- out << endl;
-}
-
-void OpalInterface::printStats(ostream& out) const {
- out << endl;
- out << "Opal Interface Stats" << endl;
- out << "----------------------" << endl;
- out << endl;
-}
-
-//**************************************************************************
-void OpalInterface::clearStats() {
-}
-
-//**************************************************************************
-integer_t OpalInterface::getInstructionCount(int procID) const {
- return ((*m_opal_intf->getInstructionCount)(procID));
-}
-
-//*************************************************************************
-uint64 OpalInterface::getOpalTime(int procID) const {
- return ((*m_opal_intf->getOpalTime)(procID));
-}
-
-/************ For WATTCH power stats ************************************/
-//*************************************************************************
-void OpalInterface::incrementL2Access(int procID) const{
- ((*m_opal_intf->incrementL2Access)(procID));
-}
-
-//*************************************************************************
-void OpalInterface::incrementPrefetcherAccess(int procID, int num_prefetches, int isinstr) const{
- ((*m_opal_intf->incrementPrefetcherAccess)(procID, num_prefetches, isinstr));
-}
-/******************** END WATTCH power stats ****************************/
-
-// Notifies Opal of an L2 miss
-//*************************************************************************
-void OpalInterface::notifyL2Miss(int procID, physical_address_t physicalAddr, OpalMemop_t type, int tagexists) const{
- ((*m_opal_intf->notifyL2Miss)(procID, physicalAddr, type, tagexists));
-}
-
-/******************************************************************
- * void hitCallback(int cpuNumber)
- * Called by Sequencer. Calls opal.
- ******************************************************************/
-
-//**************************************************************************
-void OpalInterface::hitCallback(NodeID proc, SubBlock& data, CacheRequestType type, int thread) {
- // notify opal that the transaction has completed
- (*m_opal_intf->hitCallback)( proc, data.getAddress().getAddress(), get_opal_request_type(type), thread );
-}
-
-//**************************************************************************
-// Useful functions if Ruby needs to read/write physical memory when running with Opal
-integer_t OpalInterface::readPhysicalMemory(int procID,
- physical_address_t address,
- int len ){
- return SIMICS_read_physical_memory(procID, address, len);
-}
-
-//**************************************************************************
-void OpalInterface::writePhysicalMemory( int procID,
- physical_address_t address,
- integer_t value,
- int len ){
- SIMICS_write_physical_memory(procID, address, value, len);
-}
-
-//***************************************************************
-// notifies Opal to print debug info
-void OpalInterface::printDebug(){
- (*m_opal_intf->printDebug)();
-}
-
-//***************************************************************
-
-/******************************************************************
- * Called by opal's memory operations (memop.C)
- * May call Sequencer.
- ******************************************************************/
-
-//****************************************************************************
-int OpalInterface::getNumberOutstanding( int cpuNumber ){
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
-
- return targetSequencer_ptr->getNumberOutstanding();
-}
-
-//****************************************************************************
-int OpalInterface::getNumberOutstandingDemand( int cpuNumber ){
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
-
- return targetSequencer_ptr->getNumberOutstandingDemand();
-}
-
-//****************************************************************************
-int OpalInterface::getNumberOutstandingPrefetch( int cpuNumber ){
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
-
- return targetSequencer_ptr->getNumberOutstandingPrefetch();
-}
-
-//**************************************************************************
-int OpalInterface::isReady( int cpuNumber, la_t logicalAddr, pa_t physicalAddr, OpalMemop_t typeOfRequest, int thread ) {
- // Send request to sequencer
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
-
-  // FIXME - some of these fields have bogus values since isReady()
- // doesn't need them. However, it would be cleaner if all of these
- // fields were exact.
-
- return (targetSequencer_ptr->isReady(CacheMsg(Address( physicalAddr ),
- Address( physicalAddr ),
- get_request_type(typeOfRequest),
- Address(0),
- AccessModeType_UserMode, // User/supervisor mode
- 0, // Size in bytes of request
- PrefetchBit_No, // Not a prefetch
- 0, // Version number
- Address(logicalAddr), // Virtual Address
- thread, // SMT thread
- 0, // TM specific - timestamp of memory request
- false // TM specific - whether request is part of escape action
- )
- ));
-}
-
-// FIXME: duplicated code should be avoided
-//**************************************************************************
-void OpalInterface::makeRequest(int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
- int requestSize, OpalMemop_t typeOfRequest,
- la_t virtualPC, int isPriv, int thread) {
- // Issue the request to the sequencer.
- // set access type (user/supervisor)
- AccessModeType access_mode;
- if (isPriv) {
- access_mode = AccessModeType_SupervisorMode;
- } else {
- access_mode = AccessModeType_UserMode;
- }
-
- // Send request to sequencer
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
-
- targetSequencer_ptr->makeRequest(CacheMsg(Address( physicalAddr ),
- Address( physicalAddr ),
- get_request_type(typeOfRequest),
- Address(virtualPC),
- access_mode, // User/supervisor mode
- requestSize, // Size in bytes of request
- PrefetchBit_No, // Not a prefetch
- 0, // Version number
- Address(logicalAddr), // Virtual Address
- thread, // SMT thread
- 0, // TM specific - timestamp of memory request
- false // TM specific - whether request is part of escape action
- )
- );
-}
-
-
-//**************************************************************************
-void OpalInterface::makePrefetch(int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
- int requestSize, OpalMemop_t typeOfRequest,
- la_t virtualPC, int isPriv, int thread) {
- DEBUG_MSG(SEQUENCER_COMP,MedPrio,"Making prefetch request");
-
- // Issue the request to the sequencer.
- // set access type (user/supervisor)
- AccessModeType access_mode;
- if (isPriv) {
- access_mode = AccessModeType_SupervisorMode;
- } else {
- access_mode = AccessModeType_UserMode;
- }
-
- // make the prefetch
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
- targetSequencer_ptr->makeRequest(CacheMsg(Address( physicalAddr ),
- Address( physicalAddr ),
- get_request_type(typeOfRequest),
- Address(virtualPC),
- access_mode,
- requestSize,
- PrefetchBit_Yes, // True means this is a prefetch
- 0, // Version number
- Address(logicalAddr), // Virtual Address
- thread, // SMT thread
- 0, // TM specific - timestamp of memory request
- false // TM specific - whether request is part of escape action
- )
- );
- return;
-}
-
-//**************************************************************************
-int OpalInterface::staleDataRequest( int cpuNumber, pa_t physicalAddr,
- int requestSize, int8 *buffer ) {
- // Find sequencer
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
- assert(targetSequencer_ptr != NULL);
-
- // query the cache for stale data values (if any)
- bool hit = false;
- //hit = targetSequencer_ptr->staleDataAccess( Address(physicalAddr),
- // requestSize, buffer );
-
- return hit;
-}
-
-//**************************************************************************
-void OpalInterface::notify( int status ) {
- if (OpalInterface::inst == NULL) {
- if (status == 1) {
- // notify system that opal is now loaded
- g_system_ptr->opalLoadNotify();
- } else {
- return;
- }
- }
-
- // opal interface must be allocated now
- ASSERT( OpalInterface::inst != NULL );
- if ( status == 0 ) {
-
- } else if ( status == 1 ) {
- // install notification: query opal for its interface
- OpalInterface::inst->queryOpalInterface();
- if ( OpalInterface::inst->m_opal_intf != NULL ) {
- cout << "OpalInterface: installation successful." << endl;
- // test: (*(OpalInterface::inst->m_opal_intf->hitCallback))( 0, 0xFFULL );
- }
- } else if ( status == 2 ) {
- // unload notification
- // NOTE: this is not tested, as we can't unload ruby or opal right now.
- OpalInterface::inst->removeOpalInterface();
- }
-}
-
-// advance ruby time
-//**************************************************************************
-int OpalInterface::s_advance_counter = 0;
-
-void OpalInterface::advanceTime( void ) {
- s_advance_counter++;
- if (s_advance_counter == OPAL_RUBY_MULTIPLIER) {
- Time time = g_eventQueue_ptr->getTime() + 1;
- DEBUG_EXPR(NODE_COMP, HighPrio, time);
- g_eventQueue_ptr->triggerEvents(time);
- s_advance_counter = 0;
- }
-}
-
-// return ruby's time
-//**************************************************************************
-unsigned long long OpalInterface::getTime( void ) {
- return (g_eventQueue_ptr->getTime());
-}
-
-// prints Ruby's outstanding request table
-void OpalInterface::printProgress(int cpuNumber){
- Sequencer* targetSequencer_ptr = g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->getSequencer(cpuNumber%RubyConfig::numberOfProcsPerChip());
- assert(targetSequencer_ptr != NULL);
-
- targetSequencer_ptr->printProgress(cout);
-}
-
-// Non-method helper function
-//**************************************************************************
-static CacheRequestType get_request_type( OpalMemop_t opaltype ) {
- CacheRequestType type;
-
- if (opaltype == OPAL_LOAD) {
- type = CacheRequestType_LD;
- } else if (opaltype == OPAL_STORE){
- type = CacheRequestType_ST;
- } else if (opaltype == OPAL_IFETCH){
- type = CacheRequestType_IFETCH;
- } else if (opaltype == OPAL_ATOMIC){
- type = CacheRequestType_ATOMIC;
- } else {
- ERROR_MSG("Error: Strange memory transaction type: not a LD or a ST");
- }
- return type;
-}
-
-//**************************************************************************
-static OpalMemop_t get_opal_request_type( CacheRequestType type ) {
- OpalMemop_t opal_type;
-
- if(type == CacheRequestType_LD){
- opal_type = OPAL_LOAD;
- }
- else if( type == CacheRequestType_ST){
- opal_type = OPAL_STORE;
- }
- else if( type == CacheRequestType_IFETCH){
- opal_type = OPAL_IFETCH;
- }
- else if( type == CacheRequestType_ATOMIC){
- opal_type = OPAL_ATOMIC;
- }
- else{
- ERROR_MSG("Error: Strange memory transaction type: not a LD or a ST");
- }
-
- //cout << "get_opal_request_type() CacheRequestType[ " << type << " ] opal_type[ " << opal_type << " ] " << endl;
- return opal_type;
-}
-
-//**************************************************************************
-void OpalInterface::removeOpalInterface( void ) {
- cout << "ruby: opal uninstalled. reinstalling timing model." << endl;
- SIMICS_install_timing_model();
-}
-
-//**************************************************************************
-bool OpalInterface::isOpalLoaded( void ) {
- if (!g_SIMICS) {
- return false;
- } else {
- mf_opal_api_t *opal_interface = SIMICS_get_opal_interface();
- if ( opal_interface == NULL ) {
- return false;
- } else {
- return true;
- }
- }
-}
-
-//**************************************************************************
-void OpalInterface::queryOpalInterface( void ) {
- m_opal_intf = SIMICS_get_opal_interface();
- if ( m_opal_intf == NULL ) {
- WARN_MSG("error: OpalInterface: opal does not implement mf-opal-api interface.\n");
- } else {
- // opal is loaded -- remove the timing_model interface
- cout << "Ruby: ruby-opal link established. removing timing_model." << endl;
- SIMICS_remove_timing_model();
-
- if (m_opal_intf->notifyCallback != NULL) {
- cout << "opalinterface: doing notify callback\n";
- (*m_opal_intf->notifyCallback)( 1 );
- } else {
- // 2/27/2005, removed spurious error message (MRM)
- // cout << "error: opalinterface: mf-opal-api has NULL notify callback.\n";
- }
- }
-}
-
-// install the opal interface to simics
-//**************************************************************************
-void OpalInterface::installInterface( mf_ruby_api_t *api ) {
- // install ruby interface
- api->isReady = &OpalInterface::isReady;
- api->makeRequest = &OpalInterface::makeRequest;
- api->makePrefetch = &OpalInterface::makePrefetch;
- api->advanceTime = &OpalInterface::advanceTime;
- api->getTime = &OpalInterface::getTime;
- api->staleDataRequest = &OpalInterface::staleDataRequest;
- api->notifyCallback = &OpalInterface::notify;
- api->getNumberOutstanding = &OpalInterface::getNumberOutstanding;
- api->getNumberOutstandingDemand = &OpalInterface::getNumberOutstandingDemand;
- api->getNumberOutstandingPrefetch = &OpalInterface::getNumberOutstandingPrefetch;
- api->printProgress = &OpalInterface::printProgress;
-}
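
Nearly every static method in the file removed above locates its Sequencer with the same chip/offset arithmetic on the CPU number. The small stand-alone restatement below makes that mapping explicit; the struct and function names are invented, and procsPerChip stands in for RubyConfig::numberOfProcsPerChip().

#include <cassert>

// Restates the cpu -> (chip, sequencer) arithmetic used throughout the
// removed OpalInterface.cc; names here are invented for illustration.
struct SequencerLocation { int chip; int sequencerWithinChip; };

SequencerLocation locateSequencer(int cpuNumber, int procsPerChip) {
    assert(procsPerChip > 0 && cpuNumber >= 0);
    return { cpuNumber / procsPerChip,     // getChip(...) index
             cpuNumber % procsPerChip };   // getSequencer(...) index on that chip
}

// Example: with 4 processors per chip, CPU 6 maps to chip 1, sequencer 2.
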
diff --git a/src/mem/ruby/interfaces/OpalInterface.hh b/src/mem/ruby/interfaces/OpalInterface.hh
deleted file mode 100644
index 4bc63d15a..000000000
--- a/src/mem/ruby/interfaces/OpalInterface.hh
+++ /dev/null
@@ -1,214 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- * Description:
- *
- */
-
-#ifndef OpalInterface_H
-#define OpalInterface_H
-
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-
-#include "Global.hh"
-#include "Driver.hh"
-#include "mf_api.hh"
-#include "CacheRequestType.hh"
-
-/*------------------------------------------------------------------------*/
-/* Class declaration(s) */
-/*------------------------------------------------------------------------*/
-
-class System;
-class TransactionInterfaceManager;
-class Sequencer;
-
-/**
- * the processor model (opal) calls these OpalInterface APIs to access
- * the memory hierarchy (ruby).
- * @see pseq_t
- * @author cmauer
- * @version $Id$
- */
-class OpalInterface : public Driver {
-public:
- // Constructors
- OpalInterface(System* sys_ptr);
-
- // Destructor
- // ~OpalInterface();
-
- integer_t getInstructionCount(int procID) const;
- void hitCallback( NodeID proc, SubBlock& data, CacheRequestType type, int thread );
- void printStats(ostream& out) const;
- void clearStats();
- void printConfig(ostream& out) const;
- void print(ostream& out) const;
-
- integer_t readPhysicalMemory(int procID, physical_address_t address,
- int len );
-
- void writePhysicalMemory( int procID, physical_address_t address,
- integer_t value, int len );
- uint64 getOpalTime(int procID) const;
-
- // for WATTCH power
- void incrementL2Access(int procID) const;
- void incrementPrefetcherAccess(int procID, int num_prefetches, int isinstr) const;
-
- // notifies Opal of an L2 miss
- void notifyL2Miss(int procID, physical_address_t physicalAddr, OpalMemop_t type, int tagexists) const;
-
- void printDebug();
-
- /// The static opalinterface instance
- static OpalInterface *inst;
-
- /// static methods
- static int getNumberOutstanding(int cpuNumber);
- static int getNumberOutstandingDemand(int cpuNumber);
- static int getNumberOutstandingPrefetch( int cpuNumber );
-
- /* returns true if the sequencer is able to handle more requests.
- This implements "back-pressure" by which the processor knows
- not to issue more requests if the network or cache's limits are reached.
- */
- static int isReady( int cpuNumber, la_t logicalAddr, pa_t physicalAddr, OpalMemop_t typeOfRequest, int thread );
-
- /*
- makeRequest performs the coherence transactions necessary to get the
- physical address in the cache with the correct permissions. More than
- one request can be outstanding to ruby, but only one per block address.
- The size of the cache line is defined to Intf_CacheLineSize.
- When a request is finished (e.g. the cache contains physical address),
- ruby calls completedRequest(). No request can be bigger than
- Opal_CacheLineSize. It is illegal to request non-aligned memory
- locations. A request of size 2 must be at an even byte, a size 4 must
- be at a byte address half-word aligned, etc. Requests also can't cross a
-    cache-line boundary.
- */
- static void makeRequest(int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
- int requestSize, OpalMemop_t typeOfRequest,
- la_t virtualPC, int isPriv, int thread);
-
- /* prefetch a given block...
- */
- static void makePrefetch(int cpuNumber, la_t logicalAddr, pa_t physicalAddr,
- int requestSize, OpalMemop_t typeOfRequest,
- la_t virtualPC, int isPriv, int thread);
-
- /*
- * request data from the cache, even if its state is "Invalid".
- */
- static int staleDataRequest( int cpuNumber, pa_t physicalAddr,
- int requestSize, int8 *buffer );
-
- /* notify ruby of opal's status
- */
- static void notify( int status );
-
- /*
- * advance ruby one cycle
- */
- static void advanceTime( void );
-
- /*
- * return ruby's cycle count.
- */
- static unsigned long long getTime( void );
-
- /* prints Ruby's outstanding request table */
- static void printProgress(int cpuNumber);
-
- /*
- * initialize / install the inter-module interface
- */
- static void installInterface( mf_ruby_api_t *api );
-
- /*
- * Test if opal is loaded or not
- */
- static bool isOpalLoaded( void );
-
- /*
- * query opal for its api
- */
- void queryOpalInterface( void );
-
- /*
- * remove the opal interface (opal is unloaded).
- */
- void removeOpalInterface( void );
-
- /*
- * set the opal interface (used if stand-alone testing)
- */
- void setOpalInterface( mf_opal_api_t *opal_intf ) {
- m_opal_intf = opal_intf;
- }
-
- /**
- * Signal an abort
- */
- //void abortCallback(NodeID proc);
-
-private:
- // Private Methods
-
- // Private copy constructor and assignment operator
- OpalInterface(const OpalInterface& obj);
- OpalInterface& operator=(const OpalInterface& obj);
-
- // Data Members (m_ prefix)
- mf_opal_api_t *m_opal_intf;
- Time m_simicsStartTime;
-
- static int s_advance_counter;
-};
-
-// Output operator declaration
-ostream& operator<<(ostream& out, const OpalInterface& obj);
-
-// ******************* Definitions *******************
-
-// Output operator definition
-extern inline
-ostream& operator<<(ostream& out, const OpalInterface& obj)
-{
-// obj.print(out);
- out << flush;
- return out;
-}
-
-#endif // OpalInterface_H
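
The makeRequest() comment deleted above encodes the request-legality rules the processor model had to respect: no request larger than the cache line, aligned addresses, and no crossing of a cache-line boundary. The sketch below is an illustrative restatement of that pre-check, not Ruby code; the constant and function names are assumptions, and the comment's alignment examples are read here as natural alignment.

#include <cstdint>

// Illustrative restatement of the request-legality rules from the removed
// makeRequest() comment; kCacheLineSize stands in for Opal_CacheLineSize.
constexpr uint64_t kCacheLineSize = 64;

bool requestIsLegal(uint64_t physicalAddr, uint64_t requestSize) {
    if (requestSize == 0 || requestSize > kCacheLineSize)
        return false;                                  // larger than a line
    if (physicalAddr % requestSize != 0)
        return false;                                  // not naturally aligned
    uint64_t firstLine = physicalAddr / kCacheLineSize;
    uint64_t lastLine  = (physicalAddr + requestSize - 1) / kCacheLineSize;
    return firstLine == lastLine;                      // must not cross a line
}
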
diff --git a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
index 53dd67563..b35dfba67 100644
--- a/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
+++ b/src/mem/ruby/network/garnet-flexible-pipeline/NetworkConfig.hh
@@ -58,9 +58,6 @@ class NetworkConfig {
string filename = "network/garnet-flexible-pipeline/";
filename += NETCONFIG_DEFAULTS;
- if (g_SIMICS) {
- filename = "../../../ruby/"+filename;
- }
ifstream NetconfigFile( filename.c_str(), ios::in);
if(!NetconfigFile.is_open())
{
diff --git a/src/mem/ruby/network/simple/Topology.cc b/src/mem/ruby/network/simple/Topology.cc
index db886052f..a7454a5af 100644
--- a/src/mem/ruby/network/simple/Topology.cc
+++ b/src/mem/ruby/network/simple/Topology.cc
@@ -433,9 +433,6 @@ void Topology::makeFileSpecified()
+"_Memories-"+int_to_string(RubyConfig::numberOfMemories())
+".txt";
- if (g_SIMICS) {
- filename = "../../../ruby/"+filename;
- }
ifstream networkFile( filename.c_str() , ios::in);
if (!networkFile.is_open()) {
cerr << "Error: Could not open network file: " << filename << endl;
diff --git a/src/mem/ruby/profiler/Profiler.cc b/src/mem/ruby/profiler/Profiler.cc
index 9bb4b6b4c..456123a68 100644
--- a/src/mem/ruby/profiler/Profiler.cc
+++ b/src/mem/ruby/profiler/Profiler.cc
@@ -64,20 +64,11 @@
#include "Map.hh"
#include "Debug.hh"
#include "MachineType.hh"
-// #include "TransactionInterfaceManager.hh"
-#include "interface.hh"
-//#include "XactVisualizer.hh" //gem5:Arka for decomissioning log_tm
-//#include "XactProfiler.hh" //gem5:Arka for decomissioning log_tm
-
-// extern "C" {
-// #include "Rock.hh"
-// }
// Allows use of times() library call, which determines virtual runtime
#include <sys/times.h>
extern std::ostream * debug_cout_ptr;
-extern std::ostream * xact_cout_ptr;
static double process_memory_total();
static double process_memory_resident();
@@ -97,42 +88,11 @@ Profiler::Profiler()
m_inst_profiler_ptr = new AddressProfiler;
}
- //m_xact_profiler_ptr = new XactProfiler; //gem5:Arka for decomissioning og log_tm
-
m_conflicting_map_ptr = new Map<Address, Time>;
m_real_time_start_time = time(NULL); // Not reset in clearStats()
m_stats_period = 1000000; // Default
m_periodic_output_file_ptr = &cerr;
- m_xact_visualizer_ptr = &cout;
-
- //---- begin XACT_MEM code
- m_xactExceptionMap_ptr = new Map<int, int>;
- m_procsInXactMap_ptr = new Map<int, int>;
- m_abortIDMap_ptr = new Map<int, int>;
- m_commitIDMap_ptr = new Map<int, int>;
- m_xactRetryIDMap_ptr = new Map<int, int>;
- m_xactCyclesIDMap_ptr = new Map<int, int>;
- m_xactReadSetIDMap_ptr = new Map<int, int>;
- m_xactWriteSetIDMap_ptr = new Map<int, int>;
- m_xactLoadMissIDMap_ptr = new Map<int, int>;
- m_xactStoreMissIDMap_ptr = new Map<int, int>;
- m_xactInstrCountIDMap_ptr = new Map<int, integer_t>;
- m_abortPCMap_ptr = new Map<Address, int>;
- m_abortAddressMap_ptr = new Map<Address, int>;
- m_nackXIDMap_ptr = new Map<int, int>;
- m_nackXIDPairMap_ptr = new Map<int, Map<int, int> * >;
- m_nackPCMap_ptr = new Map<Address, int>;
- m_watch_address_list_ptr = new Map<Address, int>;
- m_readSetMatch_ptr = new Map<Address, int>;
- m_readSetNoMatch_ptr = new Map<Address, int>;
- m_writeSetMatch_ptr = new Map<Address, int>;
- m_writeSetNoMatch_ptr = new Map<Address, int>;
- m_xactReadFilterBitsSetOnCommit = new Map<int, Histogram>;
- m_xactReadFilterBitsSetOnAbort = new Map<int, Histogram>;
- m_xactWriteFilterBitsSetOnCommit = new Map<int, Histogram>;
- m_xactWriteFilterBitsSetOnAbort = new Map<int, Histogram>;
- //---- end XACT_MEM code
// for MemoryControl:
m_memReq = 0;
@@ -169,7 +129,6 @@ Profiler::~Profiler()
delete m_L1D_cache_profiler_ptr;
delete m_L1I_cache_profiler_ptr;
delete m_L2_cache_profiler_ptr;
- //delete m_xact_profiler_ptr; //gem5:Arka for decomissioning of log_tm
delete m_requestProfileMap_ptr;
delete m_conflicting_map_ptr;
}
@@ -192,14 +151,14 @@ void Profiler::wakeup()
integer_t total_misses = m_perProcTotalMisses.sum();
integer_t instruction_executed = perProcInstructionCount.sum();
- integer_t simics_cycles_executed = perProcCycleCount.sum();
+ integer_t cycles_executed = perProcCycleCount.sum();
integer_t transactions_started = m_perProcStartTransaction.sum();
integer_t transactions_ended = m_perProcEndTransaction.sum();
(*m_periodic_output_file_ptr) << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl;
(*m_periodic_output_file_ptr) << "total_misses: " << total_misses << " " << m_perProcTotalMisses << endl;
(*m_periodic_output_file_ptr) << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
- (*m_periodic_output_file_ptr) << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
+ (*m_periodic_output_file_ptr) << "cycles_executed: " << cycles_executed << " " << perProcCycleCount << endl;
(*m_periodic_output_file_ptr) << "transactions_started: " << transactions_started << " " << m_perProcStartTransaction << endl;
(*m_periodic_output_file_ptr) << "transactions_ended: " << transactions_ended << " " << m_perProcEndTransaction << endl;
(*m_periodic_output_file_ptr) << "L1TBE_usage: " << m_L1tbeProfile << endl;
@@ -350,7 +309,7 @@ void Profiler::printStats(ostream& out, bool short_stats)
integer_t user_misses = m_perProcUserMisses.sum();
integer_t supervisor_misses = m_perProcSupervisorMisses.sum();
integer_t instruction_executed = perProcInstructionCount.sum();
- integer_t simics_cycles_executed = perProcCycleCount.sum();
+ integer_t cycles_executed = perProcCycleCount.sum();
integer_t transactions_started = m_perProcStartTransaction.sum();
integer_t transactions_ended = m_perProcEndTransaction.sum();
@@ -364,7 +323,7 @@ void Profiler::printStats(ostream& out, bool short_stats)
out << "supervisor_misses: " << supervisor_misses << " " << m_perProcSupervisorMisses << endl;
out << endl;
out << "instruction_executed: " << instruction_executed << " " << perProcInstructionCount << endl;
- out << "simics_cycles_executed: " << simics_cycles_executed << " " << perProcCycleCount << endl;
+ out << "cycles_executed: " << cycles_executed << " " << perProcCycleCount << endl;
out << "cycles_per_instruction: " << (RubyConfig::numberOfProcessors()*double(ruby_cycles))/double(instruction_executed) << " " << perProcCPI << endl;
out << "misses_per_thousand_instructions: " << 1000.0 * (double(total_misses) / double(instruction_executed)) << " " << perProcMissesPerInsn << endl;
out << endl;
@@ -520,304 +479,6 @@ void Profiler::printStats(ostream& out, bool short_stats)
}
}
- if (XACT_MEMORY){
- // Transactional Memory stats
- out << "Transactional Memory Stats:" << endl;
- out << "------- xact --------" << endl;
- out << "xact_size_dist: " << m_xactSizes << endl;
- out << "xact_instr_count: " << m_xactInstrCount << endl;
- out << "xact_time_dist: " << m_xactCycles << endl;
- out << "xact_log_size_dist: " << m_xactLogs << endl;
- out << "xact_read_set_size_dist: " << m_xactReads << endl;
- out << "xact_write_set_size_dist: " << m_xactWrites << endl;
- out << "xact_overflow_read_lines_dist: " << m_xactOverflowReads << endl;
- out << "xact_overflow_write_lines_dist: " << m_xactOverflowWrites << endl;
- out << "xact_overflow_read_set_size_dist: " << m_xactOverflowTotalReads << endl;
- out << "xact_overflow_write_set_size_dist: " << m_xactOverflowTotalWrites << endl;
- out << "xact_miss_load_dist: " << m_xactLoadMisses << endl;
- out << "xact_miss_store_dist: " << m_xactStoreMisses << endl;
- out << "xact_nacked: " << m_xactNacked << endl;
- out << "xact_retries: " << m_xactRetries << endl;
- out << "xact_abort_delays: " << m_abortDelays << endl;
- out << "xact_aborts: " << m_transactionAborts << endl;
- if (ATMTP_ENABLED) {
- out << "xact_log_overflows: " << m_transactionLogOverflows << endl;
- out << "xact_cache_overflows: " << m_transactionCacheOverflows << endl;
- out << "xact_unsup_inst_aborts: " << m_transactionUnsupInsts << endl;
- out << "xact_save_rest_aborts: " << m_transactionSaveRestAborts << endl;
- }
- out << "xact_writebacks: " << m_transWBs << endl;
- out << "xact_extra_wbs: " << m_extraWBs << endl;
- out << "xact_handler_startup_delay: " << m_abortStarupDelay << endl;
- out << "xact_handler_per_block_delay: " << m_abortPerBlockDelay << endl;
- out << "xact_inferred_aborts: " << m_inferredAborts << endl;
- //out << "xact_histogram: " << m_procsInXact << endl;
-
- if (!short_stats) {
- Vector<int> nackedXIDKeys = m_nackXIDMap_ptr->keys();
- nackedXIDKeys.sortVector();
- out << endl;
- int total_nacks = 0;
- out << "------- xact Nacks by XID --------" << endl;
- for(int i=0; i<nackedXIDKeys.size(); i++) {
- int key = nackedXIDKeys[i];
- int count = m_nackXIDMap_ptr->lookup(key);
- total_nacks += count;
- out << "xact " << key << " "
- << setw(6) << dec << count
- << endl;
- }
- out << "Total Nacks: " << total_nacks << endl;
- out << "---------------" << endl;
- out << endl;
-
- // Print XID Nack Pairs
- Vector<int> nackedXIDPairKeys = m_nackXIDPairMap_ptr->keys();
- nackedXIDPairKeys.sortVector();
- out << endl;
- total_nacks = 0;
- out << "------- xact Nacks by XID Pairs --------" << endl;
- for(int i=0; i<nackedXIDPairKeys.size(); i++) {
- int key = nackedXIDPairKeys[i];
- Map<int, int> * my_map = m_nackXIDPairMap_ptr->lookup(key);
- Vector<int> my_keys = my_map->keys();
- my_keys.sortVector();
- for(int j=0; j<my_keys.size(); j++){
- int nid = my_keys[j];
- int count = my_map->lookup(nid);
- total_nacks += count;
- out << "xact " << key << " nacked by xact " << nid << " "
- << setw(6) << dec << count
- << endl;
- }
- }
- out << "Total Nacks: " << total_nacks << endl;
- out << "---------------" << endl;
- out << endl;
-
-
- Vector<Address> nackedPCKeys = m_nackPCMap_ptr->keys();
- nackedPCKeys.sortVector();
- out << endl;
- out << "------- xact Nacks by PC --------" << endl;
- for(int i=0; i<nackedPCKeys.size(); i++) {
- Address key = nackedPCKeys[i];
- int count = m_nackPCMap_ptr->lookup(key);
- out << "xact_Nack " << key << " "
- << setw(4) << dec << count
- << endl;
- }
- out << "---------------" << endl;
- out << endl;
-
-
- Vector<int> xactExceptionKeys = m_xactExceptionMap_ptr->keys();
- xactExceptionKeys.sortVector();
- out << "------- xact exceptions --------" << endl;
- for(int i=0; i<xactExceptionKeys.size(); i++) {
- int key = xactExceptionKeys[i];
- int count = m_xactExceptionMap_ptr->lookup(key);
- out << "xact_exception("
- << hex << key << "):"
- << setw(4) << dec << count
- << endl;
- }
- out << endl;
- out << "---------------" << endl;
- out << endl;
-
- Vector<int> abortIDKeys = m_abortIDMap_ptr->keys();
- abortIDKeys.sortVector();
- out << "------- xact abort by XID --------" << endl;
- for(int i=0; i<abortIDKeys.size(); i++) {
- int count = m_abortIDMap_ptr->lookup(abortIDKeys[i]);
- out << "xact_aborts("
- << dec << abortIDKeys[i] << "):"
- << setw(7) << count
- << endl;
- }
- out << endl;
- out << "---------------" << endl;
- out << endl;
-
- Vector<Address> abortedPCKeys = m_abortPCMap_ptr->keys();
- abortedPCKeys.sortVector();
- out << endl;
- out << "------- xact Aborts by PC --------" << endl;
- for(int i=0; i<abortedPCKeys.size(); i++) {
- Address key = abortedPCKeys[i];
- int count = m_abortPCMap_ptr->lookup(key);
- out << "xact_abort_pc " << key
- << setw(4) << dec << count
- << endl;
- }
- out << "---------------" << endl;
- out << endl;
-
- Vector<Address> abortedAddrKeys = m_abortAddressMap_ptr->keys();
- abortedAddrKeys.sortVector();
- out << endl;
- out << "------- xact Aborts by Address --------" << endl;
- for(int i=0; i<abortedAddrKeys.size(); i++) {
- Address key = abortedAddrKeys[i];
- int count = m_abortAddressMap_ptr->lookup(key);
- out << "xact_abort_address " << key
- << setw(4) << dec << count
- << endl;
- }
- out << "---------------" << endl;
- out << endl;
- } // !short_stats
-
- Vector<int> commitIDKeys = m_commitIDMap_ptr->keys();
- commitIDKeys.sortVector();
- out << "------- xact Commit Stats by XID --------" << endl;
- for(int i=0; i<commitIDKeys.size(); i++) {
- int count = m_commitIDMap_ptr->lookup(commitIDKeys[i]);
- double retry_count = (double)m_xactRetryIDMap_ptr->lookup(commitIDKeys[i]) / count;
- double cycles_count = (double)m_xactCyclesIDMap_ptr->lookup(commitIDKeys[i]) / count;
- double readset_count = (double)m_xactReadSetIDMap_ptr->lookup(commitIDKeys[i]) / count;
- double writeset_count = (double)m_xactWriteSetIDMap_ptr->lookup(commitIDKeys[i]) / count;
- double loadmiss_count = (double)m_xactLoadMissIDMap_ptr->lookup(commitIDKeys[i]) / count;
- double storemiss_count = (double)m_xactStoreMissIDMap_ptr->lookup(commitIDKeys[i]) / count;
- double instr_count = (double)m_xactInstrCountIDMap_ptr->lookup(commitIDKeys[i]) / count;
- out << "xact_stats id: "
- << dec << commitIDKeys[i]
- << " count: " << setw(7) << count
- << " Cycles: " << setw(7) << cycles_count
- << " Instr: " << setw(7) << instr_count
- << " ReadSet: " << setw(7) << readset_count
- << " WriteSet: " << setw(7) << writeset_count
- << " LoadMiss: " << setw(7) << loadmiss_count
- << " StoreMiss: " << setw(7) << storemiss_count
- << " Retry Count: " << setw(7) << retry_count
- << endl;
- }
- out << endl;
- out << "---------------" << endl;
- out << endl;
-
- if (!short_stats) {
- Vector<int> procsInXactKeys = m_procsInXactMap_ptr->keys();
- procsInXactKeys.sortVector();
- out << "------- xact histogram --------" << endl;
- for(int i=0; i<procsInXactKeys.size(); i++) {
- int count = m_procsInXactMap_ptr->lookup(procsInXactKeys[i]);
- int key = procsInXactKeys[i];
- out << "xact_histogram("
- << dec << key << "):"
- << setw(8) << count
- << endl;
- }
- out << endl;
- out << "---------------" << endl;
- out << endl;
-
- // Read/Write set Bloom filter stats
- //int false_reads = 0;
- long long int false_reads = m_readSetNoMatch;
- Vector<Address> fp_read_keys = m_readSetNoMatch_ptr->keys();
- out << "------- xact read set false positives -------" << endl;
- for(int i=0; i < fp_read_keys.size(); ++i){
- int count = m_readSetNoMatch_ptr->lookup(fp_read_keys[i]);
- //out << "read_false_positive( " << fp_read_keys[i] << " ): "
- // << setw(8) << dec << count << endl;
- false_reads += count;
- }
- out << "Total read set false positives : " << setw(8) << false_reads << endl;
- out << "-----------------------" << endl;
- out << endl;
-
- //int matching_reads = 0;
- long long int matching_reads = m_readSetMatch;
- long long int empty_checks = m_readSetEmptyChecks;
- Vector<Address> read_keys = m_readSetMatch_ptr->keys();
- out << "------- xact read set matches -------" << endl;
- for(int i=0; i < read_keys.size(); ++i){
- int count = m_readSetMatch_ptr->lookup(read_keys[i]);
- //out << "read_match( " << read_keys[i] << " ): "
- // << setw(8) << dec << count << endl;
- matching_reads += count;
- }
- out << "Total read set matches : " << setw(8) << matching_reads << endl;
- out << "Total read set empty checks : " << setw(8) << empty_checks << endl;
- double false_positive_pct = 0.0;
- if((false_reads + matching_reads)> 0){
- false_positive_pct = (1.0*false_reads)/(false_reads+matching_reads)*100.0;
- }
- out << "Read set false positives rate : " << false_positive_pct << "%" << endl;
- out << "-----------------------" << endl;
- out << endl;
-
- // for write set
- //int false_writes = 0;
- long long int false_writes = m_writeSetNoMatch;
- Vector<Address> fp_write_keys = m_writeSetNoMatch_ptr->keys();
- out << "------- xact write set false positives -------" << endl;
- for(int i=0; i < fp_write_keys.size(); ++i){
- int count = m_writeSetNoMatch_ptr->lookup(fp_write_keys[i]);
- //out << "write_false_positive( " << fp_write_keys[i] << " ): "
- // << setw(8) << dec << count << endl;
- false_writes += count;
- }
- out << "Total write set false positives : " << setw(8) << false_writes << endl;
- out << "-----------------------" << endl;
- out << endl;
-
- //int matching_writes = 0;
- long long int matching_writes = m_writeSetMatch;
- empty_checks = m_writeSetEmptyChecks;
- Vector<Address> write_keys = m_writeSetMatch_ptr->keys();
- out << "------- xact write set matches -------" << endl;
- for(int i=0; i < write_keys.size(); ++i){
- int count = m_writeSetMatch_ptr->lookup(write_keys[i]);
- //out << "write_match( " << write_keys[i] << " ): "
- // << setw(8) << dec << count << endl;
- matching_writes += count;
- }
- out << "Total write set matches : " << setw(8) << matching_writes << endl;
- out << "Total write set empty checks : " << setw(8) << empty_checks << endl;
- false_positive_pct = 0.0;
- if((matching_writes+false_writes) > 0){
- false_positive_pct = (1.0*false_writes)/(false_writes+matching_writes)*100.0;
- }
- out << "Write set false positives rate : " << false_positive_pct << "%" << endl;
- out << "-----------------------" << endl;
- out << endl;
-
- out << "----- Xact Signature Stats ------" << endl;
- Vector<int> xids = m_xactReadFilterBitsSetOnCommit->keys();
- for(int i=0; i < xids.size(); ++i){
- int xid = xids[i];
- out << "xid " << xid << " Read set bits set on commit: " << (m_xactReadFilterBitsSetOnCommit->lookup(xid)) << endl;
- }
- xids = m_xactWriteFilterBitsSetOnCommit->keys();
- for(int i=0; i < xids.size(); ++i){
- int xid = xids[i];
- out << "xid " << xid << " Write set bits set on commit: " << (m_xactWriteFilterBitsSetOnCommit->lookup(xid)) << endl;
- }
- xids = m_xactReadFilterBitsSetOnAbort->keys();
- for(int i=0; i < xids.size(); ++i){
- int xid = xids[i];
- out << "xid " << xid << " Read set bits set on abort: " << (m_xactReadFilterBitsSetOnAbort->lookup(xid)) << endl;
- }
- xids = m_xactWriteFilterBitsSetOnAbort->keys();
- for(int i=0; i < xids.size(); ++i){
- int xid = xids[i];
- out << "xid " << xid << " Write set bits set on abort: " << (m_xactWriteFilterBitsSetOnAbort->lookup(xid)) << endl;
- }
- out << endl;
-
- cout << "------- WATCHPOINTS --------" << endl;
- cout << "False Triggers : " << m_watchpointsFalsePositiveTrigger << endl;
- cout << "True Triggers : " << m_watchpointsTrueTrigger << endl;
- cout << "Total Triggers : " << m_watchpointsTrueTrigger + m_watchpointsFalsePositiveTrigger << endl;
- cout << "---------------" << endl;
- cout << endl;
- } // !short_stats
- //m_xact_profiler_ptr->printStats(out, short_stats); // gem5:Arka for decomissioning of log_tm
- } // XACT_MEMORY
-
if (!short_stats) {
out << "Request vs. RubySystem State Profile" << endl;
out << "--------------------------------" << endl;
@@ -993,75 +654,6 @@ void Profiler::clearStats()
m_L1D_cache_profiler_ptr->clearStats();
m_L1I_cache_profiler_ptr->clearStats();
m_L2_cache_profiler_ptr->clearStats();
- //m_xact_profiler_ptr->clearStats(); //gem5:Arka for decomissiong of log_tm
-
- //---- begin XACT_MEM code
- ASSERT(m_xactExceptionMap_ptr != NULL);
- ASSERT(m_procsInXactMap_ptr != NULL);
- ASSERT(m_abortIDMap_ptr != NULL);
- ASSERT(m_abortPCMap_ptr != NULL);
- ASSERT( m_nackXIDMap_ptr != NULL);
- ASSERT(m_nackPCMap_ptr != NULL);
-
- m_abortStarupDelay = -1;
- m_abortPerBlockDelay = -1;
- m_transWBs = 0;
- m_extraWBs = 0;
- m_transactionAborts = 0;
- m_transactionLogOverflows = 0;
- m_transactionCacheOverflows = 0;
- m_transactionUnsupInsts = 0;
- m_transactionSaveRestAborts = 0;
- m_inferredAborts = 0;
- m_xactNacked = 0;
-
- m_xactLogs.clear();
- m_xactCycles.clear();
- m_xactReads.clear();
- m_xactWrites.clear();
- m_xactSizes.clear();
- m_abortDelays.clear();
- m_xactRetries.clear();
- m_xactOverflowReads.clear();
- m_xactOverflowWrites.clear();
- m_xactLoadMisses.clear();
- m_xactStoreMisses.clear();
- m_xactOverflowTotalReads.clear();
- m_xactOverflowTotalWrites.clear();
-
- m_xactExceptionMap_ptr->clear();
- m_procsInXactMap_ptr->clear();
- m_abortIDMap_ptr->clear();
- m_commitIDMap_ptr->clear();
- m_xactRetryIDMap_ptr->clear();
- m_xactCyclesIDMap_ptr->clear();
- m_xactReadSetIDMap_ptr->clear();
- m_xactWriteSetIDMap_ptr->clear();
- m_xactLoadMissIDMap_ptr->clear();
- m_xactStoreMissIDMap_ptr->clear();
- m_xactInstrCountIDMap_ptr->clear();
- m_abortPCMap_ptr->clear();
- m_abortAddressMap_ptr->clear();
- m_nackXIDMap_ptr->clear();
- m_nackXIDPairMap_ptr->clear();
- m_nackPCMap_ptr->clear();
-
- m_xactReadFilterBitsSetOnCommit->clear();
- m_xactReadFilterBitsSetOnAbort->clear();
- m_xactWriteFilterBitsSetOnCommit->clear();
- m_xactWriteFilterBitsSetOnAbort->clear();
-
- m_readSetEmptyChecks = 0;
- m_readSetMatch = 0;
- m_readSetNoMatch = 0;
- m_writeSetEmptyChecks = 0;
- m_writeSetMatch = 0;
- m_writeSetNoMatch = 0;
-
- m_xact_visualizer_last = 0;
- m_watchpointsFalsePositiveTrigger = 0;
- m_watchpointsTrueTrigger = 0;
- //---- end XACT_MEM code
// for MemoryControl:
m_memReq = 0;
@@ -1357,6 +949,27 @@ void Profiler::profileTrainingMask(const Set& pred_set)
m_explicit_training_mask.add(pred_set.count());
}
+// For MemoryControl:
+void Profiler::profileMemReq(int bank) {
+ m_memReq++;
+ m_memBankCount[bank]++;
+}
+
+void Profiler::profileMemBankBusy() { m_memBankBusy++; }
+void Profiler::profileMemBusBusy() { m_memBusBusy++; }
+void Profiler::profileMemReadWriteBusy() { m_memReadWriteBusy++; }
+void Profiler::profileMemDataBusBusy() { m_memDataBusBusy++; }
+void Profiler::profileMemTfawBusy() { m_memTfawBusy++; }
+void Profiler::profileMemRefresh() { m_memRefresh++; }
+void Profiler::profileMemRead() { m_memRead++; }
+void Profiler::profileMemWrite() { m_memWrite++; }
+void Profiler::profileMemWaitCycles(int cycles) { m_memWaitCycles += cycles; }
+void Profiler::profileMemInputQ(int cycles) { m_memInputQ += cycles; }
+void Profiler::profileMemBankQ(int cycles) { m_memBankQ += cycles; }
+void Profiler::profileMemArbWait(int cycles) { m_memArbWait += cycles; }
+void Profiler::profileMemRandBusy() { m_memRandBusy++; }
+void Profiler::profileMemNotOld() { m_memNotOld++; }
+
int64 Profiler::getTotalInstructionsExecuted() const
{
int64 sum = 1; // Starting at 1 allows us to avoid division by zero
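
The profileMem* hooks added in this hunk are thin counters for the memory-controller path. The sketch below shows how a bank-access path might drive them; only the Profiler methods come from the hunk above, while the surrounding function and its arguments are assumptions.

#include "Profiler.hh"

// Hypothetical call site for the profileMem* counters added above.
void recordBankAccess(Profiler& profiler, int bank, bool isRead,
                      int cyclesInBankQueue) {
    profiler.profileMemReq(bank);                      // per-bank request count
    if (isRead) profiler.profileMemRead();
    else        profiler.profileMemWrite();
    if (cyclesInBankQueue > 0)
        profiler.profileMemBankQ(cyclesInBankQueue);   // queueing delay in cycles
}
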
@@ -1410,885 +1023,3 @@ GenericRequestType Profiler::CacheRequestType_to_GenericRequestType(const CacheR
}
}
-//---- begin Transactional Memory CODE
-void Profiler::profileTransaction(int size, int logSize, int readS, int writeS, int overflow_readS, int overflow_writeS, int retries, int useful_cycles, bool nacked, int loadMisses, int storeMisses, int instrCount, int xid){
- m_xactLogs.add(logSize);
- m_xactSizes.add(size);
- m_xactReads.add(readS);
- m_xactWrites.add(writeS);
- m_xactRetries.add(retries);
- m_xactCycles.add(useful_cycles);
- m_xactLoadMisses.add(loadMisses);
- m_xactStoreMisses.add(storeMisses);
- m_xactInstrCount.add(instrCount);
-
- // was this transaction nacked?
- if(nacked){
- m_xactNacked++;
- }
-
- // for overflowed transactions
- if(overflow_readS > 0 || overflow_writeS > 0){
- m_xactOverflowReads.add(overflow_readS);
- m_xactOverflowWrites.add(overflow_writeS);
- m_xactOverflowTotalReads.add(readS);
- m_xactOverflowTotalWrites.add(writeS);
- }
-
- // Record commits by xid
- if(!m_commitIDMap_ptr->exist(xid)){
- m_commitIDMap_ptr->add(xid, 1);
- m_xactRetryIDMap_ptr->add(xid, retries);
- m_xactCyclesIDMap_ptr->add(xid, useful_cycles);
- m_xactReadSetIDMap_ptr->add(xid, readS);
- m_xactWriteSetIDMap_ptr->add(xid, writeS);
- m_xactLoadMissIDMap_ptr->add(xid, loadMisses);
- m_xactStoreMissIDMap_ptr->add(xid, storeMisses);
- m_xactInstrCountIDMap_ptr->add(xid, instrCount);
- } else {
- (m_commitIDMap_ptr->lookup(xid))++;
- (m_xactRetryIDMap_ptr->lookup(xid)) += retries;
- (m_xactCyclesIDMap_ptr->lookup(xid)) += useful_cycles;
- (m_xactReadSetIDMap_ptr->lookup(xid)) += readS;
- (m_xactWriteSetIDMap_ptr->lookup(xid)) += writeS;
- (m_xactLoadMissIDMap_ptr->lookup(xid)) += loadMisses;
- (m_xactStoreMissIDMap_ptr->lookup(xid)) += storeMisses;
- (m_xactInstrCountIDMap_ptr->lookup(xid)) += instrCount;
- }
-}
-
-void Profiler::profileBeginTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen){
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 2)){
- const char* openStr = isOpen ? " OPEN" : " CLOSED";
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " XACT BEGIN " << xid
- << " PC 0x" << hex << pc.getAddress()
- << dec
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << openStr
- << endl;
- }
-}
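
profileBeginTransaction() and the trace helpers that follow all print one fixed-width, right-aligned line built from the same manipulators: flags(ios::right), setw() for the time and processor columns, and a hex/dec toggle around the PC. The standalone sketch below reproduces only that formatting; the cycle count, processor number, xid, and PC are placeholder values and no simulator state is read.

// Formatting-only sketch of the trace line layout used by the removed
// XACT helpers. All values are placeholders.
#include <iomanip>
#include <iostream>

int main() {
    const int ID_SPACES = 3;
    const int TIME_SPACES = 7;
    const unsigned long long cycle = 1234567;   // current simulated time
    const int proc_no = 5;                      // logical processor number
    const unsigned long long pc = 0x1000a8ULL;  // program counter

    std::cout.flags(std::ios::right);
    std::cout << std::setw(TIME_SPACES) << cycle << " "
              << std::setw(ID_SPACES) << proc_no
              << " XACT BEGIN " << 42            // 42 stands in for the xid
              << " PC 0x" << std::hex << pc << std::dec
              << std::endl;
    return 0;
}
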
-
-void Profiler::profileCommitTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen){
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 2)){
- const char* openStr = isOpen ? " OPEN" : " CLOSED";
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " XACT COMMIT " << xid
- << " PC 0x" << hex << pc.getAddress()
- << dec
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << openStr
- << endl;
- }
-
-}
-
-// for profiling overflows
-void Profiler::profileLoadOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow){
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- string overflow_str = " XACT LOAD L1 OVERFLOW ";
- if(!l1_overflow){
- overflow_str = " XACT LOAD L2 OVERFLOW ";
- }
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << overflow_str << xid
- << " ADDR " << addr
- << endl;
- }
-}
-
-// for profiling overflows
-void Profiler::profileStoreOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow){
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- string overflow_str = " XACT STORE L1 OVERFLOW ";
- if(!l1_overflow){
- overflow_str = " XACT STORE L2 OVERFLOW ";
- }
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << overflow_str << xid
- << " ADDR " << addr
- << endl;
- }
-}
-
-void Profiler::profileLoadTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 3)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " XACT LOAD " << xid
- << " " << addr
- << " VA " << logicalAddress
- << " PC " << pc
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- //<< " VAL 0x" << hex << SIMICS_read_physical_memory(proc_no, SIMICS_translate_data_address(proc_no, logicalAddress), 4) << dec
- << " VAL 0x" << hex << g_system_ptr->getDriver()->readPhysicalMemory(proc_no, addr.getAddress(), 4) << dec
- << endl;
- }
-}
-
-void Profiler::profileLoad(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
- if(PROFILE_NONXACT){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " LOAD " << xid
- << " " << addr
- << " VA " << logicalAddress
- << " PC " << pc
- //<< " VAL 0x" << hex << SIMICS_read_physical_memory(proc_no, SIMICS_translate_data_address(proc_no, logicalAddress), 4) << dec
- << " VAL 0x" << hex << g_system_ptr->getDriver()->readPhysicalMemory(proc_no, addr.getAddress(), 4) << dec
- << endl;
- }
-}
-
-void Profiler::profileStoreTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 3)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " XACT STORE " << xid
- << " " << addr
- << " VA " << logicalAddress
- << " PC " << pc
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
- }
-}
-
-void Profiler::profileStore(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc){
- if(PROFILE_NONXACT){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- // The actual processor number
- int proc_no = id*RubyConfig::numberofSMTThreads() + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " STORE " << xid
- << " " << addr
- << " VA " << logicalAddress
- << " PC " << pc
- << endl;
- }
-}
-
-void Profiler::profileNack(NodeID id, int tid, int xid, int thread, int nacking_thread, NodeID nackedBy, Address addr, Address logicalAddress, Address pc, uint64 seq_ts, uint64 nack_ts, bool possibleCycle){
- int nid = 0; // g_system_ptr->getChip(nackedBy/RubyConfig::numberOfProcsPerChip())->getTransactionInterfaceManager(nackedBy%RubyConfig::numberOfProcsPerChip())->getXID(nacking_thread);
- assert(0);
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- // The actual processor number
- int proc_no = id*g_NUM_SMT_THREADS + thread;
- int nack_proc_no = nackedBy*g_NUM_SMT_THREADS + nacking_thread;
- Address nack_pc = SIMICS_get_program_counter(nack_proc_no);
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " XACT NACK " << xid
- << " by " << nack_proc_no
- << " [ " << nackedBy
- << ", " << nacking_thread
- << " ]"
- << " NID: " << nid
- << " " << addr
- << " VA " << logicalAddress
- << " PC " << pc
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << " NackerPC " << nack_pc
- << " my_ts " << seq_ts
- << " nack_ts " << nack_ts
- << " possible_cycle " << possibleCycle
- << endl;
- }
-
- // Record nacks by xid
- if(!m_nackXIDMap_ptr->exist(xid)){
- m_nackXIDMap_ptr->add(xid, 1);
- } else {
- (m_nackXIDMap_ptr->lookup(xid))++;
- }
-
- // Record nack ID pairs by xid
- if(!m_nackXIDPairMap_ptr->exist(xid)){
- Map<int, int> * new_map = new Map<int, int>;
- new_map->add(nid, 1);
- m_nackXIDPairMap_ptr->add(xid, new_map);
- }
- else{
- // retrieve existing map
- Map<int, int> * my_map = m_nackXIDPairMap_ptr->lookup(xid);
- if(!my_map->exist(nid)){
- my_map->add(nid, 1);
- }
- else{
- (my_map->lookup(nid))++;
- }
- }
-
- // Record nacks by pc
- if(!m_nackPCMap_ptr->exist(pc)){
- m_nackPCMap_ptr->add(pc, 1);
- } else {
- (m_nackPCMap_ptr->lookup(pc))++;
- }
-}
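
Besides the flat per-xid counters, profileNack() keeps a two-level table (Map<int, Map<int, int>*>): the outer key is the nacked transaction, the inner map counts which transactions nacked it, and the inner map is heap-allocated on first use. With value-semantics containers the explicit allocation is unnecessary, as in this illustrative sketch:

// Two-level NACK accounting sketch: outer key is the nacked xid, inner
// key identifies the nacker. Nested std::map by value, so no explicit
// new/delete of inner maps is needed.
#include <iostream>
#include <map>

int main() {
    std::map<int, std::map<int, int>> nackPairs;

    // Made-up samples: (nacked xid, nacking xid)
    const int samples[][2] = {{7, 11}, {7, 11}, {7, 3}, {9, 7}};
    for (const auto& s : samples)
        nackPairs[s[0]][s[1]]++;   // both levels default-construct to 0

    for (const auto& outer : nackPairs)
        for (const auto& inner : outer.second)
            std::cout << "xid " << outer.first << " nacked by xid "
                      << inner.first << " x" << inner.second << "\n";
    return 0;
}
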
-
-void Profiler::profileExposedConflict(NodeID id, int xid, int thread, Address addr, Address pc){
- //if(PROFILE_XACT){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- // The actual processor number
- int proc_no = id*g_NUM_SMT_THREADS + thread;
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " "
- << " EXPOSED ACTION CONFLICT " << xid
- << " ADDR " << addr
- << " PC " << pc
- << endl;
- //}
-}
-
-void Profiler::profileInferredAbort(){
- m_inferredAborts++;
-}
-
-void Profiler::profileAbortDelayConstants(int startupDelay, int perBlock){
- m_abortStarupDelay = startupDelay;
- m_abortPerBlockDelay = perBlock;
-}
-
-void Profiler::profileAbortTransaction(NodeID id, int tid, int xid, int thread, int delay, int abortingThread, int abortingProc, Address addr, Address pc){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- int abortingXID = -1;
- // The actual processor number
- int proc_no = id*g_NUM_SMT_THREADS + thread;
- // we are passed in physical proc number. Compute logical abort proc_no
- int logical_abort_proc_no = abortingProc/g_NUM_SMT_THREADS;
- if(abortingProc >= 0){
- AbstractChip * c = g_system_ptr->getChip(logical_abort_proc_no/RubyConfig::numberOfProcsPerChip());
- abortingXID = 0; // c->getTransactionInterfaceManager(logical_abort_proc_no%RubyConfig::numberOfProcsPerChip())->getXID(abortingThread);
- assert(0);
- }
- //- if(PROFILE_XACT){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << "]" << " TID " << tid
- << " XACT ABORT " << xid
- << " caused by " << abortingProc
- << " [ " << logical_abort_proc_no
- << ", " << abortingThread
- << " ]"
- << " xid: " << abortingXID << " "
- << " address: " << addr
- << " delay: " << delay
- << " PC " << pc
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
- }
- m_transactionAborts++;
-
- // Record aborts by xid
- if(!m_abortIDMap_ptr->exist(xid)){
- m_abortIDMap_ptr->add(xid, 1);
- } else {
- (m_abortIDMap_ptr->lookup(xid))++;
- }
- m_abortDelays.add(delay);
-
- // Record aborts by pc
- if(!m_abortPCMap_ptr->exist(pc)){
- m_abortPCMap_ptr->add(pc, 1);
- } else {
- (m_abortPCMap_ptr->lookup(pc))++;
- }
-
- // Record aborts by address
- if(!m_abortAddressMap_ptr->exist(addr)){
- m_abortAddressMap_ptr->add(addr, 1);
- } else {
- (m_abortAddressMap_ptr->lookup(addr))++;
- }
-}
-
-void Profiler::profileTransWB(){
- m_transWBs++;
-}
-
-void Profiler::profileExtraWB(){
- m_extraWBs++;
-}
-
-void Profiler::profileXactChange(int procs, int cycles){
- if(!m_procsInXactMap_ptr->exist(procs)){
- m_procsInXactMap_ptr->add(procs, cycles);
- } else {
- (m_procsInXactMap_ptr->lookup(procs)) += cycles;
- }
-}
-
-void Profiler::profileReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
- // do NOT count instances when signature is empty!
- if(!bf_filter_result && !perfect_filter_result){
- m_readSetEmptyChecks++;
- return;
- }
-
- if(bf_filter_result != perfect_filter_result){
- m_readSetNoMatch++;
- /*
- // we have a false positive
- if(!m_readSetNoMatch_ptr->exist(addr)){
- m_readSetNoMatch_ptr->add(addr, 1);
- }
- else{
- (m_readSetNoMatch_ptr->lookup(addr))++;
- }
- */
- }
- else{
- m_readSetMatch++;
- /*
- // Bloom filter agrees with perfect filter
- if(!m_readSetMatch_ptr->exist(addr)){
- m_readSetMatch_ptr->add(addr, 1);
- }
- else{
- (m_readSetMatch_ptr->lookup(addr))++;
- }
- */
- }
-}
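
profileReadSet() sorts each signature lookup into one of three buckets: lookups where both the Bloom-filter signature and the perfect filter miss are set aside as empty checks, disagreements are counted as no-match (necessarily Bloom false positives, since a Bloom filter never reports a false negative), and agreements are counted as matches. The self-contained sketch below reproduces that bookkeeping against a toy two-hash Bloom filter; the hash functions, table size, and addresses are invented for illustration only.

// Toy reproduction of the match / no-match / empty-check accounting
// above. The 64-bit, two-hash Bloom filter is a made-up stand-in for
// the real read-set signature.
#include <cstdint>
#include <iostream>
#include <set>

struct TinyBloom {
    uint64_t bits = 0;
    static unsigned h1(uint64_t a) { return (a * 2654435761ULL) % 64; }
    static unsigned h2(uint64_t a) { return (a >> 6) % 64; }
    void insert(uint64_t a) { bits |= (1ULL << h1(a)) | (1ULL << h2(a)); }
    bool maybeContains(uint64_t a) const {
        return (bits & (1ULL << h1(a))) && (bits & (1ULL << h2(a)));
    }
};

int main() {
    TinyBloom bf;                 // approximate read set
    std::set<uint64_t> perfect;   // exact read set
    for (uint64_t a : {0x100ULL, 0x140ULL, 0x1c0ULL}) {
        bf.insert(a);
        perfect.insert(a);
    }

    long long emptyChecks = 0, match = 0, noMatch = 0;
    for (uint64_t probe = 0; probe < 0x400; probe += 0x40) {
        bool bf_hit = bf.maybeContains(probe);
        bool perfect_hit = perfect.count(probe) != 0;
        if (!bf_hit && !perfect_hit) { emptyChecks++; continue; }  // both miss
        if (bf_hit != perfect_hit) noMatch++;   // Bloom false positive
        else                       match++;     // filters agree on a hit
    }
    std::cout << "match " << match << "  noMatch " << noMatch
              << "  skipped " << emptyChecks << "\n";
    return 0;
}
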
-
-
-void Profiler::profileRemoteReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
- if(bf_filter_result != perfect_filter_result){
- // we have a false positive
- if(!m_remoteReadSetNoMatch_ptr->exist(addr)){
- m_remoteReadSetNoMatch_ptr->add(addr, 1);
- }
- else{
- (m_remoteReadSetNoMatch_ptr->lookup(addr))++;
- }
- }
- else{
- // Bloom filter agrees with perfect filter
- if(!m_remoteReadSetMatch_ptr->exist(addr)){
- m_remoteReadSetMatch_ptr->add(addr, 1);
- }
- else{
- (m_remoteReadSetMatch_ptr->lookup(addr))++;
- }
- }
-}
-
-void Profiler::profileWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
- // do NOT count instances when signature is empty!
- if(!bf_filter_result && !perfect_filter_result){
- m_writeSetEmptyChecks++;
- return;
- }
-
- if(bf_filter_result != perfect_filter_result){
- m_writeSetNoMatch++;
- /*
- // we have a false positive
- if(!m_writeSetNoMatch_ptr->exist(addr)){
- m_writeSetNoMatch_ptr->add(addr, 1);
- }
- else{
- (m_writeSetNoMatch_ptr->lookup(addr))++;
- }
- */
- }
- else{
- m_writeSetMatch++;
- /*
- // Bloom filter agrees with perfect filter
- if(!m_writeSetMatch_ptr->exist(addr)){
- m_writeSetMatch_ptr->add(addr, 1);
- }
- else{
- (m_writeSetMatch_ptr->lookup(addr))++;
- }
- */
- }
-}
-
-
-void Profiler::profileRemoteWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread){
- if(bf_filter_result != perfect_filter_result){
- // we have a false positive
- if(!m_remoteWriteSetNoMatch_ptr->exist(addr)){
- m_remoteWriteSetNoMatch_ptr->add(addr, 1);
- }
- else{
- (m_remoteWriteSetNoMatch_ptr->lookup(addr))++;
- }
- }
- else{
- // Bloom filter agrees with perfect filter
- if(!m_remoteWriteSetMatch_ptr->exist(addr)){
- m_remoteWriteSetMatch_ptr->add(addr, 1);
- }
- else{
- (m_remoteWriteSetMatch_ptr->lookup(addr))++;
- }
- }
-}
-
-void Profiler::profileTransactionLogOverflow(NodeID id, Address addr, Address pc){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
- << " XACT LOG OVERFLOW"
- << " ADDR " << addr
- << " PC " << pc
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
-
- }
- m_transactionLogOverflows++;
-}
-
-void Profiler::profileTransactionCacheOverflow(NodeID id, Address addr, Address pc){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
- << " XACT CACHE OVERFLOW "
- << " ADDR " << addr
- << " PC " << pc
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
-
- }
- m_transactionCacheOverflows++;
-}
-
-void Profiler::profileGetCPS(NodeID id, uint32 cps, Address pc){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
-
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
- << " XACT GET CPS"
- << " PC " << pc
- << " *PC 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << " CPS 0x" << hex << cps << dec
- << endl;
- }
-}
-//---- end Transactional Memory CODE
-
-
-void Profiler::profileExceptionStart(bool xact, NodeID id, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc){
- if(xact){
- if(!m_xactExceptionMap_ptr->exist(val)){
- m_xactExceptionMap_ptr->add(val, 1);
- } else {
- (m_xactExceptionMap_ptr->lookup(val))++;
- }
- }
-
- if (!xact && !PROFILE_NONXACT) return;
-
- if(PROFILE_EXCEPTIONS){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- // The actual processor number
- int proc_no = id*g_NUM_SMT_THREADS + thread;
-
- // get the excepting instruction
- const char * instruction;
- physical_address_t addr = SIMICS_translate_address( proc_no, Address(pc));
- if(val != 0x64 && addr != 0x0){
- // ignore instruction TLB miss
- instruction = SIMICS_disassemble_physical( proc_no, addr );
- }
-
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << " ]" << " ";
- if (xact)
- (* debug_cout_ptr) << " XACT Exception(";
- else
- (* debug_cout_ptr) << " Exception(";
-
- (* debug_cout_ptr) << hex << val << dec << ")_START--Trap Level " << trap_level
- << "--(PC=0x" << hex << pc << ", " << npc << ")"
- << dec;
-
- if(val != 0x64 && addr != 0x0){
- (* debug_cout_ptr) << " instruction = " << instruction;
- }
- else{
- (* debug_cout_ptr) << " instruction = INSTRUCTION TLB MISS";
- }
- (* debug_cout_ptr) << dec << endl;
- }
-}
-
-void Profiler::profileExceptionDone(bool xact, NodeID id, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc, uinteger_t tpc, uinteger_t tnpc){
- if (!xact && !PROFILE_NONXACT) return;
-
- if (PROFILE_EXCEPTIONS){
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- // The actual processor number
- int proc_no = id*g_NUM_SMT_THREADS + thread;
-
- // get the excepting instruction
- const char * instruction;
- instruction = SIMICS_disassemble_physical( proc_no, SIMICS_translate_address( proc_no, Address(pc) ) );
-
-
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << proc_no << " [" << id << "," << thread << " ]" << " ";
- if (xact)
- (* debug_cout_ptr) << " XACT Exception(";
- else
- (* debug_cout_ptr) << " Exception(";
-
- (* debug_cout_ptr) << hex << val << dec << ")_DONE--Trap Level " << trap_level
- << "--(PC=0x" << hex << pc << ", " << npc << dec << ")"
- << "--(TPC=0x" << hex << tpc << ", " << tnpc << dec << ")"
- << endl;
- }
-}
-
-void Profiler::rubyWatch(int id){
- int rn_g1 = SIMICS_get_register_number(id, "g1");
- uint64 tr = SIMICS_read_register(id, rn_g1);
- Address watch_address = Address(tr);
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
-
- (* debug_cout_ptr).flags(ios::right);
- (* debug_cout_ptr) << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- (* debug_cout_ptr) << setw(ID_SPACES) << id << " "
- << "RUBY WATCH "
- << watch_address
- << endl;
-
- if(!m_watch_address_list_ptr->exist(watch_address)){
- m_watch_address_list_ptr->add(watch_address, 1);
- }
-}
-
-bool Profiler::watchAddress(Address addr){
- if (m_watch_address_list_ptr->exist(addr))
- return true;
- else
- return false;
-}
-
-void Profiler::profileReadFilterBitsSet(int xid, int bits, bool isCommit) {
- if (isCommit) {
- if(!m_xactReadFilterBitsSetOnCommit->exist(xid)){
- Histogram hist;
- hist.add(bits);
- m_xactReadFilterBitsSetOnCommit->add(xid, hist);
- }
- else{
- (m_xactReadFilterBitsSetOnCommit->lookup(xid)).add(bits);
- }
- } else {
- if(!m_xactReadFilterBitsSetOnAbort->exist(xid)){
- Histogram hist;
- hist.add(bits);
- m_xactReadFilterBitsSetOnAbort->add(xid, hist);
- }
- else{
- (m_xactReadFilterBitsSetOnAbort->lookup(xid)).add(bits);
- }
- }
-}
-
-void Profiler::profileWriteFilterBitsSet(int xid, int bits, bool isCommit) {
- if (isCommit) {
- if(!m_xactWriteFilterBitsSetOnCommit->exist(xid)){
- Histogram hist;
- hist.add(bits);
- m_xactWriteFilterBitsSetOnCommit->add(xid, hist);
- }
- else{
- (m_xactWriteFilterBitsSetOnCommit->lookup(xid)).add(bits);
- }
- } else {
- if(!m_xactWriteFilterBitsSetOnAbort->exist(xid)){
- Histogram hist;
- hist.add(bits);
- m_xactWriteFilterBitsSetOnAbort->add(xid, hist);
- }
- else{
- (m_xactWriteFilterBitsSetOnAbort->lookup(xid)).add(bits);
- }
- }
-}
-/*
- //gem5:Arka for decomissioning log_tm
-
-void Profiler::setXactVisualizerFile(char * filename){
- if ( (filename == NULL) ||
- (!strcmp(filename, "none")) ) {
- m_xact_visualizer_ptr = &cout;
- return;
- }
-
- if (m_xact_visualizer.is_open() ) {
- m_xact_visualizer.close ();
- }
- m_xact_visualizer.open (filename, std::ios::out);
- if (! m_xact_visualizer.is_open() ) {
- cerr << "setXactVisualizer: can't open file " << filename << endl;
- }
- else {
- m_xact_visualizer_ptr = &m_xact_visualizer;
- }
- cout << "setXactVisualizer file " << filename << endl;
-}
-
-void Profiler::printTransactionState(bool can_skip){
- if (!XACT_VISUALIZER) return;
- int num_processors = RubyConfig::numberOfProcessors() * RubyConfig::numberofSMTThreads();
-
- if (!g_system_ptr->getXactVisualizer()->existXactActivity() && can_skip)
- return;
-
- if (can_skip && ((g_eventQueue_ptr->getTime()/10000) <= m_xact_visualizer_last))
- return;
-
- Vector<char> xactStateVector = g_system_ptr->getXactVisualizer()->getTransactionStateVector();
- for (int i = 0 ; i < num_processors; i++){
- (* m_xact_visualizer_ptr) << xactStateVector[i] << " ";
- }
- (* m_xact_visualizer_ptr) << " " << g_eventQueue_ptr->getTime() << endl;
- m_xact_visualizer_last = g_eventQueue_ptr->getTime() / 10000;
-}
-*/
-void Profiler::watchpointsFalsePositiveTrigger()
-{
- m_watchpointsFalsePositiveTrigger++;
-}
-
-void Profiler::watchpointsTrueTrigger()
-{
- m_watchpointsTrueTrigger++;
-}
-
-// For MemoryControl:
-void Profiler::profileMemReq(int bank) {
- m_memReq++;
- m_memBankCount[bank]++;
-}
-void Profiler::profileMemBankBusy() { m_memBankBusy++; }
-void Profiler::profileMemBusBusy() { m_memBusBusy++; }
-void Profiler::profileMemReadWriteBusy() { m_memReadWriteBusy++; }
-void Profiler::profileMemDataBusBusy() { m_memDataBusBusy++; }
-void Profiler::profileMemTfawBusy() { m_memTfawBusy++; }
-void Profiler::profileMemRefresh() { m_memRefresh++; }
-void Profiler::profileMemRead() { m_memRead++; }
-void Profiler::profileMemWrite() { m_memWrite++; }
-void Profiler::profileMemWaitCycles(int cycles) { m_memWaitCycles += cycles; }
-void Profiler::profileMemInputQ(int cycles) { m_memInputQ += cycles; }
-void Profiler::profileMemBankQ(int cycles) { m_memBankQ += cycles; }
-void Profiler::profileMemArbWait(int cycles) { m_memArbWait += cycles; }
-void Profiler::profileMemRandBusy() { m_memRandBusy++; }
-void Profiler::profileMemNotOld() { m_memNotOld++; }
-
-
-//----------- ATMTP -------------------//
-
-void Profiler::profileTransactionTCC(NodeID id, Address pc){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
-
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- cout.flags(ios::right);
- cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- cout << setw(ID_SPACES) << id << " "
- << " XACT Aborting! Executed TCC "
- << " PC: " << pc
- << " *PC: 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
- }
- m_transactionUnsupInsts++;
-}
-
-void Profiler::profileTransactionUnsupInst(NodeID id, Address pc){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
-
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- cout.flags(ios::right);
- cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- cout << setw(ID_SPACES) << id << " "
- << " XACT Aborting! Executed Unsupported Instruction "
- << " PC: " << pc
- << " *PC: 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
- }
- m_transactionUnsupInsts++;
-}
-
-void Profiler::profileTransactionSaveInst(NodeID id, Address pc){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
-
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- cout.flags(ios::right);
- cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- cout << setw(ID_SPACES) << id << " "
- << " XACT Aborting! Executed Save Instruction "
- << " PC: " << pc
- << " *PC: 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
- }
- m_transactionSaveRestAborts++;
-}
-
-void Profiler::profileTransactionRestoreInst(NodeID id, Address pc){
- if(PROFILE_XACT || (ATMTP_DEBUG_LEVEL >= 1)){
- physical_address_t myPhysPC = SIMICS_translate_address(id, pc);
- integer_t myInst = SIMICS_read_physical_memory(id, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(id, myPhysPC);
-
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- cout.flags(ios::right);
- cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- cout << setw(ID_SPACES) << id << " "
- << " XACT Aborting! Executed Restore Instruction "
- << " PC: " << pc
- << " *PC: 0x" << hex << myInst << dec
- << " '" << myInstStr << "'"
- << endl;
- }
- m_transactionSaveRestAborts++;
-}
-
-void Profiler::profileTimerInterrupt(NodeID id,
- uinteger_t tick, uinteger_t tick_cmpr,
- uinteger_t stick, uinteger_t stick_cmpr,
- int trap_level,
- uinteger_t pc, uinteger_t npc,
- uinteger_t pstate, int pil){
- if (PROFILE_EXCEPTIONS) {
- const int ID_SPACES = 3;
- const int TIME_SPACES = 7;
- cout.flags(ios::right);
- cout << setw(TIME_SPACES) << g_eventQueue_ptr->getTime() << " ";
- cout << setw(ID_SPACES) << id << " ";
- cout << hex << "Timer--(Tick=0x" << tick << ", TckCmp=0x" << tick_cmpr
- << ", STick=0x" << stick << ", STickCmp=0x" << stick_cmpr
- << ")--(PC=" << pc << ", " << npc
- << dec << ")--(TL=" << trap_level << ", pil=" << pil
- << hex << ", pstate=0x" << pstate
- << dec << ")" << endl;
- }
-}
diff --git a/src/mem/ruby/profiler/Profiler.hh b/src/mem/ruby/profiler/Profiler.hh
index 2961a81d1..aa018029c 100644
--- a/src/mem/ruby/profiler/Profiler.hh
+++ b/src/mem/ruby/profiler/Profiler.hh
@@ -27,19 +27,19 @@
*/
/*
- This file has been modified by Kevin Moore and Dan Nussbaum of the
- Scalable Systems Research Group at Sun Microsystems Laboratories
- (http://research.sun.com/scalable/) to support the Adaptive
- Transactional Memory Test Platform (ATMTP).
+ This file has been modified by Kevin Moore and Dan Nussbaum of the
+ Scalable Systems Research Group at Sun Microsystems Laboratories
+ (http://research.sun.com/scalable/) to support the Adaptive
+ Transactional Memory Test Platform (ATMTP).
- Please send email to atmtp-interest@sun.com with feedback, questions, or
- to request future announcements about ATMTP.
+ Please send email to atmtp-interest@sun.com with feedback, questions, or
+ to request future announcements about ATMTP.
- ----------------------------------------------------------------------
+ ----------------------------------------------------------------------
- File modification date: 2008-02-23
+ File modification date: 2008-02-23
- ----------------------------------------------------------------------
+ ----------------------------------------------------------------------
*/
/*
@@ -68,7 +68,6 @@
#include "Set.hh"
#include "CacheRequestType.hh"
#include "GenericRequestType.hh"
-//#include "XactProfiler.hh" //gem5:Arka for decomissioning og log_tm
class CacheMsg;
class CacheProfiler;
@@ -78,355 +77,229 @@ template <class KEY_TYPE, class VALUE_TYPE> class Map;
class Profiler : public Consumer {
public:
- // Constructors
- Profiler();
-
- // Destructor
- ~Profiler();
-
- // Public Methods
- void wakeup();
-
- void setPeriodicStatsFile(const string& filename);
- void setPeriodicStatsInterval(integer_t period);
-
- void setXactVisualizerFile(char* filename);
-
- void printStats(ostream& out, bool short_stats=false);
- void printShortStats(ostream& out) { printStats(out, true); }
- void printTraceStats(ostream& out) const;
- void clearStats();
- void printConfig(ostream& out) const;
- void printResourceUsage(ostream& out) const;
-
- AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
- AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
- //XactProfiler* getXactProfiler() { return m_xact_profiler_ptr;} //gem5:Arka for decomissioning og log_tm
-
- void addPrimaryStatSample(const CacheMsg& msg, NodeID id);
- void addSecondaryStatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
- void addSecondaryStatSample(CacheRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
- void addAddressTraceSample(const CacheMsg& msg, NodeID id);
-
- void profileRequest(const string& requestStr);
- void profileSharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner);
-
- void profileMulticastRetry(const Address& addr, int count);
-
- void profileFilterAction(int action);
-
- void profileConflictingRequests(const Address& addr);
- void profileOutstandingRequest(int outstanding) { m_outstanding_requests.add(outstanding); }
- void profileOutstandingPersistentRequest(int outstanding) { m_outstanding_persistent_requests.add(outstanding); }
- void profileAverageLatencyEstimate(int latency) { m_average_latency_estimate.add(latency); }
-
- void countBAUnicast() { m_num_BA_unicasts++; }
- void countBABroadcast() { m_num_BA_broadcasts++; }
-
- void recordPrediction(bool wasGood, bool wasPredicted);
-
- void startTransaction(int cpu);
- void endTransaction(int cpu);
- void profilePFWait(Time waitTime);
-
- void controllerBusy(MachineID machID);
- void bankBusy();
- void missLatency(Time t, CacheRequestType type, GenericMachineType respondingMach);
- void swPrefetchLatency(Time t, CacheRequestType type, GenericMachineType respondingMach);
- void stopTableUsageSample(int num) { m_stopTableProfile.add(num); }
- void L1tbeUsageSample(int num) { m_L1tbeProfile.add(num); }
- void L2tbeUsageSample(int num) { m_L2tbeProfile.add(num); }
- void sequencerRequests(int num) { m_sequencer_requests.add(num); }
- void storeBuffer(int size, int blocks) { m_store_buffer_size.add(size); m_store_buffer_blocks.add(blocks);}
-
- void profileGetXMaskPrediction(const Set& pred_set);
- void profileGetSMaskPrediction(const Set& pred_set);
- void profileTrainingMask(const Set& pred_set);
- void profileTransition(const string& component, NodeID id, NodeID version, Address addr,
- const string& state, const string& event,
- const string& next_state, const string& note);
- void profileMsgDelay(int virtualNetwork, int delayCycles);
-
- void print(ostream& out) const;
-
- int64 getTotalInstructionsExecuted() const;
- int64 getTotalTransactionsExecuted() const;
-
- //---- begin Transactional Memory CODE
- #if 0 //gem5:Arka for decomissioning og log_tm
- void profileTransCycles(int proc, int cycles) { getXactProfiler()->profileTransCycles(proc, cycles);}
- void profileNonTransCycles(int proc, int cycles) { getXactProfiler()->profileNonTransCycles(proc, cycles);}
- void profileStallTransCycles(int proc, int cycles) { getXactProfiler()->profileStallTransCycles(proc, cycles); }
- void profileStallNonTransCycles(int proc, int cycles) { getXactProfiler()->profileStallNonTransCycles(proc, cycles); }
- void profileAbortingTransCycles(int proc, int cycles) { getXactProfiler()->profileAbortingTransCycles(proc, cycles); }
- void profileCommitingTransCycles(int proc, int cycles) { getXactProfiler()->profileCommitingTransCycles(proc, cycles); }
- void profileBarrierCycles(int proc, int cycles) { getXactProfiler()->profileBarrierCycles(proc, cycles);}
- void profileBackoffTransCycles(int proc, int cycles) { getXactProfiler()->profileBackoffTransCycles(proc, cycles); }
- void profileGoodTransCycles(int proc, int cycles) {getXactProfiler()->profileGoodTransCycles(proc, cycles); }
-
- #endif //gem5:Arka TODO clean up the rest of this functions as well
- void profileTransaction(int size, int logSize, int readS, int writeS, int overflow_readS, int overflow_writeS, int retries, int cycles, bool nacked, int loadMisses, int storeMisses, int instrCount, int xid);
- void profileBeginTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen);
- void profileCommitTransaction(NodeID id, int tid, int xid, int thread, Address pc, bool isOpen);
- void profileLoadTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
- void profileLoad(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
- void profileStoreTransaction(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
- void profileStore(NodeID id, int tid, int xid, int thread, Address addr, Address logicalAddress, Address pc);
- void profileLoadOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow);
- void profileStoreOverflow(NodeID id, int tid, int xid, int thread, Address addr, bool l1_overflow);
- void profileNack(NodeID id, int tid, int xid, int thread, int nacking_thread, NodeID nackedBy, Address addr, Address logicalAddress, Address pc, uint64 seq_ts, uint64 nack_ts, bool possibleCycle);
- void profileExposedConflict(NodeID id, int xid, int thread, Address addr, Address pc);
- void profileTransWB();
- void profileExtraWB();
- void profileInferredAbort();
- void profileAbortTransaction(NodeID id, int tid, int xid, int thread, int delay, int abortingThread, int abortingProc, Address addr, Address pc);
- void profileExceptionStart(bool xact, NodeID proc_no, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc);
- void profileExceptionDone(bool xact, NodeID proc_no, int thread, int val, int trap_level, uinteger_t pc, uinteger_t npc, uinteger_t tpc, uinteger_t tnpc);
- void profileTimerInterrupt(NodeID id,
- uinteger_t tick, uinteger_t tick_cmpr,
- uinteger_t stick, uinteger_t stick_cmpr,
- int trap_level,
- uinteger_t pc, uinteger_t npc,
- uinteger_t pstate, int pil);
-
- void profileAbortDelayConstants(int handlerStartupDelay, int handlerPerBlockDelay);
- void profileXactChange(int procs, int cycles);
- void profileReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
- void profileWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
- void profileRemoteReadSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
- void profileRemoteWriteSet(Address addr, bool bf_filter_result, bool perfect_filter_result, NodeID id, int thread);
-
-
- void profileReadFilterBitsSet(int xid, int bits, bool isCommit);
- void profileWriteFilterBitsSet(int xid, int bits, bool isCommit);
-
- void printTransactionState(bool can_skip);
-
- void watchpointsFalsePositiveTrigger();
- void watchpointsTrueTrigger();
-
- void profileTransactionLogOverflow(NodeID id, Address addr, Address pc);
- void profileTransactionCacheOverflow(NodeID id, Address addr, Address pc);
- void profileGetCPS(NodeID id, uint32 cps, Address pc);
- void profileTransactionTCC(NodeID id, Address pc);
- void profileTransactionUnsupInst(NodeID id, Address pc);
- void profileTransactionSaveInst(NodeID id, Address pc);
- void profileTransactionRestoreInst(NodeID id, Address pc);
-
- //---- end Transactional Memory CODE
-
- void rubyWatch(int proc);
- bool watchAddress(Address addr);
-
- // return Ruby's start time
- Time getRubyStartTime(){
- return m_ruby_start;
- }
-
- // added for MemoryControl:
- void profileMemReq(int bank);
- void profileMemBankBusy();
- void profileMemBusBusy();
- void profileMemTfawBusy();
- void profileMemReadWriteBusy();
- void profileMemDataBusBusy();
- void profileMemRefresh();
- void profileMemRead();
- void profileMemWrite();
- void profileMemWaitCycles(int cycles);
- void profileMemInputQ(int cycles);
- void profileMemBankQ(int cycles);
- void profileMemArbWait(int cycles);
- void profileMemRandBusy();
- void profileMemNotOld();
+ // Constructors
+ Profiler();
+
+ // Destructor
+ ~Profiler();
+
+ // Public Methods
+ void wakeup();
+
+ void setPeriodicStatsFile(const string& filename);
+ void setPeriodicStatsInterval(integer_t period);
+
+ void printStats(ostream& out, bool short_stats=false);
+ void printShortStats(ostream& out) { printStats(out, true); }
+ void printTraceStats(ostream& out) const;
+ void clearStats();
+ void printConfig(ostream& out) const;
+ void printResourceUsage(ostream& out) const;
+
+ AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
+ AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
+
+ void addPrimaryStatSample(const CacheMsg& msg, NodeID id);
+ void addSecondaryStatSample(GenericRequestType requestType,
+ AccessModeType type, int msgSize,
+ PrefetchBit pfBit, NodeID id);
+ void addSecondaryStatSample(CacheRequestType requestType,
+ AccessModeType type, int msgSize,
+ PrefetchBit pfBit, NodeID id);
+ void addAddressTraceSample(const CacheMsg& msg, NodeID id);
+
+ void profileRequest(const string& requestStr);
+ void profileSharing(const Address& addr, AccessType type,
+ NodeID requestor, const Set& sharers,
+ const Set& owner);
+
+ void profileMulticastRetry(const Address& addr, int count);
+
+ void profileFilterAction(int action);
+
+ void profileConflictingRequests(const Address& addr);
+ void profileOutstandingRequest(int outstanding) {
+ m_outstanding_requests.add(outstanding);
+ }
+
+ void profileOutstandingPersistentRequest(int outstanding) {
+ m_outstanding_persistent_requests.add(outstanding);
+ }
+ void profileAverageLatencyEstimate(int latency) {
+ m_average_latency_estimate.add(latency);
+ }
+
+ void countBAUnicast() { m_num_BA_unicasts++; }
+ void countBABroadcast() { m_num_BA_broadcasts++; }
+
+ void recordPrediction(bool wasGood, bool wasPredicted);
+
+ void startTransaction(int cpu);
+ void endTransaction(int cpu);
+ void profilePFWait(Time waitTime);
+
+ void controllerBusy(MachineID machID);
+ void bankBusy();
+ void missLatency(Time t, CacheRequestType type,
+ GenericMachineType respondingMach);
+ void swPrefetchLatency(Time t, CacheRequestType type,
+ GenericMachineType respondingMach);
+ void stopTableUsageSample(int num) { m_stopTableProfile.add(num); }
+ void L1tbeUsageSample(int num) { m_L1tbeProfile.add(num); }
+ void L2tbeUsageSample(int num) { m_L2tbeProfile.add(num); }
+ void sequencerRequests(int num) { m_sequencer_requests.add(num); }
+ void storeBuffer(int size, int blocks) {
+ m_store_buffer_size.add(size);
+ m_store_buffer_blocks.add(blocks);
+ }
+
+ void profileGetXMaskPrediction(const Set& pred_set);
+ void profileGetSMaskPrediction(const Set& pred_set);
+ void profileTrainingMask(const Set& pred_set);
+ void profileTransition(const string& component, NodeID id, NodeID version,
+ Address addr, const string& state,
+ const string& event, const string& next_state,
+ const string& note);
+ void profileMsgDelay(int virtualNetwork, int delayCycles);
+
+ void print(ostream& out) const;
+
+ int64 getTotalInstructionsExecuted() const;
+ int64 getTotalTransactionsExecuted() const;
+
+ Time getRubyStartTime(){
+ return m_ruby_start;
+ }
+
+ // added for MemoryControl:
+ void profileMemReq(int bank);
+ void profileMemBankBusy();
+ void profileMemBusBusy();
+ void profileMemTfawBusy();
+ void profileMemReadWriteBusy();
+ void profileMemDataBusBusy();
+ void profileMemRefresh();
+ void profileMemRead();
+ void profileMemWrite();
+ void profileMemWaitCycles(int cycles);
+ void profileMemInputQ(int cycles);
+ void profileMemBankQ(int cycles);
+ void profileMemArbWait(int cycles);
+ void profileMemRandBusy();
+ void profileMemNotOld();
private:
- // Private Methods
- void addL2StatSample(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID id);
- void addL1DStatSample(const CacheMsg& msg, NodeID id);
- void addL1IStatSample(const CacheMsg& msg, NodeID id);
-
- GenericRequestType CacheRequestType_to_GenericRequestType(const CacheRequestType& type);
-
- // Private copy constructor and assignment operator
- Profiler(const Profiler& obj);
- Profiler& operator=(const Profiler& obj);
-
- // Data Members (m_ prefix)
- CacheProfiler* m_L1D_cache_profiler_ptr;
- CacheProfiler* m_L1I_cache_profiler_ptr;
- CacheProfiler* m_L2_cache_profiler_ptr;
- AddressProfiler* m_address_profiler_ptr;
- AddressProfiler* m_inst_profiler_ptr;
-
-// XactProfiler* m_xact_profiler_ptr; // gem5:Arka for decomissioning of log_tm
-
- Vector<int64> m_instructions_executed_at_start;
- Vector<int64> m_cycles_executed_at_start;
-
- ostream* m_periodic_output_file_ptr;
- integer_t m_stats_period;
- std::fstream m_xact_visualizer;
- std::ostream *m_xact_visualizer_ptr;
-
- Time m_ruby_start;
- time_t m_real_time_start_time;
-
- int m_num_BA_unicasts;
- int m_num_BA_broadcasts;
-
- Vector<integer_t> m_perProcTotalMisses;
- Vector<integer_t> m_perProcUserMisses;
- Vector<integer_t> m_perProcSupervisorMisses;
- Vector<integer_t> m_perProcStartTransaction;
- Vector<integer_t> m_perProcEndTransaction;
- Vector < Vector < integer_t > > m_busyControllerCount;
- integer_t m_busyBankCount;
- Histogram m_multicast_retry_histogram;
-
- Histogram m_L1tbeProfile;
- Histogram m_L2tbeProfile;
- Histogram m_stopTableProfile;
-
- Histogram m_filter_action_histogram;
- Histogram m_tbeProfile;
-
- Histogram m_sequencer_requests;
- Histogram m_store_buffer_size;
- Histogram m_store_buffer_blocks;
- Histogram m_read_sharing_histogram;
- Histogram m_write_sharing_histogram;
- Histogram m_all_sharing_histogram;
- int64 m_cache_to_cache;
- int64 m_memory_to_cache;
-
- Histogram m_prefetchWaitHistogram;
-
- Vector<Histogram> m_missLatencyHistograms;
- Vector<Histogram> m_machLatencyHistograms;
- Histogram m_L2MissLatencyHistogram;
- Histogram m_allMissLatencyHistogram;
-
- Histogram m_allSWPrefetchLatencyHistogram;
- Histogram m_SWPrefetchL2MissLatencyHistogram;
- Vector<Histogram> m_SWPrefetchLatencyHistograms;
- Vector<Histogram> m_SWPrefetchMachLatencyHistograms;
-
- Histogram m_delayedCyclesHistogram;
- Histogram m_delayedCyclesNonPFHistogram;
- Vector<Histogram> m_delayedCyclesVCHistograms;
-
- int m_predictions;
- int m_predictionOpportunities;
- int m_goodPredictions;
-
- Histogram m_gets_mask_prediction;
- Histogram m_getx_mask_prediction;
- Histogram m_explicit_training_mask;
-
- // For profiling possibly conflicting requests
- Map<Address, Time>* m_conflicting_map_ptr;
- Histogram m_conflicting_histogram;
-
- Histogram m_outstanding_requests;
- Histogram m_outstanding_persistent_requests;
-
- Histogram m_average_latency_estimate;
-
- //---- begin Transactional Memory CODE
- Map <int, int>* m_procsInXactMap_ptr;
-
- Histogram m_xactCycles;
- Histogram m_xactLogs;
- Histogram m_xactReads;
- Histogram m_xactWrites;
- Histogram m_xactOverflowReads;
- Histogram m_xactOverflowWrites;
- Histogram m_xactOverflowTotalReads;
- Histogram m_xactOverflowTotalWrites;
- Histogram m_xactSizes;
- Histogram m_xactRetries;
- Histogram m_abortDelays;
- Histogram m_xactLoadMisses;
- Histogram m_xactStoreMisses;
- Histogram m_xactInstrCount;
- int m_xactNacked;
- int m_transactionAborts;
- int m_transWBs;
- int m_extraWBs;
- int m_abortStarupDelay;
- int m_abortPerBlockDelay;
- int m_inferredAborts;
- Map <int, int>* m_nackXIDMap_ptr;
- // pairs of XIDs involved in NACKs
- Map<int, Map<int, int> * > * m_nackXIDPairMap_ptr;
- Map <Address, int>* m_nackPCMap_ptr;
- Map <int, int>* m_xactExceptionMap_ptr;
- Map <int, int>* m_abortIDMap_ptr;
- Map <int, int>* m_commitIDMap_ptr;
- Map <int, int>* m_xactRetryIDMap_ptr;
- Map <int, int>* m_xactCyclesIDMap_ptr;
- Map <int, int>* m_xactReadSetIDMap_ptr;
- Map <int, int>* m_xactWriteSetIDMap_ptr;
- Map <int, int>* m_xactLoadMissIDMap_ptr;
- Map <int, int>* m_xactStoreMissIDMap_ptr;
- Map <int, integer_t> *m_xactInstrCountIDMap_ptr;
- Map <Address, int>* m_abortPCMap_ptr;
- Map <Address, int>* m_abortAddressMap_ptr;
- Map <Address, int>* m_readSetMatch_ptr;
- Map <Address, int>* m_readSetNoMatch_ptr;
- Map <Address, int>* m_writeSetMatch_ptr;
- Map <Address, int>* m_writeSetNoMatch_ptr;
- Map <Address, int>* m_remoteReadSetMatch_ptr;
- Map <Address, int>* m_remoteReadSetNoMatch_ptr;
- Map <Address, int>* m_remoteWriteSetMatch_ptr;
- Map <Address, int>* m_remoteWriteSetNoMatch_ptr;
- long long int m_readSetEmptyChecks;
- long long int m_readSetMatch;
- long long int m_readSetNoMatch;
- long long int m_writeSetEmptyChecks;
- long long int m_writeSetMatch;
- long long int m_writeSetNoMatch;
- Map<int, Histogram> * m_xactReadFilterBitsSetOnCommit;
- Map<int, Histogram> * m_xactReadFilterBitsSetOnAbort;
- Map<int, Histogram> * m_xactWriteFilterBitsSetOnCommit;
- Map<int, Histogram> * m_xactWriteFilterBitsSetOnAbort;
-
- unsigned int m_watchpointsFalsePositiveTrigger;
- unsigned int m_watchpointsTrueTrigger;
-
- int m_transactionUnsupInsts;
- int m_transactionSaveRestAborts;
-
- int m_transactionLogOverflows;
- int m_transactionCacheOverflows;
-
- //---- end Transactional Memory CODE
-
- Map<Address, int>* m_watch_address_list_ptr;
- // counts all initiated cache request including PUTs
- int m_requests;
- Map <string, int>* m_requestProfileMap_ptr;
-
- Time m_xact_visualizer_last;
-
- // added for MemoryControl:
- long long int m_memReq;
- long long int m_memBankBusy;
- long long int m_memBusBusy;
- long long int m_memTfawBusy;
- long long int m_memReadWriteBusy;
- long long int m_memDataBusBusy;
- long long int m_memRefresh;
- long long int m_memRead;
- long long int m_memWrite;
- long long int m_memWaitCycles;
- long long int m_memInputQ;
- long long int m_memBankQ;
- long long int m_memArbWait;
- long long int m_memRandBusy;
- long long int m_memNotOld;
- Vector<long long int> m_memBankCount;
+ // Private Methods
+ void addL2StatSample(GenericRequestType requestType, AccessModeType type,
+ int msgSize, PrefetchBit pfBit, NodeID id);
+ void addL1DStatSample(const CacheMsg& msg, NodeID id);
+ void addL1IStatSample(const CacheMsg& msg, NodeID id);
+
+ GenericRequestType CacheRequestType_to_GenericRequestType(const CacheRequestType& type);
+
+ // Private copy constructor and assignment operator
+ Profiler(const Profiler& obj);
+ Profiler& operator=(const Profiler& obj);
+
+ // Data Members (m_ prefix)
+ CacheProfiler* m_L1D_cache_profiler_ptr;
+ CacheProfiler* m_L1I_cache_profiler_ptr;
+ CacheProfiler* m_L2_cache_profiler_ptr;
+ AddressProfiler* m_address_profiler_ptr;
+ AddressProfiler* m_inst_profiler_ptr;
+
+ Vector<int64> m_instructions_executed_at_start;
+ Vector<int64> m_cycles_executed_at_start;
+
+ ostream* m_periodic_output_file_ptr;
+ integer_t m_stats_period;
+
+ Time m_ruby_start;
+ time_t m_real_time_start_time;
+
+ int m_num_BA_unicasts;
+ int m_num_BA_broadcasts;
+
+ Vector<integer_t> m_perProcTotalMisses;
+ Vector<integer_t> m_perProcUserMisses;
+ Vector<integer_t> m_perProcSupervisorMisses;
+ Vector<integer_t> m_perProcStartTransaction;
+ Vector<integer_t> m_perProcEndTransaction;
+ Vector < Vector < integer_t > > m_busyControllerCount;
+ integer_t m_busyBankCount;
+ Histogram m_multicast_retry_histogram;
+
+ Histogram m_L1tbeProfile;
+ Histogram m_L2tbeProfile;
+ Histogram m_stopTableProfile;
+
+ Histogram m_filter_action_histogram;
+ Histogram m_tbeProfile;
+
+ Histogram m_sequencer_requests;
+ Histogram m_store_buffer_size;
+ Histogram m_store_buffer_blocks;
+ Histogram m_read_sharing_histogram;
+ Histogram m_write_sharing_histogram;
+ Histogram m_all_sharing_histogram;
+ int64 m_cache_to_cache;
+ int64 m_memory_to_cache;
+
+ Histogram m_prefetchWaitHistogram;
+
+ Vector<Histogram> m_missLatencyHistograms;
+ Vector<Histogram> m_machLatencyHistograms;
+ Histogram m_L2MissLatencyHistogram;
+ Histogram m_allMissLatencyHistogram;
+
+ Histogram m_allSWPrefetchLatencyHistogram;
+ Histogram m_SWPrefetchL2MissLatencyHistogram;
+ Vector<Histogram> m_SWPrefetchLatencyHistograms;
+ Vector<Histogram> m_SWPrefetchMachLatencyHistograms;
+
+ Histogram m_delayedCyclesHistogram;
+ Histogram m_delayedCyclesNonPFHistogram;
+ Vector<Histogram> m_delayedCyclesVCHistograms;
+
+ int m_predictions;
+ int m_predictionOpportunities;
+ int m_goodPredictions;
+
+ Histogram m_gets_mask_prediction;
+ Histogram m_getx_mask_prediction;
+ Histogram m_explicit_training_mask;
+
+ // For profiling possibly conflicting requests
+ Map<Address, Time>* m_conflicting_map_ptr;
+ Histogram m_conflicting_histogram;
+
+ Histogram m_outstanding_requests;
+ Histogram m_outstanding_persistent_requests;
+
+ Histogram m_average_latency_estimate;
+
+ Map<Address, int>* m_watch_address_list_ptr;
+  // counts all initiated cache requests including PUTs
+ int m_requests;
+ Map <string, int>* m_requestProfileMap_ptr;
+
+ // added for MemoryControl:
+ long long int m_memReq;
+ long long int m_memBankBusy;
+ long long int m_memBusBusy;
+ long long int m_memTfawBusy;
+ long long int m_memReadWriteBusy;
+ long long int m_memDataBusBusy;
+ long long int m_memRefresh;
+ long long int m_memRead;
+ long long int m_memWrite;
+ long long int m_memWaitCycles;
+ long long int m_memInputQ;
+ long long int m_memBankQ;
+ long long int m_memArbWait;
+ long long int m_memRandBusy;
+ long long int m_memNotOld;
+ Vector<long long int> m_memBankCount;
};
@@ -439,9 +312,9 @@ ostream& operator<<(ostream& out, const Profiler& obj);
extern inline
ostream& operator<<(ostream& out, const Profiler& obj)
{
- obj.print(out);
- out << flush;
- return out;
+ obj.print(out);
+ out << flush;
+ return out;
}
#endif //PROFILER_H
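
The header keeps the small inline operator<< at the bottom, which forwards to print() and flushes, so any component can stream a Profiler into a log. The snippet below is a self-contained illustration of that print()/operator<< idiom with a stand-in class; it is not the project's header.

// Minimal illustration of the print()/operator<< pattern retained at
// the end of the header. Stats is a placeholder class for the example.
#include <iostream>

class Stats {
  public:
    void print(std::ostream& out) const { out << "requests: " << m_requests; }
    void record() { m_requests++; }
  private:
    int m_requests = 0;
};

inline std::ostream& operator<<(std::ostream& out, const Stats& obj) {
    obj.print(out);
    out << std::flush;   // same flush-on-print behavior as above
    return out;
}

int main() {
    Stats s;
    s.record();
    std::cout << s << std::endl;
    return 0;
}
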
diff --git a/src/mem/ruby/recorder/CacheRecorder.cc b/src/mem/ruby/recorder/CacheRecorder.cc
index 73f33ff27..ba7ca8966 100644
--- a/src/mem/ruby/recorder/CacheRecorder.cc
+++ b/src/mem/ruby/recorder/CacheRecorder.cc
@@ -36,40 +36,44 @@
#include "TraceRecord.hh"
#include "RubyEventQueue.hh"
#include "PrioHeap.hh"
-#include "gzstream.hh"
CacheRecorder::CacheRecorder()
{
- m_records_ptr = new PrioHeap<TraceRecord>;
+ std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
+ // m_records_ptr = new PrioHeap<TraceRecord>;
}
CacheRecorder::~CacheRecorder()
{
- delete m_records_ptr;
+ std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
+ // delete m_records_ptr;
}
void CacheRecorder::addRecord(NodeID id, const Address& data_addr, const Address& pc_addr, CacheRequestType type, Time time)
{
- m_records_ptr->insert(TraceRecord(id, data_addr, pc_addr, type, time));
+ std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
+ // m_records_ptr->insert(TraceRecord(id, data_addr, pc_addr, type, time));
}
int CacheRecorder::dumpRecords(string filename)
{
- ogzstream out(filename.c_str());
- if (out.fail()) {
- cout << "Error: error opening file '" << filename << "'" << endl;
- return 0;
- }
+ std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
+ // ogzstream out(filename.c_str());
+ // if (out.fail()) {
+ // cout << "Error: error opening file '" << filename << "'" << endl;
+ // return 0;
+ // }
- int counter = 0;
- while (m_records_ptr->size() != 0) {
- TraceRecord record = m_records_ptr->extractMin();
- record.output(out);
- counter++;
- }
- return counter;
+ // int counter = 0;
+ // while (m_records_ptr->size() != 0) {
+ // TraceRecord record = m_records_ptr->extractMin();
+ // record.output(out);
+ // counter++;
+ // }
+ // return counter;
+  return 0;  // the original returned the record count; keep the stub well-formed
}
void CacheRecorder::print(ostream& out) const
{
+ std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
}
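
The original dumpRecords() drained a PrioHeap with extractMin(), so records left the recorder in timestamp order; the stubbed version above keeps the interface and only prints a "Not implemented" marker. For reference, the same drain order can be expressed with std::priority_queue, as in this hypothetical sketch (record fields and the output format are simplified, and the gzip output stream is omitted).

// Simplified stand-in for the timestamp-ordered trace dump that the
// stubs above replace. Not the project's TraceRecord or PrioHeap.
#include <cstdint>
#include <functional>
#include <iostream>
#include <queue>
#include <vector>

struct TraceRecordSketch {
    uint64_t time;
    int node;
    uint64_t data_addr;
    // std::priority_queue is a max-heap; with std::greater it pops the
    // smallest timestamp first, like extractMin().
    bool operator>(const TraceRecordSketch& other) const { return time > other.time; }
};

int main() {
    std::priority_queue<TraceRecordSketch, std::vector<TraceRecordSketch>,
                        std::greater<TraceRecordSketch>> records;
    records.push({300, 1, 0x2400});
    records.push({100, 0, 0x1000});
    records.push({200, 0, 0x1040});

    int counter = 0;
    while (!records.empty()) {
        TraceRecordSketch r = records.top();
        records.pop();
        std::cout << r.time << " node " << r.node
                  << " addr 0x" << std::hex << r.data_addr << std::dec << "\n";
        counter++;
    }
    std::cout << counter << " records dumped\n";
    return 0;
}
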
diff --git a/src/mem/ruby/simics/commands.cc b/src/mem/ruby/simics/commands.cc
deleted file mode 100644
index e0a4f969e..000000000
--- a/src/mem/ruby/simics/commands.cc
+++ /dev/null
@@ -1,867 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- This file has been modified by Kevin Moore and Dan Nussbaum of the
- Scalable Systems Research Group at Sun Microsystems Laboratories
- (http://research.sun.com/scalable/) to support the Adaptive
- Transactional Memory Test Platform (ATMTP).
-
- Please send email to atmtp-interest@sun.com with feedback, questions, or
- to request future announcements about ATMTP.
-
- ----------------------------------------------------------------------
-
- File modification date: 2008-02-23
-
- ----------------------------------------------------------------------
-*/
-
-/*
- * $Id$
- *
- */
-
-#include "protocol_name.hh"
-#include "Global.hh"
-#include "System.hh"
-#include "CacheRecorder.hh"
-//#include "Tracer.hh"
-#include "RubyConfig.hh"
-#include "interface.hh"
-#include "Network.hh"
-// #include "TransactionInterfaceManager.hh"
-// #include "TransactionVersionManager.hh"
-// #include "TransactionIsolationManager.hh"
-//#include "XactCommitArbiter.hh" // gem5:Arka for decomissioning of log_tm
-#include "Chip.hh"
-//#include "XactVisualizer.hh" // gem5:Arka for decomissioning of log_tm
-
-extern "C" {
-#include "commands.hh"
-}
-
-#ifdef CONTIGUOUS_ADDRESSES
-#include "ContiguousAddressTranslator.hh"
-
-/* Declared in interface.C */
-extern ContiguousAddressTranslator * g_p_ca_translator;
-
-memory_transaction_t local_memory_transaction_t_shadow;
-
-#endif // #ifdef CONTIGUOUS_ADDRESSES
-
-//////////////////////// extern "C" api ////////////////////////////////
-
-extern "C"
-void ruby_dump_cache(int cpuNumber)
-{
- assert(0);
- g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->dumpCaches(cout);
-}
-
-extern "C"
-void ruby_dump_cache_data(int cpuNumber, char* tag)
-{
- assert(0);
- if (tag == NULL) {
- // No filename, dump to screen
- g_system_ptr->printConfig(cout);
- g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->dumpCacheData(cout);
- } else {
- // File name, dump to file
- string filename(tag);
-
- cout << "Dumping stats to output file '" << filename << "'..." << endl;
- ofstream m_outputFile;
- m_outputFile.open(filename.c_str());
- if(m_outputFile == NULL){
- cout << endl << "Error: error opening output file '" << filename << "'" << endl;
- return;
- }
- g_system_ptr->getChip(cpuNumber/RubyConfig::numberOfProcsPerChip())->dumpCacheData(m_outputFile);
- }
-}
-
-extern "C"
-void ruby_set_periodic_stats_file(char* filename)
-{
- assert(0);
- g_system_ptr->getProfiler()->setPeriodicStatsFile(filename);
-}
-
-extern "C"
-void ruby_set_periodic_stats_interval(int interval)
-{
- assert(0);
- g_system_ptr->getProfiler()->setPeriodicStatsInterval(interval);
-}
-
-extern "C"
-int mh_memorytracer_possible_cache_miss(memory_transaction_t *mem_trans)
-{
-
- assert(0);
- memory_transaction_t *p_mem_trans_shadow = mem_trans;
-
-#ifdef CONTIGUOUS_ADDRESSES
- if(g_p_ca_translator!=NULL) {
- memcpy( &local_memory_transaction_t_shadow, mem_trans, sizeof(memory_transaction_t) );
- p_mem_trans_shadow = &local_memory_transaction_t_shadow;
- uint64 contiguous_address = g_p_ca_translator->TranslateSimicsToRuby( p_mem_trans_shadow->s.physical_address );
- p_mem_trans_shadow->s.physical_address = contiguous_address;
- }
-#endif // #ifdef CONTIGUOUS_ADDRESSES
-
-
- // Pass this request off to SimicsDriver::makeRequest()
- // SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- // return simics_interface_ptr->makeRequest(p_mem_trans_shadow);
- return 0;
-}
-
-extern "C"
-void mh_memorytracer_observe_memory(memory_transaction_t *mem_trans)
-{
-
- assert(0);
- memory_transaction_t *p_mem_trans_shadow = mem_trans;
-
-
-#ifdef CONTIGUOUS_ADDRESSES
- if(g_p_ca_translator!=NULL) {
- memcpy( &local_memory_transaction_t_shadow, mem_trans, sizeof(memory_transaction_t) );
- p_mem_trans_shadow = &local_memory_transaction_t_shadow;
- uint64 contiguous_address = g_p_ca_translator->TranslateSimicsToRuby( p_mem_trans_shadow->s.physical_address );
- p_mem_trans_shadow->s.physical_address = contiguous_address;
-
- }
-#endif // #ifdef CONTIGUOUS_ADDRESSES
-
-
- // Pass this request off to SimicsDriver::makeRequest()
- //SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- //simics_interface_ptr->observeMemoryAccess(p_mem_trans_shadow);
-}
-
-
-void ruby_set_g3_reg(void *cpu, void *parameter){
- assert(0);
-#if 0
- int proc_num = SIM_get_proc_no(cpu);
- sparc_v9_interface_t * m_v9_interface = (sparc_v9_interface_t *) SIM_get_interface(cpu, SPARC_V9_INTERFACE);
-
- for(int set=0; set < 4; set++) {
- for(int i=0; i <8; i++) {
- int registerNumber = i;
- uinteger_t value = m_v9_interface->read_global_register((void *)cpu, set, registerNumber);
- cout << "ruby_set_g3_reg BEFORE: proc =" << proc_num << " GSET = " << set << " GLOBAL_REG = " << i << " VALUE = " << value << endl;
- }
- }
-
- uinteger_t value_ptr = (uinteger_t) parameter;
- int g3_regnum = SIM_get_register_number(cpu, "g3");
- SIM_write_register(cpu, g3_regnum, (uinteger_t) value_ptr);
-
- cout << endl;
- for(int set=0; set < 4; set++) {
- for(int i=0; i <8; i++) {
- int registerNumber = i;
- uinteger_t value = m_v9_interface->read_global_register((void *)cpu, set, registerNumber);
- cout << "ruby_set_g3_reg AFTER: proc =" << proc_num << " GSET = " << set << " GLOBAL_REG = " << i << " VALUE = " << value << endl;
- }
- }
-#endif
-
-}
-
-// #define XACT_MGR g_system_ptr->getChip(SIMICS_current_processor_number()/RubyConfig::numberOfProcsPerChip()/RubyConfig::numberofSMTThreads())->getTransactionInterfaceManager( (SIMICS_current_processor_number()/RubyConfig::numberofSMTThreads())%RubyConfig::numberOfProcsPerChip())
-
-extern "C"
-void magic_instruction_callback(void* desc, void* cpu, integer_t val)
-{
- assert(0);
-#if 0
- // Use magic callbacks to start and end transactions w/o opal
- if (val > 0x10000) // older magic call numbers. Need to be right-shifted.
- val = val >> 16;
- int id = -1;
- int proc_num = SIMICS_current_processor_number();
- int sim_proc_num = proc_num / RubyConfig::numberofSMTThreads();
- int thread_num = proc_num % RubyConfig::numberofSMTThreads();
- int ruby_cycle = g_eventQueue_ptr->getTime();
-
- if(proc_num < 0){
- cout << "ERROR proc_num= " << proc_num << endl;
- }
- assert(proc_num >= 0);
- if(thread_num < 0){
- cout << "ERROR thread_num= " << thread_num << endl;
- }
- assert(thread_num >= 0);
- if( sim_proc_num < 0){
- cout << "ERROR sim_proc_num = " << sim_proc_num << endl;
- }
- assert(sim_proc_num >= 0);
-
- if (val == 3) {
- g_system_ptr->getProfiler()->startTransaction(sim_proc_num);
- } else if (val == 4) {
- ; // magic breakpoint
- } else if (val == 5) {
- g_system_ptr->getProfiler()->endTransaction(sim_proc_num);
- } else if (val == 6){ // Begin Exposed Action
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Begin exposed action for thread " << thread_num << " of proc " << proc_num << " PC " << SIMICS_get_program_counter(proc_num) << endl;
- XACT_MGR->beginEscapeAction(thread_num);
- } else if (val == 7){ // End Exposed Action
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "End exposed action for thread " << thread_num << " of proc " << proc_num << " PC " << SIMICS_get_program_counter(proc_num) << endl;
- XACT_MGR->endEscapeAction(thread_num);
- } else if (val == 8) {
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Set log Base Address for thread " << thread_num << " of proc " << proc_num << endl;
- XACT_MGR->setLogBase(thread_num);
- } else if (val == 9) {
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Setting Handler Address for thread " << thread_num << " of proc " << proc_num << endl;
- XACT_MGR->setHandlerAddress(thread_num);
- } else if (val == 10) {
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Release Isolation for thread " << thread_num << " of proc " << proc_num << endl;
- XACT_MGR->releaseIsolation(thread_num);
- } else if (val == 11) {
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Restart transaction for thread " << thread_num << " of proc " << proc_num << endl;
- XACT_MGR->restartTransaction(thread_num);
- } else if (val == 12) {
- // NOTE: this is a functional magic call for the Java VM
- // It is used by mfacet.py to check whether to use TM macros or JVM locking
- return;
- } else if (val == 13) {
- // NOTE: this is a debug magic call for the Java VM
- // Indicates BEGIN XACT
- return;
- } else if (val == 14) {
- // NOTE: this is a debug magic call for the Java VM
- // Indicates COMMIT_XACT
- return;
- } else if (val == 15) {
- cout << "SIMICS SEG FAULT for thread " << thread_num << " of proc " << proc_num << endl;
- SIM_break_simulation("SIMICS SEG FAULT");
- return;
- } else if (val == 16) {
- // NOTE : this is a debug magic call for the Java VM
- // Indicates LOCKING object
- return;
- } else if (val == 17) {
- // NOTE : this is a debug magic call for the Java VM
- // Indicates UNLOCKING object
- return;
- } else if (val == 18) {
- // NOTE: this is a magic call to enable the xact mem macros in the Java VM
- // The functionality is implemented in gen-scripts/mfacet.py because it can be independent of Ruby
- return;
- } else if (val == 19){
- cout << "RUBY WATCH: " << endl;
- g_system_ptr->getProfiler()->rubyWatch(SIMICS_current_processor_number());
- } else if (val == 20) {
- //XACT_MGR->setJavaPtrs(thread_num);
- } else if (val == 21){
- // NOTE : this is a debug magic call used to dump the registers for a processor
- // Functionality is implemented in gen-scripts/mfacet.py because it can be independent of Ruby
- return;
- } else if (val == 23){
- // register compensating action
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << proc_num << "," << thread_num << " REGISTER COMPENSATING ACTION " << endl;
- XACT_MGR->registerCompensatingAction(thread_num);
- } else if (val == 24){
- // register commit action
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << proc_num << "," << thread_num << " REGISTER COMMIT ACTION " << endl;
- XACT_MGR->registerCommitAction(thread_num);
- } else if (val == 27){
- // xmalloc
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << proc_num << "," << thread_num << " XMALLOC " << endl;
- XACT_MGR->xmalloc(thread_num);
- } else if (val == 29){
- // Begin Barrier
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << proc_num << "," << thread_num << " BEGIN BARRIER " << endl;
- g_system_ptr->getXactVisualizer()->moveToBarrier(proc_num);
- } else if (val == 30){
- // End Barrier
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << proc_num << "," << thread_num << " END BARRIER " << endl;
- g_system_ptr->getXactVisualizer()->moveToNonXact(proc_num);
- } else if (val == 28) {
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Continue execution for thread " << thread_num << " of proc " << proc_num << endl;
- XACT_MGR->continueExecution(thread_num);
- } else if (val == 31){
- // Begin Timer
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << proc_num << "," << thread_num << " BEGIN TIMER " << endl;
- g_system_ptr->getProfiler()->getXactProfiler()->profileBeginTimer(proc_num);
- } else if (val == 32){
- // End Timer
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << proc_num << "," << thread_num << " END TIMER " << endl;
- g_system_ptr->getProfiler()->getXactProfiler()->profileEndTimer(proc_num);
- } else if (val == 40) {
- // register a thread for virtualization
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- simics_interface_ptr->getHypervisor()->registerThreadWithHypervisor(proc_num);
- }
- } else if (val == 41) {
- // get information about the last summary conflict
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- Address addr = XACT_MGR->getXactIsolationManager()->getSummaryConflictAddress();
- unsigned int conflictAddress = addr.getAddress();
- unsigned int conflictType = XACT_MGR->getXactIsolationManager()->getSummaryConflictType();
- SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g2"), conflictAddress);
- SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g3"), conflictType);
- }
- } else if (val == 42) {
- // resolve summary conflict magic callback
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- simics_interface_ptr->getHypervisor()->resolveSummarySignatureConflict(proc_num);
- }
- } else if (val == 50) {
- // set summary signature bit
- int index = SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2"));
- XACT_MGR->writeBitSummaryWriteSetFilter(thread_num, index, 1);
- } else if (val == 51) {
- // unset summary signature bit
- int index = SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2"));
- XACT_MGR->writeBitSummaryWriteSetFilter(thread_num, index, 0);
- } else if (val == 52) {
- // add address in summary signature
- Address addr = Address(SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2")));
- cout << "Add to summary write set filter: " << addr << endl;
- XACT_MGR->addToSummaryWriteSetFilter(thread_num, addr);
- } else if (val == 53) {
- // remove address from summary signature
- Address addr = Address(SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2")));
- XACT_MGR->removeFromSummaryWriteSetFilter(thread_num, addr);
- } else if (val == 54) {
- // translate address to summary signature index
- Address addr = Address(SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2")));
- SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g3"), XACT_MGR->getIndexSummaryFilter(thread_num, addr));
- } else if (val == 55) {
- XACT_MGR->setIgnoreWatchpointFlag(thread_num, true);
- } else if (val == 56) {
- g_system_ptr->getProfiler()->watchpointsFalsePositiveTrigger();
- } else if (val == 57) {
- g_system_ptr->getProfiler()->watchpointsTrueTrigger();
- } else if (val == 60) {
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2) {
- cout << "Set restorePC for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
- }
- unsigned int pc = SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "g2"));
- XACT_MGR->setRestorePC(thread_num, pc);
- } else if (val == 61) {
- // get log size
- SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g2"), XACT_MGR->getXactVersionManager()->getLogSize(thread_num));
- } else if (val == 62) {
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2){
- cout << " GET THREAD ID " << thread_num << " of proc " << proc_num << " TID " << XACT_MGR->getTID(thread_num) << endl;
- }
- // get thread id
- SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "g2"), XACT_MGR->getTID(thread_num));
- } else if (val == 100) {
- dump_registers((void*)cpu);
- } else if (val >= 1024 && val < 2048) {
- // begin closed
- id = val - 1024;
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Begin CLOSED transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
- XACT_MGR->beginTransaction(thread_num, id, false);
- //} else if (val >= min_closed_commit && val < XACT_OPEN_MIN_ID) {
- } else if (val >= 2048 && val < 3072) {
- // commit closed
- id = val - 2048;
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Commit CLOSED transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
- XACT_MGR->commitTransaction(thread_num, id, false);
- } else if (val >= 3072 && val < 4096) {
- // begin open
- id = val - 3072;
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "Begin OPEN transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
- XACT_MGR->beginTransaction(thread_num, id, true);
- } else if (val >= 4096 && val < 5120) {
- // commit open
- id = val - 4096;
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "COMMIT OPEN transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
- XACT_MGR->commitTransaction(thread_num, id, true);
- } else if (val >= 5120 && val < 6144){
-
- cout << " SYSCALL " << val - 5120 << " of proc " << proc_num << " " << thread_num << " time = " << ruby_cycle << endl;
- } else if (val >= 6144 && val < 7168) {
- // abort
- id = val - 6144;
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2)
- cout << "ABORT transaction for thread " << thread_num << " of proc " << proc_num << " XID " << id << endl;
- XACT_MGR->abortTransaction(thread_num, id);
- } else if (val == 8000) {
- // transaction level
- if (XACT_DEBUG && XACT_DEBUG_LEVEL > 2) {
- id = val - 8000;
- cout << "Transaction Level for thread " << thread_num << " of proc " << proc_num << " XID " << id << " : "
- << XACT_MGR->getTransactionLevel(thread_num)<< endl;
- }
- SIMICS_write_register(proc_num, SIMICS_get_register_number(proc_num, "i0"),(unsigned int) XACT_MGR->getTransactionLevel(thread_num));
- } else if (val==8001) {
- cout << " " << g_eventQueue_ptr->getTime() << " " << dec << proc_num << " [" << proc_num << "," << thread_num << " ]"
- << " TID " << XACT_MGR->getTID(0)
- << " DEBUGMSG " << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i0")) << " "
- << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i1")) << " "
- << "(0x" << hex << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i1")) << ") "
- << dec << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i2")) << " "
- << "(0x" << hex << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i2")) << ")" << dec
- << " break = " << SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i3")) << endl;
- if (SIMICS_read_register(proc_num, SIMICS_get_register_number(proc_num, "i3")) == 1) {
- SIM_break_simulation("DEBUGMSG");
- }
- } else {
- WARN_EXPR(val);
- WARN_EXPR(SIMICS_get_program_counter(proc_num));
- WARN_MSG("Unexpected magic call");
- }
-#endif
-}
-
-/* -- Handle command to change the debugging verbosity for Ruby */
-extern "C"
-void ruby_change_debug_verbosity(char* new_verbosity_str)
-{
- assert(0);
- g_debug_ptr->setVerbosityString(new_verbosity_str);
-}
-
-/* -- Handle command to change the debugging filter for Ruby */
-extern "C"
-void ruby_change_debug_filter(char* new_filter_str)
-{
- assert(0);
- g_debug_ptr->setFilterString(new_filter_str);
-}
-
-/* -- Handle command to set the debugging output file for Ruby */
-extern "C"
-void ruby_set_debug_output_file (const char * new_filename)
-{
- assert(0);
- string filename(new_filename);
-
- filename += "-";
- filename += CURRENT_PROTOCOL;
- // get the date and time to label the debugging file
- const time_t T = time(NULL);
- tm *localTime = localtime(&T);
- char buf[100];
- strftime(buf, 100, ".%b%d.%Y-%H.%M.%S", localTime);
-
- filename += buf;
- filename += ".debug";
-
- cout << "Dumping debugging output to file '" << filename << "'...";
- g_debug_ptr->setDebugOutputFile (filename.c_str());
-}
-
-extern "C"
-void ruby_set_debug_start_time(char* start_time_str)
-{
- assert(0);
- int startTime = atoi(start_time_str);
- g_debug_ptr->setDebugTime(startTime);
-}
-
-/* -- Clear stats */
-extern "C"
-void ruby_clear_stats()
-{
- assert(0);
- cout << "Clearing stats...";
- fflush(stdout);
- g_system_ptr->clearStats();
- cout << "Done." << endl;
-}
-
-/* -- Dump stats */
-extern "C"
-// File name, dump to file
-void ruby_dump_stats(char* filename)
-{
- assert(0);
- /*g_debug_ptr->closeDebugOutputFile();*/
- if (filename == NULL) {
- // No output file, dump to screen
- cout << "Dumping stats to standard output..." << endl;
- g_system_ptr->printConfig(cout);
- g_system_ptr->printStats(cout);
- } else {
- cout << "Dumping stats to output file '" << filename << "'..." << endl;
- ofstream m_outputFile;
- m_outputFile.open(filename);
- if(m_outputFile == NULL) {
- cout << "Error: error opening output file '" << filename << "'" << endl;
- return;
- }
- g_system_ptr->printConfig(m_outputFile);
- g_system_ptr->printStats(m_outputFile);
- }
- cout << "Dumping stats completed." << endl;
-}
-
-/* -- Dump stats */
-extern "C"
-// File name, dump to file
-void ruby_dump_short_stats(char* filename)
-{
- assert(0);
- g_debug_ptr->closeDebugOutputFile();
- if (filename == NULL) {
- // No output file, dump to screen
- //cout << "Dumping short stats to standard output..." << endl;
- //g_system_ptr->printConfig(cout);
- g_system_ptr->getProfiler()->printStats(cout, true);
- } else {
- cout << "Dumping stats to output file '" << filename << "'..." << endl;
- ofstream m_outputFile;
- m_outputFile.open(filename);
- if(m_outputFile == NULL) {
- cout << "Error: error opening output file '" << filename << "'" << endl;
- return;
- }
- g_system_ptr->getProfiler()->printShortStats(m_outputFile);
- cout << "Dumping stats completed." << endl;
- }
-}
-
-extern "C"
-void ruby_load_caches(char* name)
-{
- assert(0);
- if (name == NULL) {
- cout << "Error: ruby_load_caches requires a file name" << endl;
- return;
- }
-
- cout << "Reading cache contents from '" << name << "'...";
- /* gem5:Binkert for decommissioning of tracer
- int read = Tracer::playbackTrace(name);
- cout << "done. (" << read << " cache lines read)" << endl;
- */
- cout << "done. (TRACER DISABLED!)" << endl;
- ruby_clear_stats();
-}
-
-extern "C"
-void ruby_save_caches(char* name)
-{
- assert(0);
- if (name == NULL) {
- cout << "Error: ruby_save_caches requires a file name" << endl;
- return;
- }
-
- cout << "Writing cache contents to '" << name << "'...";
- CacheRecorder recorder;
- g_system_ptr->recordCacheContents(recorder);
- int written = recorder.dumpRecords(name);
- cout << "done. (" << written << " cache lines written)" << endl;
-}
-
-extern "C"
-void ruby_set_tracer_output_file (const char * new_filename)
-{
- assert(0);
- //g_system_ptr->getTracer()->startTrace(string(new_filename));
-}
-
-/* -- Handle command to set the xact visualizer file for Ruby */
-extern "C"
-void ruby_xact_visualizer_file (char * new_filename)
-{
- cout << "Dumping xact visualizer output to file '" << new_filename << "'...";
- // g_system_ptr->getProfiler()->setXactVisualizerFile (new_filename);
-}
-
-extern "C"
-void ctrl_exception_start(void* desc, void* cpu, integer_t val)
-{
-#if 0
- int proc_no = SIM_get_proc_no((void*) cpu);
- void* cpu_obj = (void*) cpu;
- uinteger_t trap_level = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tl"));
-
- if (!XACT_MEMORY) return;
- TransactionInterfaceManager *xact_mgr = XACT_MGR;
-
- // level {10,14} interrupt
- //
- if (val == 0x4a || val == 0x4e) {
- int rn_tick = SIM_get_register_number(cpu_obj, "tick");
- uinteger_t tick = SIM_read_register(cpu_obj, rn_tick);
- int rn_tick_cmpr = SIM_get_register_number(cpu_obj, "tick_cmpr");
- uinteger_t tick_cmpr = SIM_read_register(cpu_obj, rn_tick_cmpr);
- int rn_stick = SIM_get_register_number(cpu_obj, "stick");
- uinteger_t stick = SIM_read_register(cpu_obj, rn_stick);
- int rn_stick_cmpr = SIM_get_register_number(cpu_obj, "stick_cmpr");
- uinteger_t stick_cmpr = SIM_read_register(cpu_obj, rn_stick_cmpr);
- int rn_pc = SIM_get_register_number(cpu_obj, "pc");
- uinteger_t pc = SIM_read_register(cpu_obj, rn_pc);
- int rn_npc = SIM_get_register_number(cpu_obj, "npc");
- uinteger_t npc = SIM_read_register(cpu_obj, rn_npc);
- int rn_pstate = SIM_get_register_number(cpu_obj, "pstate");
- uinteger_t pstate = SIM_read_register(cpu_obj, rn_pstate);
- int rn_pil = SIM_get_register_number(cpu_obj, "pil");
- int pil = SIM_read_register(cpu_obj, rn_pil);
- g_system_ptr->getProfiler()->profileTimerInterrupt(proc_no,
- tick, tick_cmpr,
- stick, stick_cmpr,
- trap_level,
- pc, npc,
- pstate, pil);
- }
-
- int smt_thread_num = proc_no % RubyConfig::numberofSMTThreads();
- // The simulated processor number
- int sim_proc_no = proc_no / RubyConfig::numberofSMTThreads();
-
- uinteger_t pc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "pc"));
- uinteger_t npc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "npc"));
-
- g_system_ptr->getProfiler()->profileExceptionStart(xact_mgr->getTransactionLevel(smt_thread_num) > 0, sim_proc_no, smt_thread_num, val, trap_level, pc, npc);
-
- if((val >= 0x80 && val <= 0x9f) || (val >= 0xc0 && val <= 0xdf)){
- //xact_mgr->setLoggedException(smt_thread_num);
- }
- // CORNER CASE - You take an exception while stalling for a commit token
- if (XACT_LAZY_VM && !XACT_EAGER_CD){
- if (g_system_ptr->getXactCommitArbiter()->getTokenOwner() == proc_no)
- g_system_ptr->getXactCommitArbiter()->releaseCommitToken(proc_no);
- }
-#endif
- assert(0);
-}
-
-extern "C"
-void ctrl_exception_done(void* desc, void* cpu, integer_t val)
-{
- assert(0);
-#if 0
- int proc_no = SIM_get_proc_no((void*) cpu);
- void* cpu_obj = (void*) cpu;
- uinteger_t trap_level = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tl"));
- uinteger_t pc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "pc"));
- uinteger_t npc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "npc"));
- uinteger_t tpc = 0;
- uinteger_t tnpc = 0;
- //get the return PC,NPC pair based on the trap level
- ASSERT(1 <= trap_level && trap_level <= 5);
- if(trap_level == 1){
- tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc1"));
- tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc1"));
- }
- if(trap_level == 2){
- tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc2"));
- tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc2"));
- }
- if(trap_level == 3){
- tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc3"));
- tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc3"));
- }
- if(trap_level == 4){
- tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc4"));
- tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc4"));
- }
- if(trap_level == 5){
- tpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tpc5"));
- tnpc = SIM_read_register(cpu_obj, SIM_get_register_number(cpu_obj, "tnpc5"));
- }
-
- if (!XACT_MEMORY) return;
- TransactionInterfaceManager *xact_mgr = XACT_MGR;
-
- int smt_thread_num = proc_no % RubyConfig::numberofSMTThreads();
- // The simulated processor number
- int sim_proc_no = proc_no / RubyConfig::numberofSMTThreads();
-
- if (proc_no != SIMICS_current_processor_number()){
- WARN_EXPR(proc_no);
- WARN_EXPR(SIMICS_current_processor_number());
- WARN_MSG("Callback for a different processor");
- }
-
- g_system_ptr->getProfiler()->profileExceptionDone(xact_mgr->getTransactionLevel(smt_thread_num) > 0, sim_proc_no, smt_thread_num, val, trap_level, pc, npc, tpc, tnpc);
-
- if((val >= 0x80 && val <= 0x9f) || (val >= 0xc0 && val <= 0xdf)){
- //xact_mgr->clearLoggedException(smt_thread_num);
- }
-
- if ((val == 0x122) && xact_mgr->shouldTrap(smt_thread_num)){
- // use software handler
- if (xact_mgr->shouldUseHardwareAbort(smt_thread_num)){
- xact_mgr->hardwareAbort(smt_thread_num);
- } else {
- xact_mgr->trapToHandler(smt_thread_num);
- }
- }
-#endif
-}
-
-extern "C"
-void change_mode_callback(void* desc, void* cpu, integer_t old_mode, integer_t new_mode)
-{
- assert(0);
-#if 0
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- simics_interface_ptr->getHypervisor()->change_mode_callback(desc, cpu, old_mode, new_mode);
- }
-#endif
-}
-
-extern "C"
-void dtlb_map_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
- assert(0);
-#if 0
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- simics_interface_ptr->getHypervisor()->dtlb_map_callback(desc, chmmu, tag_reg, data_reg);
- }
-#endif
-}
-
-extern "C"
-void dtlb_demap_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
- assert(0);
-#if 0
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- simics_interface_ptr->getHypervisor()->dtlb_demap_callback(desc, chmmu, tag_reg, data_reg);
- }
-#endif
-}
-
-extern "C"
-void dtlb_replace_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
- assert(0);
-#if 0
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- simics_interface_ptr->getHypervisor()->dtlb_replace_callback(desc, chmmu, tag_reg, data_reg);
- }
-#endif
-}
-
-extern "C"
-void dtlb_overwrite_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg){
- assert(0);
-#if 0
- if (XACT_ENABLE_VIRTUALIZATION_LOGTM_SE) {
- SimicsDriver* simics_interface_ptr = static_cast<SimicsDriver*>(g_system_ptr->getDriver());
- simics_interface_ptr->getHypervisor()->dtlb_overwrite_callback(desc, chmmu, tag_reg, data_reg);
- }
-#endif
-}
-
-extern "C"
-void core_control_register_write_callback(void* desc, void* cpu, integer_t register_number, integer_t value) {
- assert(0);
-#if 0
- int proc_no = SIM_get_proc_no((void*) cpu);
- void* cpu_obj = (void*) cpu;
-#endif
-}
-
-integer_t
-read_reg(void *cpu, const char* reg_name)
-{
- assert(0);
-#if 0
- int reg_num = SIM_get_register_number(SIM_current_processor(), reg_name);
- if (SIM_clear_exception()) {
- fprintf(stderr, "read_reg: SIM_get_register_number(%s, %s) failed!\n",
- cpu->name, reg_name);
- assert(0);
- }
- integer_t val = SIM_read_register(cpu, reg_num);
- if (SIM_clear_exception()) {
- fprintf(stderr, "read_reg: SIM_read_register(%s, %d) failed!\n",
- cpu->name, reg_num);
- assert(0);
- }
- return val;
-#endif
- return 0;
-}
-
-extern "C"
-void dump_registers(void *cpu)
-{
- assert(0);
-#if 0
- const char* reg_names[] = {
- "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
- "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
- "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
- "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
- "ccr", "pc", "npc"
- };
-
- printf("Registers for %s\n", cpu->name);
- printf("------------------\n");
-
- for (int i = 0; i < (sizeof(reg_names) / sizeof(char*)); i++) {
- const char* reg_name = reg_names[i];
- printf(" %3s: 0x%016llx\n", reg_name, read_reg(cpu, reg_name));
- if (i % 8 == 7) {
- printf("\n");
- }
- }
-
- int myID = SIMICS_get_proc_no(cpu);
- Address myPC = SIMICS_get_program_counter(myID);
- physical_address_t myPhysPC = SIMICS_translate_address(myID, myPC);
- integer_t myInst = SIMICS_read_physical_memory(myID, myPhysPC, 4);
- const char *myInstStr = SIMICS_disassemble_physical(myID, myPhysPC);
- printf("\n *pc: 0x%llx: %s\n", myInst, myInstStr);
-
- printf("\n\n");
-#endif
-}
diff --git a/src/mem/ruby/simics/commands.hh b/src/mem/ruby/simics/commands.hh
deleted file mode 100644
index e7593c2c3..000000000
--- a/src/mem/ruby/simics/commands.hh
+++ /dev/null
@@ -1,106 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- This file has been modified by Kevin Moore and Dan Nussbaum of the
- Scalable Systems Research Group at Sun Microsystems Laboratories
- (http://research.sun.com/scalable/) to support the Adaptive
- Transactional Memory Test Platform (ATMTP).
-
- Please send email to atmtp-interest@sun.com with feedback, questions, or
- to request future announcements about ATMTP.
-
- ----------------------------------------------------------------------
-
- File modification date: 2008-02-23
-
- ----------------------------------------------------------------------
-*/
-
-/*
- * $Id$
- *
- * Description:
- *
- */
-
-#ifndef COMMANDS_H
-#define COMMANDS_H
-
-#ifdef SPARC
- #define MEMORY_TRANSACTION_TYPE void
-#else
- #define MEMORY_TRANSACTION_TYPE void
-#endif
-
-int mh_memorytracer_possible_cache_miss(MEMORY_TRANSACTION_TYPE *mem_trans);
-void mh_memorytracer_observe_memory(MEMORY_TRANSACTION_TYPE *mem_trans);
-
-void magic_instruction_callback(void* desc, void * cpu, integer_t val);
-
-void ruby_change_debug_verbosity(char* new_verbosity_str);
-void ruby_change_debug_filter(char* new_filter_str);
-void ruby_set_debug_output_file (const char * new_filename);
-void ruby_set_debug_start_time(char* start_time_str);
-
-void ruby_clear_stats();
-void ruby_dump_stats(char* tag);
-void ruby_dump_short_stats(char* tag);
-
-void ruby_set_periodic_stats_file(char* filename);
-void ruby_set_periodic_stats_interval(int interval);
-
-void ruby_load_caches(char* name);
-void ruby_save_caches(char* name);
-
-void ruby_dump_cache(int cpuNumber);
-void ruby_dump_cache_data(int cpuNumber, char *tag);
-
-void ruby_set_tracer_output_file (const char * new_filename);
-void ruby_xact_visualizer_file (char * new_filename);
-
-void ctrl_exception_start(void* desc, void* cpu, integer_t val);
-void ctrl_exception_done(void* desc, void* cpu, integer_t val);
-
-void change_mode_callback(void* desc, void* cpu, integer_t old_mode, integer_t new_mode);
-void dtlb_map_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
-void dtlb_demap_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
-void dtlb_replace_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
-void dtlb_overwrite_callback(void* desc, void* chmmu, integer_t tag_reg, integer_t data_reg);
-
-integer_t read_reg(void *cpu, const char* reg_name);
-void dump_registers(void *cpu);
-
-// Needed so that the ruby module will compile, but functions are
-// implemented in Rock.C.
-//
-void rock_exception_start(void* desc, void* cpu, integer_t val);
-void rock_exception_done(void* desc, void* cpu, integer_t val);
-
-#endif //COMMANDS_H
diff --git a/src/mem/ruby/simics/interface.cc b/src/mem/ruby/simics/interface.cc
deleted file mode 100644
index 1f088c023..000000000
--- a/src/mem/ruby/simics/interface.cc
+++ /dev/null
@@ -1,935 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: interface.C 1.39 05/01/19 13:12:31-06:00 mikem@maya.cs.wisc.edu $
- *
- */
-
-#include "Global.hh"
-#include "System.hh"
-#include "OpalInterface.hh"
-#include "RubyEventQueue.hh"
-#include "mf_api.hh"
-#include "interface.hh"
-#include "Sequencer.hh"
-// #include "TransactionInterfaceManager.hh"
-
-#ifdef CONTIGUOUS_ADDRESSES
-#include "ContiguousAddressTranslator.hh"
-
-/* Also used in init.C, commands.C */
-ContiguousAddressTranslator * g_p_ca_translator = NULL;
-
-#endif // #ifdef CONTIGUOUS_ADDRESSES
-
-//////////////////////// Local helper functions //////////////////////
-
-// Callback when exception occur
-static void core_exception_callback(void *data, void *cpu,
- integer_t exc)
-{
- // SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
- // ASSERT( simics_intf );
- // simics_intf->exceptionCallback(cpu, exc);
- assert(0);
-}
-
-#ifdef SPARC
-// Callback when asi accesses occur
-// static exception_type_t core_asi_callback(void * cpu, generic_transaction_t *g)
-// {
-// SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
-// assert( simics_intf );
-// return simics_intf->asiCallback(cpu, g);
-// }
-#endif
-
-static void runRubyEventQueue(void* obj, void* arg)
-{
- Time time = g_eventQueue_ptr->getTime() + 1;
- DEBUG_EXPR(NODE_COMP, HighPrio, time);
- g_eventQueue_ptr->triggerEvents(time);
-// void* obj_ptr = (void*) SIM_proc_no_2_ptr(0); // Maurice
-// SIM_time_post_cycle(obj_ptr, SIMICS_RUBY_MULTIPLIER, Sim_Sync_Processor, &runRubyEventQueue, NULL); // Maurice
- assert(0);
-}
-
-//////////////////////// Simics API functions //////////////////////
-
-int SIMICS_number_processors()
-{
-// return SIM_number_processors(); // Maurice
- assert(0);
- return 0;
-}
-
-void SIMICS_wakeup_ruby()
-{
-// void* obj_ptr = (void*) SIM_proc_no_2_ptr(0); // Maurice
-// SIM_time_post_cycle(obj_ptr, SIMICS_RUBY_MULTIPLIER, Sim_Sync_Processor, &runRubyEventQueue, NULL); // Maurice
- assert(0);
-}
-
-// an analogue to wakeup ruby, this function ends the callbacks ruby normally
-// receives from simics. (it removes ruby from simics's event queue). This
-// function should only be called when opal is installed. Opal advances ruby's
-// event queue independently of simics.
-void SIMICS_remove_ruby_callback( void )
-{
-// void* obj_ptr = (void*) SIM_proc_no_2_ptr(0); // Maurice
-// SIM_time_clean( obj_ptr, Sim_Sync_Processor, &runRubyEventQueue, NULL); // Maurice
- assert(0);
-}
-
-// Install ruby as the timing model (analogous code exists in ruby/ruby.c)
-void SIMICS_install_timing_model( void )
-{
-// // void *phys_mem0 = SIM_get_object("phys_mem0"); // Maurice
-// attr_value_t val;
-// // val.kind = Sim_Val_String; // Maurice
-// val.u.string = "ruby0";
-// set_error_t install_error;
-//
-// if(phys_mem0==NULL) {
-// /* Look for "phys_mem" instead */
-// // SIM_clear_exception(); // Maurice
-// // phys_mem0 = SIM_get_object("phys_mem"); // Maurice
-// }
-//
-// if(phys_mem0==NULL) {
-// /* Okay, now panic... can't install ruby without a physical memory object */
-// WARN_MSG( "Cannot Install Ruby... no phys_mem0 or phys_mem object found" );
-// WARN_MSG( "Ruby is NOT installed." );
-// // SIM_clear_exception(); // Maurice
-// return;
-// }
-//
-// // install_error = SIM_set_attribute(phys_mem0, "timing_model", &val); // Maurice
-//
-// // if (install_error == Sim_Set_Ok) { // Maurice
-// WARN_MSG( "successful installation of the ruby timing model" );
-// } else {
-// WARN_MSG( "error installing ruby timing model" );
-// // WARN_MSG( SIM_last_error() ); // Maurice
-// }
-
- assert(0);
-}
-
-// Removes ruby as the timing model interface
-void SIMICS_remove_timing_model( void )
-{
-// void *phys_mem0 = SIM_get_object("phys_mem0"); // Maurice
-// attr_value_t val;
-// memset( &val, 0, sizeof(attr_value_t) );
-// // val.kind = Sim_Val_Nil; // Maurice
-//
-// if(phys_mem0==NULL) {
-// /* Look for "phys_mem" instead */
-// // SIM_clear_exception(); // Maurice
-// // phys_mem0 = SIM_get_object("phys_mem"); // Maurice
-// }
-//
-// if(phys_mem0==NULL) {
-// /* Okay, now panic... can't uninstall ruby without a physical memory object */
-// WARN_MSG( "Cannot Uninstall Ruby... no phys_mem0 or phys_mem object found" );
-// WARN_MSG( "Uninstall NOT performed." );
-// // SIM_clear_exception(); // Maurice
-// return;
-// }
-//
-// // SIM_set_attribute(phys_mem0, "timing_model", &val); // Maurice
- assert(0);
-}
-
-// Installs the (SimicsDriver) function to receive the exception callback
-void SIMICS_install_exception_callback( void )
-{
- // install exception callback
- // s_exception_hap_handle =
-// SIM_hap_add_callback("Core_Exception", // Maurice
- // (obj_hap_func_t)core_exception_callback, NULL );
- assert(0);
-}
-
-// removes the exception callback
-void SIMICS_remove_exception_callback( void )
-{
- // uninstall exception callback
-// SIM_hap_delete_callback_id( "Core_Exception", // Maurice
- // s_exception_hap_handle );
- assert(0);
-}
-
-#ifdef SPARC
-// Installs the (SimicsDriver) function to receive the asi callback
-void SIMICS_install_asi_callback( void )
-{
-// for(int i = 0; i < SIM_number_processors(); i++) { // Maurice
- // sparc_v9_interface_t *v9_interface = (sparc_v9_interface_t *)
-// SIM_get_interface(SIM_proc_no_2_ptr(i), SPARC_V9_INTERFACE); // Maurice
-
- // init asi callbacks, 16bit ASI
- // for(int j = 0; j < MAX_ADDRESS_SPACE_ID; j++) {
- // v9_interface->install_user_asi_handler(core_asi_callback, j);
- // }
- // }
- assert(0);
-}
-
-// removes the asi callback
-void SIMICS_remove_asi_callback( void )
-{
-// for(int i = 0; i < SIM_number_processors(); i++) { // Maurice
-// sparc_v9_interface_t *v9_interface = (sparc_v9_interface_t *)
-// SIM_get_interface(SIM_proc_no_2_ptr(i), SPARC_V9_INTERFACE); // Maurice
-
- // disable asi callback
- // for(int j = 0; j < MAX_ADDRESS_SPACE_ID; j++) {
- // v9_interface->remove_user_asi_handler(core_asi_callback, j);
- // }
- // }
- assert(0);
-}
-#endif
-
-// Query simics for the presence of the opal object.
-// returns its interface if found, NULL otherwise
-mf_opal_api_t *SIMICS_get_opal_interface( void )
-{
-// void *opal = SIM_get_object("opal0"); // Maurice
- //if (opal != NULL) {
-// mf_opal_api_t *opal_intf = (mf_opal_api_t *) SIM_get_interface( opal, "mf-opal-api" ); // Maurice
- // if ( opal_intf != NULL ) {
- // return opal_intf;
-// } else {
-// WARN_MSG("error: OpalInterface: opal does not implement mf-opal-api interface.\n");
-// return NULL;
-// }
-// }
-// SIM_clear_exception(); // Maurice
- assert(0);
- return NULL;
-}
-
-void * SIMICS_current_processor(){
-// return SIM_current_processor(); // Maurice
- assert(0);
- return NULL;
-}
-
-int SIMICS_current_processor_number()
-{
-// return (SIM_get_proc_no((processor_t *) SIM_current_processor())); // Maurice
- assert(0);
- return 0;
-}
-
-integer_t SIMICS_get_insn_count(int cpuNumber)
-{
- // NOTE: we already pass in the logical cpuNumber (ie Simics simulated cpu number)
- int num_smt_threads = RubyConfig::numberofSMTThreads();
- integer_t total_insn = 0;
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// total_insn += SIM_step_count((void*) cpu); // Maurice
- assert(0);
- return total_insn;
-}
-
-integer_t SIMICS_get_cycle_count(int cpuNumber)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// integer_t result = SIM_cycle_count((void*) cpu); // Maurice
- assert(0);
- return 0;
-}
-
-void SIMICS_unstall_proc(int cpuNumber)
-{
-// void* proc_ptr = (void *) SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_stall_cycle(proc_ptr, 0); // Maurice
- assert(0);
-}
-
-void SIMICS_unstall_proc(int cpuNumber, int cycles)
-{
-// void* proc_ptr = (void *) SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_stall_cycle(proc_ptr, cycles); // Maurice
- assert(0);
-}
-
-void SIMICS_stall_proc(int cpuNumber, int cycles)
-{
-// void* proc_ptr = (void*) SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// if (SIM_stalled_until(proc_ptr) != 0){ // Maurice
-// cout << cpuNumber << " Trying to stall. Stall Count currently at " << SIM_stalled_until(proc_ptr) << endl; // Maurice
-// }
-// SIM_stall_cycle(proc_ptr, cycles); // Maurice
- assert(0);
-}
-
-void SIMICS_post_stall_proc(int cpuNumber, int cycles)
-{
-// void* proc_ptr = (void*) SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_stacked_post(proc_ptr, ruby_stall_proc, (void *) cycles); // Maurice
- assert(0);
-}
-
-integer_t SIMICS_read_physical_memory( int procID, physical_address_t address,
- int len )
-{
-// // SIM_clear_exception(); // Maurice
-// ASSERT( len <= 8 );
-// #ifdef CONTIGUOUS_ADDRESSES
-// if(g_p_ca_translator != NULL) {
-// address = g_p_ca_translator->TranslateRubyToSimics( address );
-// }
-// #endif // #ifdef CONTIGUOUS_ADDRESSES
-// // integer_t result = SIM_read_phys_memory( SIM_proc_no_2_ptr(procID), // Maurice
-// // // address, len );
-// //
-// // // int isexcept = SIM_get_pending_exception(); // Maurice
-// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// // WARN_MSG( "SIMICS_read_physical_memory: raised exception." );
-// // // WARN_MSG( SIM_last_error() ); // Maurice
-// // WARN_MSG( Address(address) );
-// // WARN_MSG( procID );
-// // ASSERT(0);
-// // }
-// // return ( result );
- assert(0);
- return 0;
-}
-//
-// /*
-// * Read data into a buffer and assume the buffer is already allocated
-// */
-void SIMICS_read_physical_memory_buffer(int procID, physical_address_t addr,
- char* buffer, int len ) {
-// // // processor_t* obj = SIM_proc_no_2_ptr(procID); // Maurice
-// //
-// // assert( obj != NULL);
-// // assert( buffer != NULL );
-// //
-// // #ifdef CONTIGUOUS_ADDRESSES
-// // if(g_p_ca_translator != NULL) {
-// // addr = g_p_ca_translator->TranslateRubyToSimics( addr );
-// // }
-// // #endif // #ifdef CONTIGUOUS_ADDRESSES
-// //
-// // int buffer_pos = 0;
-// // physical_address_t start = addr;
-// // do {
-// // int size = (len < 8)? len:8;
-// // // integer_t result = SIM_read_phys_memory( obj, start, size ); // Maurice
-// // // int isexcept = SIM_get_pending_exception(); // Maurice
-// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// // WARN_MSG( "SIMICS_read_physical_memory_buffer: raised exception." );
-// // // WARN_MSG( SIM_last_error() ); // Maurice
-// // WARN_MSG( addr );
-// // WARN_MSG( procID );
-// // ASSERT( 0 );
-// // }
-// //
-// // #ifdef SPARC
-// // // assume big endian (i.e. SPARC V9 target)
-// // for(int i = size-1; i >= 0; i--) {
-// // #else
-// // // assume little endian (i.e. x86 target)
-// // for(int i = 0; i<size; i++) {
-// // #endif
-// // buffer[buffer_pos++] = (char) ((result>>(i<<3))&0xff);
-// // }
-// //
-// // len -= size;
-// // start += size;
-// // } while(len != 0);
- assert(0);
-}
-//
-void SIMICS_write_physical_memory( int procID, physical_address_t address,
- integer_t value, int len )
- {
-// // ASSERT( len <= 8 );
-// //
-// // // SIM_clear_exception(); // Maurice
-// //
-// // // processor_t* obj = SIM_proc_no_2_ptr(procID); // Maurice
-// //
-// // #ifdef CONTIGUOUS_ADDRESSES
-// // if(g_p_ca_translator != NULL) {
-// // address = g_p_ca_translator->TranslateRubyToSimics( address );
-// // }
-// // #endif // #ifdef CONTIGUOUS_ADDRESSES
-// //
-// // // int isexcept = SIM_get_pending_exception(); // Maurice
-// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// // WARN_MSG( "SIMICS_write_physical_memory 1: raised exception." );
-// // // WARN_MSG( SIM_last_error() ); // Maurice
-// // WARN_MSG( address );
-// // }
-// //
-// // // SIM_write_phys_memory(obj, address, value, len ); // Maurice
-// //
-// // // isexcept = SIM_get_pending_exception(); // Maurice
-// // if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// // // sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// // WARN_MSG( "SIMICS_write_physical_memory 2: raised exception." );
-// // // WARN_MSG( SIM_last_error() ); // Maurice
-// // WARN_MSG( address );
-// // }
- assert(0);
-}
-//
-// /*
-// * write data to simics memory from a buffer (assumes the buffer is valid)
-// */
-void SIMICS_write_physical_memory_buffer(int procID, physical_address_t addr,
- char* buffer, int len ) {
-// // processor_t* obj = SIM_proc_no_2_ptr(procID); // Maurice
-//
-// assert( obj != NULL);
-// assert( buffer != NULL );
-//
-// #ifdef CONTIGUOUS_ADDRESSES
-// if(g_p_ca_translator != NULL) {
-// addr = g_p_ca_translator->TranslateRubyToSimics( addr );
-// }
-// #endif // #ifdef CONTIGUOUS_ADDRESSES
-//
-// int buffer_pos = 0;
-// physical_address_t start = addr;
-// do {
-// int size = (len < 8)? len:8;
-// // //integer_t result = SIM_read_phys_memory( obj, start, size ); // Maurice
-// integer_t value = 0;
-// #ifdef SPARC
-// // assume big endian (i.e. SPARC V9 target)
-// for(int i = size-1; i >= 0; i--) {
-// #else
-// // assume little endian (i.e. x86 target)
-// for(int i = 0; i<size; i++) {
-// #endif
-// integer_t mask = buffer[buffer_pos++];
-// value |= ((mask)<<(i<<3));
-// }
-//
-//
-// // SIM_write_phys_memory( obj, start, value, size); // Maurice
-// // int isexcept = SIM_get_pending_exception(); // Maurice
-// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// // sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// WARN_MSG( "SIMICS_write_physical_memory_buffer: raised exception." );
-// // WARN_MSG( SIM_last_error() ); // Maurice
-// WARN_MSG( addr );
-// }
-//
-// len -= size;
-// start += size;
-// } while(len != 0);
- assert(0);
-}
-
-bool SIMICS_check_memory_value(int procID, physical_address_t addr,
- char* buffer, int len) {
- char buf[len];
- SIMICS_read_physical_memory_buffer(procID, addr, buf, len);
- assert(0);
- return (memcmp(buffer, buf, len) == 0)? true:false;
-}
-
-physical_address_t SIMICS_translate_address( int procID, Address address ) {
-// SIM_clear_exception(); // Maurice
-// physical_address_t physical_addr = SIM_logical_to_physical(SIM_proc_no_2_ptr(procID), Sim_DI_Instruction, address.getAddress() ); // Maurice
-// int isexcept = SIM_get_pending_exception(); // Maurice
-// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// /*
-// WARN_MSG( "SIMICS_translate_address: raised exception." );
-// WARN_MSG( procID );
-// WARN_MSG( address );
-// // WARN_MSG( SIM_last_error() ); // Maurice
-// */
-// return 0;
-// }
-//
-// #ifdef CONTIGUOUS_ADDRESSES
-// if(g_p_ca_translator != NULL) {
-// physical_addr = g_p_ca_translator->TranslateSimicsToRuby( physical_addr );
-// }
-// #endif // #ifdef CONTIGUOUS_ADDRESSES
-//
-// return physical_addr;
- assert(0);
- return 0;
-}
-
-physical_address_t SIMICS_translate_data_address( int procID, Address address ) {
-// SIM_clear_exception(); // Maurice
-// physical_address_t physical_addr = SIM_logical_to_physical(SIM_proc_no_2_ptr(procID), Sim_DI_Data, address.getAddress() ); // Maurice
-// int isexcept = SIM_get_pending_exception(); // Maurice
-// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// sim_exception_t except_code = SIM_clear_exception(); // Maurice
- /*
- WARN_MSG( "SIMICS_translate_data_address: raised exception." );
- WARN_MSG( procID );
- WARN_MSG( address );
-// WARN_MSG( SIM_last_error() ); // Maurice
- */
-// }
-// return physical_addr;
- assert(0);
- return 0;
-}
-
-#ifdef SPARC
-bool SIMICS_is_ldda(const memory_transaction_t *mem_trans) {
-// void *cpu = mem_trans->s.ini_ptr;
-// int proc= SIMICS_get_proc_no(cpu);
-// Address addr = SIMICS_get_program_counter(cpu);
-// physical_address_t phys_addr = SIMICS_translate_address( proc, addr );
-// uint32 instr= SIMICS_read_physical_memory( proc, phys_addr, 4 );
-//
-// // determine if this is a "ldda" instruction (non-exclusive atomic)
-// // ldda bit mask: 1100 0001 1111 1000 == 0xc1f80000
-// // ldda match : 1100 0000 1001 1000 == 0xc0980000
-// if ( (instr & 0xc1f80000) == 0xc0980000 ) {
-// // should exactly be ldda instructions
-// ASSERT(!strncmp(SIMICS_disassemble_physical(proc, phys_addr), "ldda", 4));
-// //cout << "SIMICS_is_ldda END" << endl;
-// return true;
-// }
-// return false;
- assert(0);
- return false;
-}
-#endif
-
-const char *SIMICS_disassemble_physical( int procID, physical_address_t pa ) {
-//#ifdef CONTIGUOUS_ADDRESSES
-// if(g_p_ca_translator != NULL) {
-// pa = g_p_ca_translator->TranslateRubyToSimics( pa );
-// }
-//#endif // #ifdef CONTIGUOUS_ADDRESSES
-// return SIM_disassemble( SIM_proc_no_2_ptr(procID), pa , /* physical */ 0)->string; // Maurice
- assert(0);
- return "There is no spoon";
-}
-
-Address SIMICS_get_program_counter(void *cpu) {
- assert(cpu != NULL);
-// return Address(SIM_get_program_counter((processor_t *) cpu)); // Maurice
- assert(0);
- return Address(0);
-}
-
-Address SIMICS_get_npc(int procID) {
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// return Address(SIM_read_register(cpu, SIM_get_register_number(cpu, "npc"))); // Maurice
- assert(0);
- return Address(0);
-}
-
-Address SIMICS_get_program_counter(int procID) {
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// assert(cpu != NULL);
-
-// Address addr = Address(SIM_get_program_counter(cpu)); // Maurice
- assert(0);
- return Address(0);
-}
-
-// /* NOTE: SIM_set_program_counter sets NPC to PC+4 */ // Maurice
-void SIMICS_set_program_counter(int procID, Address newPC) {
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// assert(cpu != NULL);
-
-// SIM_stacked_post(cpu, ruby_set_program_counter, (void*) newPC.getAddress()); // Maurice
- assert(0);
-}
-
-void SIMICS_set_pc(int procID, Address newPC) {
- // IMPORTANT: procID is the SIMICS simulated proc number (takes into account SMT)
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// assert(cpu != NULL);
-//
-// if(OpalInterface::isOpalLoaded() == false){
-// // SIM_set_program_counter(cpu, newPC.getAddress()); // Maurice
-// } else {
-// // explicitly change PC
-// ruby_set_pc( cpu, (void *) newPC.getAddress() );
-// }
-// // int isexcept = SIM_get_pending_exception(); // Maurice
-// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// // sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// WARN_MSG( "SIMICS_set_pc: raised exception." );
-// // WARN_MSG( SIM_last_error() ); // Maurice
-// ASSERT(0);
-// }
- assert(0);
-}
-
-void SIMICS_set_next_program_counter(int procID, Address newNPC) {
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// assert(cpu != NULL);
-
-// SIM_stacked_post(cpu, ruby_set_npc, (void*) newNPC.getAddress()); // Maurice
- assert(0);
-}
-
-void SIMICS_set_npc(int procID, Address newNPC) {
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// assert(cpu != NULL);
-//
-// if(OpalInterface::isOpalLoaded() == false){
-// // SIM_write_register(cpu, SIM_get_register_number(cpu, "npc"), newNPC.getAddress()); // Maurice
-// } else {
-// // explicitly change NPC
-// ruby_set_npc( cpu, (void *) newNPC.getAddress() );
-// }
-//
-// // int isexcept = SIM_get_pending_exception(); // Maurice
-// if ( !(isexcept == SimExc_No_Exception || isexcept == SimExc_Break) ) {
-// // sim_exception_t except_code = SIM_clear_exception(); // Maurice
-// WARN_MSG( "SIMICS_set_npc: raised exception " );
-// // WARN_MSG( SIM_last_error() ); // Maurice
-// ASSERT(0);
-// }
- assert(0);
-}
-
-void SIMICS_post_continue_execution(int procID){
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// assert(cpu != NULL);
-//
-// if(OpalInterface::isOpalLoaded() == false){
-// // SIM_stacked_post(cpu, ruby_continue_execution, (void *) NULL); // Maurice
-// } else{
-// ruby_continue_execution( cpu, (void *) NULL );
-// }
- assert(0);
-}
-
-void SIMICS_post_restart_transaction(int procID){
-// void *cpu = SIM_proc_no_2_ptr(procID); // Maurice
-// assert(cpu != NULL);
-//
-// if(OpalInterface::isOpalLoaded() == false){
-// // SIM_stacked_post(cpu, ruby_restart_transaction, (void *) NULL); // Maurice
-// } else{
-// ruby_restart_transaction( cpu, (void *) NULL );
-// }
- assert(0);
-}
-
-// return -1 when fail
-int SIMICS_get_proc_no(void *cpu) {
-// int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
-// return proc_no;
- assert(0);
- return -1;
-}
-
-void SIMICS_disable_processor( int cpuNumber ) {
-// if(SIM_cpu_enabled(SIMICS_get_proc_ptr(cpuNumber))) { // Maurice
-// SIM_disable_processor(SIMICS_get_proc_ptr(cpuNumber)); // Maurice
-// } else {
-// WARN_MSG(cpuNumber);
-// WARN_MSG( "Tried to disable a 'disabled' processor");
-// ASSERT(0);
-// }
- assert(0);
-}
-
-void SIMICS_post_disable_processor( int cpuNumber ) {
-// SIM_stacked_post(SIMICS_get_proc_ptr(cpuNumber), ruby_disable_processor, (void*) NULL); // Maurice
- assert(0);
-}
-
-void SIMICS_enable_processor( int cpuNumber ) {
-// if(!SIM_cpu_enabled(SIMICS_get_proc_ptr(cpuNumber))) { // Maurice
-// SIM_enable_processor(SIMICS_get_proc_ptr(cpuNumber)); // Maurice
-// } else {
-// WARN_MSG(cpuNumber);
-// WARN_MSG( "Tried to enable an 'enabled' processor");
-// }
- assert(0);
-}
-
-bool SIMICS_processor_enabled( int cpuNumber ) {
-// return SIM_cpu_enabled(SIMICS_get_proc_ptr(cpuNumber)); // Maurice
- assert(0);
- return false;
-}
-
-// return NULL when fail
-void* SIMICS_get_proc_ptr(int cpuNumber) {
-// return (void *) SIM_proc_no_2_ptr(cpuNumber); // Maurice
- assert(0);
- return NULL;
-}
-
-void SIMICS_print_version(ostream& out) {
-// const char* version = SIM_version(); // Maurice
-// if (version != NULL) {
-// out << "simics_version: " << SIM_version() << endl; // Maurice
-// }
- out << "Mwa ha ha this is not Simics!!";
-}
-
-// KM -- From Nikhil's SN code
-//these functions should be in interface.C ??
-
-uinteger_t SIMICS_read_control_register(int cpuNumber, int registerNumber)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
-// return result;
- assert(0);
- return 0;
-}
-
-uinteger_t SIMICS_read_window_register(int cpuNumber, int window, int registerNumber)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
-// return result;
- assert(0);
- return 0;
-}
-
-uinteger_t SIMICS_read_global_register(int cpuNumber, int globals, int registerNumber)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
-// return result;
- assert(0);
- return 0;
-}
-
-/**
- uint64 SIMICS_read_fp_register_x(int cpuNumber, int registerNumber)
- {
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// return SIM_read_fp_register_x(cpu, registerNumber); // Maurice
- }
-**/
-
-void SIMICS_write_control_register(int cpuNumber, int registerNumber, uinteger_t value)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_write_register(cpu, registerNumber, value); // Maurice
- assert(0);
-}
-
-void SIMICS_write_window_register(int cpuNumber, int window, int registerNumber, uinteger_t value)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_write_register(cpu, registerNumber, value); // Maurice
- assert(0);
-}
-
-void SIMICS_write_global_register(int cpuNumber, int globals, int registerNumber, uinteger_t value)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_write_register(cpu, registerNumber, value); // Maurice
- assert(0);
-}
-
-/***
- void SIMICS_write_fp_register_x(int cpuNumber, int registerNumber, uint64 value)
- {
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_write_fp_register_x(cpu, registerNumber, value); // Maurice
- }
-***/
-
-// KM -- Functions using new APIs (update from Nikhil's original)
-
-int SIMICS_get_register_number(int cpuNumber, const char * reg_name){
-// int result = SIM_get_register_number(SIM_proc_no_2_ptr(cpuNumber), reg_name); // Maurice
-// return result;
- assert(0);
- return 0;
-}
-
-const char * SIMICS_get_register_name(int cpuNumber, int reg_num){
-// const char * result = SIM_get_register_name(SIM_proc_no_2_ptr(cpuNumber), reg_num); // Maurice
-// return result;
- assert(0);
- return "Then we shall fight in the shade";
-}
-
-uinteger_t SIMICS_read_register(int cpuNumber, int registerNumber)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// uinteger_t result = SIM_read_register(cpu, registerNumber); // Maurice
-// return result;
- assert(0);
- return 0;
-}
-
-void SIMICS_write_register(int cpuNumber, int registerNumber, uinteger_t value)
-{
-// processor_t* cpu = SIM_proc_no_2_ptr(cpuNumber); // Maurice
-// SIM_write_register(cpu, registerNumber, value); // Maurice
- assert(0);
-}
-
-// This version is called whenever we are about to jump to the SW handler
-void ruby_set_pc(void *cpu, void *parameter){
-// physical_address_t paddr;
-// paddr = (physical_address_t) parameter;
-// // Simics' processor number
-// // int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
-// int smt_proc_no = proc_no / RubyConfig::numberofSMTThreads();
-// // SIM_set_program_counter(cpu, paddr); // Maurice
-// //cout << "ruby_set_pc setting cpu[ " << proc_no << " ] smt_cpu[ " << smt_proc_no << " ] PC[ " << hex << paddr << " ]" << dec << endl;
-// // physical_address_t newpc = SIM_get_program_counter(cpu); // Maurice
-// // int pc_reg = SIM_get_register_number(cpu, "pc"); // Maurice
-// // int npc_reg = SIM_get_register_number( cpu, "npc"); // Maurice
-// // uinteger_t pc = SIM_read_register(cpu, pc_reg); // Maurice
-// // uinteger_t npc = SIM_read_register(cpu, npc_reg); // Maurice
-// //cout << "NEW PC[ 0x" << hex << newpc << " ]" << " PC REG[ 0x" << pc << " ] NPC REG[ 0x" << npc << " ]" << dec << endl;
-//
-// if(XACT_MEMORY){
-// if( !OpalInterface::isOpalLoaded() ){
-// // using SimicsDriver
-// ASSERT( proc_no == smt_proc_no );
-// SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
-// ASSERT( simics_intf );
-// simics_intf->notifyTrapStart( proc_no, Address(paddr), 0 /*dummy threadID*/, 0 /* Simics uses 1 thread */ );
-// }
-// else{
-// // notify Opal about changing pc to SW handler
-// //cout << "informing Opal via notifyTrapStart proc = " << proc_no << endl;
-// //g_system_ptr->getSequencer(smt_proc_no)->notifyTrapStart( proc_no, Address(paddr) );
-// }
-//
-// if (XACT_DEBUG && XACT_DEBUG_LEVEL > 1){
-// cout << g_eventQueue_ptr->getTime() << " " << proc_no
-// << " ruby_set_pc PC: " << hex
-// // << SIM_get_program_counter(cpu) << // Maurice
-// // " NPC is: " << hex << SIM_read_register(cpu, 33) << " pc_val: " << paddr << dec << endl; // Maurice
-// }
-// }
- assert(0);
-}
-
-// This version is called whenever we are about to return from SW handler
-void ruby_set_program_counter(void *cpu, void *parameter){
-// physical_address_t paddr;
-// paddr = (physical_address_t) parameter;
-// // Simics' processor number
-//// int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
-// // SMT proc number
-// int smt_proc_no = proc_no / RubyConfig::numberofSMTThreads();
-//
-//// // SIM_set_program_counter() also sets the NPC to PC+4. // Maurice
-// // Need to ensure that NPC doesn't change especially for PCs in the branch delay slot
-//// uinteger_t npc_val = SIM_read_register(cpu, SIM_get_register_number(cpu, "npc")); // Maurice
-//// SIM_set_program_counter(cpu, paddr); // Maurice
-//// SIM_write_register(cpu, SIM_get_register_number(cpu, "npc"), npc_val); // Maurice
-//
-// //LUKE - notify Opal of PC change (ie end of all register updates and abort complete)
-// // I moved the register checkpoint restoration to here also, to jointly update the PC and the registers at the same time
-// if(XACT_MEMORY){
-// if( !OpalInterface::isOpalLoaded() ){
-// //using SimicsDriver
-// //we should only be running with 1 thread with Simics
-// ASSERT( proc_no == smt_proc_no );
-// SimicsDriver *simics_intf = dynamic_cast<SimicsDriver*>(g_system_ptr->getDriver());
-// ASSERT( simics_intf );
-// simics_intf->notifyTrapComplete(proc_no, Address( paddr ), 0 /* Simics uses 1 thread */ );
-// }
-// else{
-// //using OpalInterface
-// // g_system_ptr->getSequencer(smt_proc_no)->notifyTrapComplete( proc_no, Address(paddr) );
-// }
-// }
-// if (XACT_DEBUG && XACT_DEBUG_LEVEL > 1){
-// cout << g_eventQueue_ptr->getTime() << " " << proc_no
-// << " ruby_set_program_counter PC: " << hex
-//// << SIM_get_program_counter(cpu) << // Maurice
-//// " NPC is: " << hex << SIM_read_register(cpu, 33) << " pc_val: " << paddr << " npc_val: " << npc_val << dec << endl; // Maurice
-// }
- assert(0);
-}
-
-void ruby_set_npc(void *cpu, void *parameter){
-// physical_address_t paddr;
-// paddr = (physical_address_t) parameter;
-// // int proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
-// // SMT proc number
-// int smt_proc_no = proc_no / RubyConfig::numberofSMTThreads();
-//
-// // SIM_write_register(cpu, SIM_get_register_number(cpu, "npc"), paddr); // Maurice
-// if (XACT_DEBUG && XACT_DEBUG_LEVEL > 1){
-// cout << g_eventQueue_ptr->getTime() << " " << proc_no
-// << " ruby_set_npc val: " << hex << paddr << " PC: " << hex
-// // << SIM_get_program_counter(cpu) << // Maurice
-// // " NPC is: " << hex << SIM_read_register(cpu, 33) << dec << endl; // Maurice
-// }
- assert(0);
-}
-
-void ruby_continue_execution(void *cpu, void *parameter){
-// int logical_proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
-// int thread = logical_proc_no % RubyConfig::numberofSMTThreads();
-// int proc_no = logical_proc_no / RubyConfig::numberofSMTThreads();
-// g_system_ptr->getTransactionInterfaceManager(proc_no)->continueExecutionCallback(thread);
- assert(0);
-}
-
-void ruby_restart_transaction(void *cpu, void *parameter){
-// int logical_proc_no = SIM_get_proc_no((processor_t *) cpu); // Maurice
-// int thread = logical_proc_no % RubyConfig::numberofSMTThreads();
-// int proc_no = logical_proc_no / RubyConfig::numberofSMTThreads();
-// g_system_ptr->getTransactionInterfaceManager(proc_no)->restartTransactionCallback(thread);
- assert(0);
-}
-
-void ruby_stall_proc(void *cpu, void *parameter){
-// int logical_proc_no = SIM_get_proc_no((processor_t*)cpu); // Maurice
-// int cycles = (uint64)parameter;
-
-// SIMICS_stall_proc(logical_proc_no, cycles);
- assert(0);
-}
-
-void ruby_disable_processor(void *cpu, void *parameter){
-// int logical_proc_no = SIM_get_proc_no((processor_t*)cpu); // Maurice
-// SIMICS_disable_processor(logical_proc_no);
- assert(0);
-}
-
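Every wrapper removed above follows the same shape: the original Simics call is left commented out and the body falls through to an unconditional assert(0), presumably so that any surviving caller fails loudly rather than silently returning stale data. A minimal stand-alone sketch of that pattern (the typedef and comments here are stand-ins, not the original source):

    #include <cassert>

    typedef unsigned long long uinteger_t;   // stand-in for the Simics typedef

    uinteger_t SIMICS_read_register(int cpuNumber, int registerNumber)
    {
        // uinteger_t result = SIM_read_register(cpu, registerNumber);  // old back end
        assert(0);   // Simics back end is gone; trap if this is ever reached
        return 0;    // unreachable, keeps the compiler happy
    }
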
diff --git a/src/mem/ruby/simics/interface.hh b/src/mem/ruby/simics/interface.hh
deleted file mode 100644
index f8d9375d7..000000000
--- a/src/mem/ruby/simics/interface.hh
+++ /dev/null
@@ -1,152 +0,0 @@
-
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: interface.h 1.33 05/01/19 13:12:32-06:00 mikem@maya.cs.wisc.edu $
- *
- * Description:
- *
- */
-
-#ifndef INTERFACE_H
-#define INTERFACE_H
-
-#include "Global.hh"
-#include "mf_api.hh"
-#include "Address.hh"
-
-// // Simics includes
-// extern "C" {
-// #include "simics/api.hh"
-// }
-
-typedef void memory_transaction_t;
-
-// simics memory access
-integer_t SIMICS_read_physical_memory(int procID, physical_address_t address,
- int len );
-void SIMICS_read_physical_memory_buffer(int procID, physical_address_t addr,
- char* buffer, int len );
-void SIMICS_write_physical_memory( int procID, physical_address_t address,
- integer_t value, int len );
-void SIMICS_write_physical_memory_buffer(int procID, physical_address_t addr,
- char* buffer, int len );
-bool SIMICS_check_memory_value(int procID, physical_address_t addr,
- char* buffer, int len);
-const char *SIMICS_disassemble_physical( int procID, physical_address_t pa );
-
-// simics VM translation, decoding, etc.
-physical_address_t SIMICS_translate_address( int procID, Address address );
-physical_address_t SIMICS_translate_data_address( int procID, Address address );
-#ifdef SPARC
-bool SIMICS_is_ldda(const memory_transaction_t *mem_trans);
-#endif
-
-// simics timing
-void SIMICS_unstall_proc(int cpuNumber);
-void SIMICS_unstall_proc(int cpuNumber, int cycles);
-void SIMICS_stall_proc(int cpuNumber, int cycles);
-void SIMICS_post_stall_proc(int cpuNumber, int cycles);
-void SIMICS_wakeup_ruby();
-
-// simics callbacks
-void SIMICS_remove_ruby_callback( void );
-void SIMICS_install_timing_model( void );
-void SIMICS_remove_timing_model( void );
-void SIMICS_install_exception_callback( void );
-void SIMICS_remove_exception_callback( void );
-#ifdef SPARC
-void SIMICS_install_asi_callback( void );
-void SIMICS_remove_asi_callback( void );
-#endif
-
-// simics PC, IC
-integer_t SIMICS_get_insn_count( int cpuNumber );
-integer_t SIMICS_get_cycle_count(int cpuNumber);
-Address SIMICS_get_program_counter( void *cpu );
-Address SIMICS_get_program_counter( int procID );
-Address SIMICS_get_npc(int procID);
-void SIMICS_set_program_counter( int procID, Address newPC );
-void SIMICS_set_next_program_counter( int procID, Address newPC );
-void SIMICS_set_pc( int procID, Address newPC );
-void SIMICS_set_npc( int procID, Address newNPC );
-
-void SIMICS_post_continue_execution(int procID);
-void SIMICS_post_restart_transaction(int procID);
-
-// simics processor number
-int SIMICS_number_processors( void );
-void * SIMICS_current_processor( void );
-int SIMICS_current_processor_number( void );
-int SIMICS_get_proc_no( void *cpu );
-void* SIMICS_get_proc_ptr( int cpuNumber );
-
-// simics version
-void SIMICS_print_version(ostream& out);
-
-// opal
-mf_opal_api_t *SIMICS_get_opal_interface( void );
-
-// STC related, should not be used anymore!
-void SIMICS_flush_STC(int cpuNumber);
-void SIMICS_invalidate_from_STC(const Address& address, int cpuNumber);
-void SIMICS_downgrade_from_STC(const Address& address, int cpuNumber);
-
-// KM -- from Nikhil's SN code
-uinteger_t SIMICS_read_control_register(int cpuNumber, int registerNumber);
-uinteger_t SIMICS_read_window_register(int cpuNumber, int window, int registerNumber);
-uinteger_t SIMICS_read_global_register(int cpuNumber, int globals, int registerNumber);
-//uint64 SIMICS_read_fp_register_x(int cpuNumber, int registerNumber);
-
-// KM -- new version based on reg names
-int SIMICS_get_register_number(int cpuNumber, const char * reg_name);
-const char * SIMICS_get_register_name(int cpuNumber, int reg_num);
-uinteger_t SIMICS_read_register(int cpuNumber, int registerNumber);
-void SIMICS_write_register(int cpuNumber, int registerNumber, uinteger_t value);
-
-void SIMICS_write_control_register(int cpuNumber, int registerNumber, uinteger_t value);
-void SIMICS_write_window_register(int cpuNumber, int window, int registerNumber, uinteger_t value);
-void SIMICS_write_global_register(int cpuNumber, int globals, int registerNumber, uinteger_t value);
-void SIMICS_write_fp_register_x(int cpuNumber, int registerNumber, uint64 value);
-void SIMICS_enable_processor(int cpuNumber);
-void SIMICS_disable_processor(int cpuNumber);
-void SIMICS_post_disable_processor(int cpuNumber);
-bool SIMICS_processor_enabled(int cpuNumber);
-
-void ruby_abort_transaction(void *cpu, void *parameter);
-void ruby_set_program_counter(void *cpu, void *parameter);
-void ruby_set_pc(void *cpu, void *parameter);
-void ruby_set_npc(void *cpu, void *parameter);
-void ruby_continue_execution(void *cpu, void *parameter);
-void ruby_restart_transaction(void *cpu, void *parameter);
-void ruby_stall_proc(void *cpu, void *parameter);
-void ruby_disable_processor(void *cpu, void *parameter);
-
-#endif //INTERFACE_H
-
diff --git a/src/mem/ruby/simics/simics_api_dummy.c b/src/mem/ruby/simics/simics_api_dummy.c
deleted file mode 100644
index e444b783c..000000000
--- a/src/mem/ruby/simics/simics_api_dummy.c
+++ /dev/null
@@ -1,105 +0,0 @@
-#include <assert.h>
-
-extern "C" {
-
-typedef int generic_transaction_t;
-typedef int generic_transaction;
-typedef int la_t;
-typedef int integer_t;
-typedef int uint64;
-typedef int attr_value_t;
-typedef int data_or_instr_t;
-typedef int sim_exception_t;
-typedef int processor_t;
-typedef int conf_object_t;
-typedef int conf_object;
-typedef int physical_address_t;
-typedef int logical_address_t;
-typedef int read_or_write_t;
-typedef int interface_t;
-typedef int set_error_t;
-typedef int ireg_t;
-typedef int pc_step_t;
-typedef int event_handler_t;
-typedef int lang_void;
-typedef int cycles_t;
-typedef int sync_t;
-typedef int FILE;
-typedef int va_list;
-typedef int log_object;
-typedef int hap_handle_t;
-typedef int str_hap_func_t;
-typedef int hap_type_t;
-typedef int cb_func_em_t;
-typedef int sync_t;
-
-///////////////////////////////////////////////////////////////////////////////
-
-void SIM_number_processors() { assert(0); return; };
-void SIM_c_set_mem_op_value_buf(generic_transaction_t *mem_op, char *buf) { assert(0); return; };
-void SIM_c_get_mem_op_value_buf(generic_transaction_t *mem_op, char *buf) { assert(0); return; };
-sim_exception_t SIM_clear_exception(void) { assert(0); return 0; };
-processor_t *SIM_conf_object_to_processor(conf_object_t* obj) { assert(0); return 0; };
-processor_t *SIM_current_processor(void) { assert(0); return 0; };
-const char *SIM_disassemble(processor_t *cpu_ptr, physical_address_t pa, int type) { assert(0); return 0; };
-interface_t *SIM_get_interface(conf_object const *object, const char *interface_name) { assert(0); return 0; };
-conf_object_t *SIM_get_object(const char *name) { assert(0); return 0; };
-sim_exception_t SIM_get_pending_exception(void) { assert(0); return 0; };
-int SIM_get_proc_no(const processor_t *cpu_ptr) { assert(0); return 0; };
-la_t SIM_get_program_counter(processor_t *cpu) { assert(0); return 0; };
-const char *SIM_last_error(void) { assert(0); return 0; };
-physical_address_t SIM_logical_to_physical(conf_object *cpu_ptr, data_or_instr_t data_or_instr, logical_address_t address) { assert(0); return 0; };
-const char * SIM_get_exception_name( processor_t * p, int exc ) { assert(0); return 0;};
-processor_t *SIM_proc_no_2_ptr(int cpu_nr) { assert(0); return 0; };
-conf_object_t *SIM_processor_to_conf_object(processor_t* p) { assert(0); return 0; };
-ireg_t SIM_read_control_register(processor_t *cpu_ptr, int reg) { assert(0); return 0; };
-double SIM_read_fp_register_d(processor_t *cpu_ptr, int reg) { assert(0); return 0; };
-uint64 SIM_read_fp_register_x(processor_t *cpu_ptr, int reg) { assert(0); return 0; };
-ireg_t SIM_read_global_register(processor_t *cpu_ptr, int globals, int reg) { assert(0); return 0; };
-integer_t SIM_read_phys_memory(conf_object *cpu, physical_address_t address, int len) { assert(0); return 0; };
-ireg_t SIM_read_window_register(processor_t *cpu_ptr, int window, int reg) { assert(0); return 0; };
-set_error_t SIM_set_attribute(conf_object_t *object, char const *name, attr_value_t *value) { assert(0); return 0; };
-void SIM_free_attribute(attr_value_t *value) { assert(0); };
-void SIM_stall_cycle(conf_object_t *obj, cycles_t stall) { assert(0); return; };
-cycles_t SIM_stall_count(conf_object_t *obj) { assert(0); return 0; };
-void SIM_stall(conf_object_t *obj, cycles_t stall) { assert(0); return; };
-pc_step_t SIM_step_count(conf_object_t *p) { assert(0); return 0; };
-cycles_t SIM_cycle_count(conf_object_t *p) { assert(0); return 0; };
-cycles_t SIM_stalled_until(conf_object_t *p) { assert(0); return 0; };
-void SIM_time_clean(conf_object_t *obj, sync_t t, event_handler_t handler, lang_void * arg) { assert(0); return; };
-void SIM_time_post_cycle(conf_object_t * obj, cycles_t delta, sync_t sync, event_handler_t handler, lang_void * arg) { assert(0); return; };
-const char *SIM_version(void) { return 0; };
-void SIM_set_program_counter(conf_object_t *cpu, logical_address_t pc){assert(0);};
-void SIM_write_control_register(processor_t *cpu_ptr, int reg, ireg_t value) { assert(0); return; };
-void SIM_write_fp_register_x(processor_t *cpu_ptr, int reg, uint64 value) { assert(0); return; };
-void SIM_write_global_register(processor_t *cpu_ptr, int globals, int reg, ireg_t value) { assert(0); return; };
-void SIM_write_window_register(processor_t *cpu_ptr, int window, int reg, ireg_t value) { assert(0); return; };
-void SIM_write_phys_memory(conf_object *cpu, physical_address_t address, integer_t value, int len) { assert(0); };
-int __sparc_v9_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
-int __l32_p32_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
-int __l32_p64_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
-int __l64_p64_vtvfprintf(FILE *stream, const char *format, va_list va) { assert(0); return 0; };
-void __sparc_v9_vtdebug_log_vararg(int lvl, log_object *dev, char const *str, va_list va) { assert (0); return; };
-hap_handle_t SIM_hap_add_callback(const char *id, str_hap_func_t cb, lang_void *data) { assert(0); return 0; };
-hap_type_t SIM_hap_get_number(const char *id) { assert(0); return 0; };
-void SIM_hap_delete_callback_id(hap_type_t hap, hap_handle_t hdl) { assert (0); return; };
-int SIM_flush(void) { assert(0); return 0; };
-void SIM_write_register(processor_t *cpu_ptr, int registerNumber, integer_t value){ assert(0); };
-integer_t SIM_read_register(processor_t *cpu_ptr, int registerNumber) { assert(0); return 0; };
-int SIM_get_register_number(processor_t *cpu_ptr, const char * register_name){ assert(0); return 0; }
-const char * SIM_get_register_name(processor_t *cpu_ptr, int reg_num){ assert(0); return 0; }
-
-void SIM_break_simulation(const char * msg){ assert(0); }
-void SIM_printf(const char *format, va_list ap){ assert(0); }
-set_error_t ruby_session_set( void *id, conf_object_t *obj,
- attr_value_t *val, attr_value_t *idx ) { assert (0); return 0; };
-attr_value_t ruby_session_get( void *id, conf_object_t *obj,
- attr_value_t *idx ) { assert (0); return 0; };
- void SIM_stacked_post(conf_object_t *obj, event_handler_t, lang_void *arg){};
- pc_step_t SIM_step_next_occurrence( conf_object_t * obj, event_handler_t, lang_void * arg){ assert(0); return 0;};
-void SIM_enable_processor(conf_object_t *p) { assert(0); };
-void SIM_disable_processor(conf_object_t *p) { assert(0); };
-int SIM_cpu_enabled(conf_object_t *p) { assert(0); };
-
-attr_value_t SIM_get_attribute(conf_object_t *object, const char *name) { assert(0); };
-} // extern "C"
diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc
index a1ec38cd2..3a37884db 100644
--- a/src/mem/ruby/system/DirectoryMemory.cc
+++ b/src/mem/ruby/system/DirectoryMemory.cc
@@ -42,7 +42,6 @@
#include "RubySlicc_Util.hh"
#include "RubyConfig.hh"
#include "Chip.hh"
-#include "interface.hh"
DirectoryMemory::DirectoryMemory(Chip* chip_ptr, int version)
{
@@ -122,14 +121,15 @@ Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
// entry->getDirOwner() = true; // FIXME - This should not be hard-coded
// load the data from SimICS when first initializing
- if (g_SIMICS) {
+ if (g_SIMULATING) {
if (DATA_BLOCK) {
- physical_address_t physAddr = address.getAddress();
+ //physical_address_t physAddr = address.getAddress();
for(int j=0; j < RubyConfig::dataBlockBytes(); j++) {
- int8 data_byte = (int8) SIMICS_read_physical_memory( m_chip_ptr->getID(),
- physAddr + j, 1 );
+ //int8 data_byte = (int8) SIMICS_read_physical_memory( m_chip_ptr->getID(),
+ // physAddr + j, 1 );
//printf("SimICS, byte %d: %lld\n", j, data_byte );
+ int8 data_byte = 0;
entry->getDataBlk().setByte(j, data_byte);
}
DEBUG_EXPR(NODE_COMP, MedPrio,entry->getDataBlk());
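With the Simics back-door read gone, the hunk above leaves a freshly touched directory entry's data block zero-filled whenever g_SIMULATING is set. A small self-contained sketch of the resulting behaviour, using a stand-in type rather than Ruby's DataBlock:

    #include <cstdint>
    #include <vector>

    // Stand-in for Ruby's DataBlock -- just enough to show the effect.
    struct FakeDataBlock {
        explicit FakeDataBlock(int bytes) : bytes_(bytes, 0) {}
        void setByte(int i, int8_t v) { bytes_[i] = v; }
        std::vector<int8_t> bytes_;
    };

    int main()
    {
        const int blockBytes = 64;       // assumed value of RubyConfig::dataBlockBytes()
        FakeDataBlock blk(blockBytes);
        for (int j = 0; j < blockBytes; ++j)
            blk.setByte(j, 0);           // was: SIMICS_read_physical_memory(..., physAddr + j, 1)
        return 0;
    }
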
diff --git a/src/mem/ruby/system/PerfectCacheMemory.hh b/src/mem/ruby/system/PerfectCacheMemory.hh
index 590b265c4..39ad7a7b3 100644
--- a/src/mem/ruby/system/PerfectCacheMemory.hh
+++ b/src/mem/ruby/system/PerfectCacheMemory.hh
@@ -44,7 +44,6 @@
#include "AccessPermission.hh"
#include "RubyConfig.hh"
#include "Address.hh"
-#include "interface.hh"
#include "AbstractChip.hh"
template<class ENTRY>
diff --git a/src/mem/ruby/system/PersistentArbiter.hh b/src/mem/ruby/system/PersistentArbiter.hh
index 0654e3a9e..cf78a5920 100644
--- a/src/mem/ruby/system/PersistentArbiter.hh
+++ b/src/mem/ruby/system/PersistentArbiter.hh
@@ -46,7 +46,6 @@
#include "AccessType.hh"
#include "RubyConfig.hh"
#include "Address.hh"
-#include "interface.hh"
struct ArbiterEntry {
bool valid;
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 43b0df1b1..23efb9a0c 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -46,7 +46,6 @@
#include "SubBlock.hh"
#include "Protocol.hh"
#include "Map.hh"
-#include "interface.hh"
Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
m_chip_ptr = chip_ptr;
@@ -597,14 +596,6 @@ void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMac
if (miss_latency != 0) {
g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);
-#if 0
- uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
- uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
- uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
- uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
- cout << "END PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;
-#endif
-
}
bool write =
@@ -624,7 +615,7 @@ void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMac
m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
}
- // Call into the Driver (Tester or Simics) and let it read and/or modify the sub-block
+ // Call into the Driver and let it read and/or modify the sub-block
g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
// If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
@@ -636,6 +627,130 @@ void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMac
}
}
+void Sequencer::readConflictCallback(const Address& address) {
+ // process oldest thread first
+ int thread = -1;
+ Time oldest_time = 0;
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int t=0; t < smt_threads; ++t){
+ if(m_readRequestTable_ptr[t]->exist(address)){
+ CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
+ if(thread == -1 || (request.getTime() < oldest_time) ){
+ thread = t;
+ oldest_time = request.getTime();
+ }
+ }
+ }
+ // make sure we found an oldest thread
+ ASSERT(thread != -1);
+
+ CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
+
+ readConflictCallback(address, GenericMachineType_NULL, thread);
+}
+
+void Sequencer::readConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
+ assert(address == line_address(address));
+ assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+ CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
+ assert( request.getThreadID() == thread );
+ removeRequest(request);
+
+ assert((request.getType() == CacheRequestType_LD) ||
+ (request.getType() == CacheRequestType_LD_XACT) ||
+ (request.getType() == CacheRequestType_IFETCH)
+ );
+
+ conflictCallback(request, respondingMach, thread);
+}
+
+void Sequencer::writeConflictCallback(const Address& address) {
+ // process oldest thread first
+ int thread = -1;
+ Time oldest_time = 0;
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int t=0; t < smt_threads; ++t){
+ if(m_writeRequestTable_ptr[t]->exist(address)){
+ CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
+ if(thread == -1 || (request.getTime() < oldest_time) ){
+ thread = t;
+ oldest_time = request.getTime();
+ }
+ }
+ }
+ // make sure we found an oldest thread
+ ASSERT(thread != -1);
+
+ CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
+
+ writeConflictCallback(address, GenericMachineType_NULL, thread);
+}
+
+void Sequencer::writeConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
+ assert(address == line_address(address));
+ assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+ CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
+ assert( request.getThreadID() == thread);
+ removeRequest(request);
+
+ assert((request.getType() == CacheRequestType_ST) ||
+ (request.getType() == CacheRequestType_ST_XACT) ||
+ (request.getType() == CacheRequestType_LDX_XACT) ||
+ (request.getType() == CacheRequestType_ATOMIC));
+
+ conflictCallback(request, respondingMach, thread);
+
+}
+
+void Sequencer::conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread) {
+ assert(XACT_MEMORY);
+ int size = request.getSize();
+ Address request_address = request.getAddress();
+ Address request_logical_address = request.getLogicalAddress();
+ Address request_line_address = line_address(request_address);
+ CacheRequestType type = request.getType();
+ int threadID = request.getThreadID();
+ Time issued_time = request.getTime();
+ int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
+
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
+
+ assert(g_eventQueue_ptr->getTime() >= issued_time);
+ Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
+
+ if (PROTOCOL_DEBUG_TRACE) {
+ g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Conflict", "",
+ int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
+ }
+
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
+ if (request.getPrefetch() == PrefetchBit_Yes) {
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
+ g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
+ return; // Ignore the software prefetch, don't callback the driver
+ }
+
+ bool write =
+ (type == CacheRequestType_ST) ||
+ (type == CacheRequestType_ST_XACT) ||
+ (type == CacheRequestType_LDX_XACT) ||
+ (type == CacheRequestType_ATOMIC);
+
+ // Copy the correct bytes out of the cache line into the subblock
+ SubBlock subblock(request_address, request_logical_address, size);
+
+ // Call into the Driver
+ g_system_ptr->getDriver()->conflictCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
+
+ // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
+ // (This is only triggered for the non-TSO case)
+ if (write) {
+ assert(!TSO);
+ }
+}
+
void Sequencer::printDebug(){
//notify driver of debug
g_system_ptr->getDriver()->printDebug();
@@ -710,7 +825,7 @@ Sequencer::isReady(const CacheMsg& request) const
return true;
}
-// Called by Driver (Simics or Tester).
+// Called by Driver
void
Sequencer::makeRequest(const Packet* pkt, void* data)
{
@@ -786,14 +901,6 @@ bool Sequencer::doRequest(const CacheMsg& request) {
return true;
}
-#if 0
- uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
- uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
- uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
- uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
- cout << "START PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;;
-#endif
-
if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {
// See if we can satisfy the load entirely from the store buffer
@@ -936,10 +1043,11 @@ void Sequencer::checkCoherence(const Address& addr) {
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes ) {
- if(g_SIMICS){
+ if(g_SIMULATING){
for(unsigned int i=0; i < size_in_bytes; i++) {
- value[i] = SIMICS_read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
- addr.getAddress() + i, 1 );
+ std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented. " << std::endl;
+ value[i] = 0; // _read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
+ // addr.getAddress() + i, 1 );
}
return false; // Do nothing?
} else {
@@ -1006,7 +1114,7 @@ bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
unsigned int size_in_bytes) {
char test_buffer[64];
- if(g_SIMICS){
+ if(g_SIMULATING){
return false; // Do nothing?
} else {
// idea here is that coherent cache should find the
@@ -1088,3 +1196,4 @@ bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
return true;
}
}
+
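The conflict path added above mirrors the existing hit path: readConflictCallback and writeConflictCallback scan the per-SMT-thread request tables for the oldest outstanding request to the conflicting line, and conflictCallback then builds a SubBlock and hands it to the driver's conflictCallback. A compilable sketch of just the oldest-thread selection, with stand-in types in place of the Ruby request tables:

    #include <cassert>
    #include <cstdint>
    #include <map>

    using Time    = uint64_t;
    using Address = uint64_t;

    // One pending-request table per SMT thread: line address -> issue time.
    // Stand-in for m_readRequestTable_ptr / m_writeRequestTable_ptr.
    int pickOldestThread(const std::map<Address, Time>* tables,
                         int smtThreads, Address addr)
    {
        int thread  = -1;
        Time oldest = 0;
        for (int t = 0; t < smtThreads; ++t) {
            auto it = tables[t].find(addr);
            if (it == tables[t].end())
                continue;                    // no pending request from this thread
            if (thread == -1 || it->second < oldest) {
                thread = t;                  // earliest issue time seen so far
                oldest = it->second;
            }
        }
        assert(thread != -1);                // a conflict implies a pending request
        return thread;
    }

    int main()
    {
        std::map<Address, Time> tables[2];
        tables[0][0x40] = 120;               // thread 0 issued later
        tables[1][0x40] = 100;               // thread 1 issued earlier -> chosen
        return pickOldestThread(tables, 2, 0x40) == 1 ? 0 : 1;
    }
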
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index 74d4ef90f..a38809e94 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -43,7 +43,6 @@
#include "Tester.hh"
#include "SyntheticDriver.hh"
#include "DeterministicDriver.hh"
-#include "OpalInterface.hh"
#include "Chip.hh"
//#include "Tracer.hh"
#include "Protocol.hh"
@@ -202,15 +201,6 @@ void RubySystem::recordCacheContents(CacheRecorder& tr) const
}
}
-void System::opalLoadNotify()
-{
- if (OpalInterface::isOpalLoaded()) {
- // change the driver pointer to point to an opal driver
- delete m_driver_ptr;
- m_driver_ptr = new OpalInterface(this);
- }
-}
-
#ifdef CHECK_COHERENCE
// This code will check for cases if the given cache block is exclusive in
// one node and shared in another-- a coherence violation
diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh
index b4aa257a3..f5b107240 100644
--- a/src/mem/ruby/system/System.hh
+++ b/src/mem/ruby/system/System.hh
@@ -91,9 +91,6 @@ public:
void printStats(ostream& out);
void clearStats() const;
- // called to notify the system when opal is loaded
- void opalLoadNotify();
-
void print(ostream& out) const;
#ifdef CHECK_COHERENCE
void checkGlobalCoherenceInvariant(const Address& addr);
diff --git a/src/mem/ruby/tester/DeterministicDriver.cc b/src/mem/ruby/tester/DeterministicDriver.cc
index 7317f1c5c..ff9d3da14 100644
--- a/src/mem/ruby/tester/DeterministicDriver.cc
+++ b/src/mem/ruby/tester/DeterministicDriver.cc
@@ -45,8 +45,8 @@
DeterministicDriver::DeterministicDriver(RubySystem* sys_ptr)
{
- if (g_SIMICS) {
- ERROR_MSG("g_SIMICS should not be defined.");
+ if (g_SIMULATING) {
+ ERROR_MSG("g_SIMULATING should not be defined.");
}
m_finish_time = 0;
diff --git a/src/mem/ruby/tester/RaceyDriver.cc b/src/mem/ruby/tester/RaceyDriver.cc
index 5b1e7e3f7..c5cdcaa4b 100644
--- a/src/mem/ruby/tester/RaceyDriver.cc
+++ b/src/mem/ruby/tester/RaceyDriver.cc
@@ -41,8 +41,8 @@
RaceyDriver::RaceyDriver()
{
- if (g_SIMICS) {
- ERROR_MSG("g_SIMICS should not be defined.");
+ if (g_SIMULATING) {
+ ERROR_MSG("g_SIMULATING should not be defined.");
}
// debug transition?
diff --git a/src/mem/ruby/tester/SyntheticDriver.cc b/src/mem/ruby/tester/SyntheticDriver.cc
index f9f7b91b2..f5986de15 100644
--- a/src/mem/ruby/tester/SyntheticDriver.cc
+++ b/src/mem/ruby/tester/SyntheticDriver.cc
@@ -47,8 +47,8 @@
SyntheticDriver::SyntheticDriver(RubySystem* sys_ptr)
{
cout << "SyntheticDriver::SyntheticDriver" << endl;
- if (g_SIMICS) {
- ERROR_MSG("g_SIMICS should not be defined.");
+ if (g_SIMULATING) {
+ ERROR_MSG("g_SIMULATING should not be defined.");
}
m_finish_time = 0;
diff --git a/src/mem/ruby/tester/Tester.cc b/src/mem/ruby/tester/Tester.cc
index 6c12ff471..60b625120 100644
--- a/src/mem/ruby/tester/Tester.cc
+++ b/src/mem/ruby/tester/Tester.cc
@@ -42,8 +42,8 @@
Tester::Tester(RubySystem* sys_ptr)
{
- if (g_SIMICS) {
- ERROR_MSG("g_SIMICS should not be defined.");
+ if (g_SIMULATING) {
+ ERROR_MSG("g_SIMULATING should not be defined.");
}
g_callback_counter = 0;
diff --git a/src/mem/ruby/tester/main.cc b/src/mem/ruby/tester/main.cc
index 9642fd5b7..10cc526be 100644
--- a/src/mem/ruby/tester/main.cc
+++ b/src/mem/ruby/tester/main.cc
@@ -43,8 +43,8 @@
int main(int argc, char *argv[])
{
- if (g_SIMICS) {
- ERROR_MSG("g_SIMICS should not be defined.");
+ if (g_SIMULATING) {
+ ERROR_MSG("g_SIMULATING should not be defined.");
}
tester_main(argc, argv);
diff --git a/src/mem/ruby/tester/test_framework.cc b/src/mem/ruby/tester/test_framework.cc
index 02320c871..e3a16920b 100644
--- a/src/mem/ruby/tester/test_framework.cc
+++ b/src/mem/ruby/tester/test_framework.cc
@@ -35,7 +35,6 @@
#include "protocol_name.hh"
#include "test_framework.hh"
#include "System.hh"
-#include "OpalInterface.hh"
#include "init.hh"
#include "Tester.hh"
#include "RubyEventQueue.hh"
@@ -157,11 +156,7 @@ void tester_destroy()
void tester_install_opal(mf_opal_api_t* opal_api, mf_ruby_api_t* ruby_api)
{
- // initialize our api interface
- OpalInterface::installInterface(ruby_api);
-
- // update the OpalInterface object to point to opal's interface
- ((OpalInterface *) g_system_ptr->getDriver())->setOpalInterface(opal_api);
+ std::cout << __FILE__ << "(" << __LINE__ << "): Not implemented" << std::endl;
}
void tester_record_cache()