Diffstat (limited to 'src')
-rw-r--r--  src/mem/protocol/MESI_CMP_directory-L1cache.sm                |  12
-rw-r--r--  src/mem/protocol/MI_example-cache.sm                          |   6
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm               |  10
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L1cache.sm                   |  12
-rw-r--r--  src/mem/protocol/MOESI_hammer-cache.sm                        |   8
-rw-r--r--  src/mem/protocol/Network_test-cache.sm                        |   4
-rw-r--r--  src/mem/protocol/RubySlicc_Exports.sm                         |  11
-rw-r--r--  src/mem/protocol/RubySlicc_Profiler.sm                        |   4
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm                           |  12
-rw-r--r--  src/mem/ruby/profiler/AddressProfiler.cc                      |   2
-rw-r--r--  src/mem/ruby/profiler/AddressProfiler.hh                      |   2
-rw-r--r--  src/mem/ruby/profiler/Profiler.cc                             |   4
-rw-r--r--  src/mem/ruby/profiler/Profiler.hh                             |   4
-rw-r--r--  src/mem/ruby/recorder/TraceRecord.cc                          |   2
-rw-r--r--  src/mem/ruby/slicc_interface/RubyRequest.cc                   |  41
-rw-r--r--  src/mem/ruby/slicc_interface/RubyRequest.hh                   | 104
-rw-r--r--  src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh  |   4
-rw-r--r--  src/mem/ruby/slicc_interface/RubySlicc_Util.hh                |   2
-rw-r--r--  src/mem/ruby/system/CacheMemory.cc                            |   2
-rw-r--r--  src/mem/ruby/system/CacheMemory.hh                            |   4
-rw-r--r--  src/mem/ruby/system/DMASequencer.cc                           |   6
-rw-r--r--  src/mem/ruby/system/RubyPort.cc                               |   2
-rw-r--r--  src/mem/ruby/system/Sequencer.cc                              | 116
23 files changed, 208 insertions, 166 deletions
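
This changeset removes the SLICC-defined CacheMsg structure and routes the mandatory queue through the C++ RubyRequest class instead, which now implements the Message interface; the protocol state machines, the profiler, CacheMemory, the DMA sequencer, and the Sequencer all switch over. The shape of the new message, sketched below as self-contained C++ with illustrative names (the real class lives in src/mem/ruby/slicc_interface/RubyRequest.hh): fields carry an m_ prefix and are read through getField() accessors, which is the convention the generated SLICC code relies on.

    #include <cstdint>

    // Illustrative request-type enum; gem5 generates RubyRequestType from SLICC.
    enum class DemoRequestType { LD, ST, IFETCH };

    // Minimal request record in the m_Field / getField() style used by RubyRequest.
    struct DemoRequest {
        uint64_t        m_PhysicalAddress;
        uint64_t        m_LineAddress;
        DemoRequestType m_Type;
        int             m_Size;

        const uint64_t&        getPhysicalAddress() const { return m_PhysicalAddress; }
        const uint64_t&        getLineAddress()     const { return m_LineAddress; }
        const DemoRequestType& getType()            const { return m_Type; }
        const int&             getSize()            const { return m_Size; }
    };
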
diff --git a/src/mem/protocol/MESI_CMP_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
index 705b2831c..26f5b1ff6 100644
--- a/src/mem/protocol/MESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
@@ -267,9 +267,9 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
}
// Mandatory Queue betweens Node's CPU and it's L1 caches
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -338,7 +338,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
@@ -355,7 +355,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
}
action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
@@ -373,7 +373,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
@@ -391,7 +391,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
}
action(c_issueUPGRADE, "c", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency= l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:UPGRADE;
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 7923ef65c..7adadbade 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -181,9 +181,9 @@ machine(L1Cache, "MI Example L1 Cache")
}
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
if (is_invalid(cache_entry) &&
@@ -281,7 +281,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
action(p_profileMiss, "p", desc="Profile cache miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
cacheMemory.profileMiss(in_msg);
}
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index 291621af9..50bb710cb 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -303,9 +303,9 @@ machine(L1Cache, "Directory protocol")
// Nothing from the unblock network
// Mandatory Queue betweens Node's CPU and it's L1 caches
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -380,7 +380,7 @@ machine(L1Cache, "Directory protocol")
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, latency= request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
@@ -396,7 +396,7 @@ machine(L1Cache, "Directory protocol")
}
action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
@@ -820,7 +820,7 @@ machine(L1Cache, "Directory protocol")
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
// profile_miss(in_msg);
}
}
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 8537029e7..d7344d779 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -622,9 +622,9 @@ machine(L1Cache, "Token protocol")
}
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
TBE tbe := L1_TBEs[in_msg.LineAddress];
@@ -1310,7 +1310,7 @@ machine(L1Cache, "Token protocol")
L1_TBEs.allocate(address);
set_tbe(L1_TBEs[address]);
tbe.IssueCount := 0;
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
tbe.PC := in_msg.ProgramCounter;
tbe.AccessType := cache_request_type_to_access_type(in_msg.Type);
if (in_msg.Type == RubyRequestType:ATOMIC) {
@@ -1323,7 +1323,7 @@ machine(L1Cache, "Token protocol")
}
action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
}
}
@@ -1499,7 +1499,7 @@ machine(L1Cache, "Token protocol")
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.profileMiss(in_msg);
} else {
@@ -1516,7 +1516,7 @@ machine(L1Cache, "Token protocol")
}
action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
}
stall_and_wait(mandatoryQueue_in, address);
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index ea2a1d6e3..865acf275 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -352,9 +352,9 @@ machine(L1Cache, "AMD Hammer-like protocol")
// Nothing from the request network
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
TBE tbe := TBEs[in_msg.LineAddress];
@@ -695,7 +695,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
cache_entry.DataBlk);
@@ -1022,7 +1022,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
if (L1IcacheMemory.isTagPresent(address)) {
L1IcacheMemory.profileMiss(in_msg);
} else if (L1DcacheMemory.isTagPresent(address)) {
diff --git a/src/mem/protocol/Network_test-cache.sm b/src/mem/protocol/Network_test-cache.sm
index 603c1f5f9..814cd5c29 100644
--- a/src/mem/protocol/Network_test-cache.sm
+++ b/src/mem/protocol/Network_test-cache.sm
@@ -132,9 +132,9 @@ machine(L1Cache, "Network_test L1 Cache")
out_port(responseNetwork_out, RequestMsg, responseFromCache);
// Mandatory Queue
- in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
- peek(mandatoryQueue_in, CacheMsg) {
+ peek(mandatoryQueue_in, RubyRequest) {
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
getCacheEntry(in_msg.LineAddress),
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index 1f7a1dda2..0ef3df29b 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -214,17 +214,6 @@ enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
}
// CacheMsg
-structure(CacheMsg, desc="...", interface="Message") {
- Address LineAddress, desc="Line address for this request";
- Address PhysicalAddress, desc="Physical address for this request";
- RubyRequestType Type, desc="Type of request (LD, ST, etc)";
- Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- int Size, desc="size in bytes of access";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
-}
-
-// CacheMsg
structure(SequencerMsg, desc="...", interface="Message") {
Address LineAddress, desc="Line address for this request";
Address PhysicalAddress, desc="Physical address for this request";
diff --git a/src/mem/protocol/RubySlicc_Profiler.sm b/src/mem/protocol/RubySlicc_Profiler.sm
index ed6b10d8e..773bf0025 100644
--- a/src/mem/protocol/RubySlicc_Profiler.sm
+++ b/src/mem/protocol/RubySlicc_Profiler.sm
@@ -34,10 +34,10 @@ void profileCacheCLBsize(int size, int numStaleI);
void profileMemoryCLBsize(int size, int numStaleI);
// used by 2level exclusive cache protocols
-void profile_miss(CacheMsg msg);
+void profile_miss(RubyRequest msg);
// used by non-fast path protocols
-void profile_L1Cache_miss(CacheMsg msg, NodeID l1cacheID);
+void profile_L1Cache_miss(RubyRequest msg, NodeID l1cacheID);
// used by CMP protocols
void profile_request(std::string L1CacheStateStr, std::string L2CacheStateStr,
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index d9c3077a2..118cbc2f0 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -109,6 +109,16 @@ structure (Sequencer, external = "yes") {
void profileNack(Address, int, int, uint64);
}
+structure(RubyRequest, desc="...", interface="Message", external="yes") {
+ Address LineAddress, desc="Line address for this request";
+ Address PhysicalAddress, desc="Physical address for this request";
+ RubyRequestType Type, desc="Type of request (LD, ST, etc)";
+ Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ int Size, desc="size in bytes of access";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
external_type(AbstractEntry, primitive="yes");
structure (DirectoryMemory, external = "yes") {
@@ -126,7 +136,7 @@ structure (CacheMemory, external = "yes") {
void deallocate(Address);
AbstractCacheEntry lookup(Address);
bool isTagPresent(Address);
- void profileMiss(CacheMsg);
+ void profileMiss(RubyRequest);
void profileGenericRequest(GenericRequestType,
RubyAccessMode,
diff --git a/src/mem/ruby/profiler/AddressProfiler.cc b/src/mem/ruby/profiler/AddressProfiler.cc
index 722845c45..a9c9a9591 100644
--- a/src/mem/ruby/profiler/AddressProfiler.cc
+++ b/src/mem/ruby/profiler/AddressProfiler.cc
@@ -29,7 +29,7 @@
#include <vector>
#include "base/stl_helpers.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
diff --git a/src/mem/ruby/profiler/AddressProfiler.hh b/src/mem/ruby/profiler/AddressProfiler.hh
index 471feaaa5..e525a792f 100644
--- a/src/mem/ruby/profiler/AddressProfiler.hh
+++ b/src/mem/ruby/profiler/AddressProfiler.hh
@@ -33,7 +33,7 @@
#include "base/hashmap.hh"
#include "mem/protocol/AccessType.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
diff --git a/src/mem/ruby/profiler/Profiler.cc b/src/mem/ruby/profiler/Profiler.cc
index 8604d014f..ed7c25c9d 100644
--- a/src/mem/ruby/profiler/Profiler.cc
+++ b/src/mem/ruby/profiler/Profiler.cc
@@ -51,7 +51,7 @@
#include "base/stl_helpers.hh"
#include "base/str.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/network/Network.hh"
@@ -535,7 +535,7 @@ Profiler::clearStats()
}
void
-Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
+Profiler::addAddressTraceSample(const RubyRequest& msg, NodeID id)
{
if (msg.getType() != RubyRequestType_IFETCH) {
// Note: The following line should be commented out if you
diff --git a/src/mem/ruby/profiler/Profiler.hh b/src/mem/ruby/profiler/Profiler.hh
index 8e3e7f547..352ba453f 100644
--- a/src/mem/ruby/profiler/Profiler.hh
+++ b/src/mem/ruby/profiler/Profiler.hh
@@ -68,7 +68,7 @@
#include "params/RubyProfiler.hh"
#include "sim/sim_object.hh"
-class CacheMsg;
+class RubyRequest;
class AddressProfiler;
class Profiler : public SimObject, public Consumer
@@ -93,7 +93,7 @@ class Profiler : public SimObject, public Consumer
AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
- void addAddressTraceSample(const CacheMsg& msg, NodeID id);
+ void addAddressTraceSample(const RubyRequest& msg, NodeID id);
void profileRequest(const std::string& requestStr);
void profileSharing(const Address& addr, AccessType type,
diff --git a/src/mem/ruby/recorder/TraceRecord.cc b/src/mem/ruby/recorder/TraceRecord.cc
index e8cff9459..aa54ee53c 100644
--- a/src/mem/ruby/recorder/TraceRecord.cc
+++ b/src/mem/ruby/recorder/TraceRecord.cc
@@ -26,7 +26,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/recorder/TraceRecord.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
diff --git a/src/mem/ruby/slicc_interface/RubyRequest.cc b/src/mem/ruby/slicc_interface/RubyRequest.cc
index 2d8c94ed6..2aae61d7b 100644
--- a/src/mem/ruby/slicc_interface/RubyRequest.cc
+++ b/src/mem/ruby/slicc_interface/RubyRequest.cc
@@ -4,34 +4,17 @@
using namespace std;
-ostream&
-operator<<(ostream& out, const RubyRequest& obj)
+void
+RubyRequest::print(ostream& out) const
{
- out << hex << "0x" << obj.paddr << " data: 0x" << flush;
- for (int i = 0; i < obj.len; i++) {
- out << (int)obj.data[i];
- }
- out << dec << " type: " << RubyRequestType_to_string(obj.type) << endl;
- return out;
-}
-
-vector<string>
-tokenizeString(string str, string delims)
-{
- vector<string> tokens;
- char* pch;
- char* tmp;
- const char* c_delims = delims.c_str();
- tmp = new char[str.length()+1];
- strcpy(tmp, str.c_str());
- pch = strtok(tmp, c_delims);
- while (pch != NULL) {
- string tmp_str(pch);
- if (tmp_str == "null") tmp_str = "";
- tokens.push_back(tmp_str);
-
- pch = strtok(NULL, c_delims);
- }
- delete [] tmp;
- return tokens;
+ out << "[RubyRequest: ";
+ out << "LineAddress = " << m_LineAddress << " ";
+ out << "PhysicalAddress = " << m_PhysicalAddress << " ";
+ out << "Type = " << m_Type << " ";
+ out << "ProgramCounter = " << m_ProgramCounter << " ";
+ out << "AccessMode = " << m_AccessMode << " ";
+ out << "Size = " << m_Size << " ";
+ out << "Prefetch = " << m_Prefetch << " ";
+// out << "Time = " << getTime() << " ";
+ out << "]";
}
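
The free operator<< on RubyRequest is replaced above by a RubyRequest::print method, so the request can be printed through the common Message interface. The idiom, sketched standalone below with illustrative types (not gem5's own classes): a virtual print on the base class plus a non-member operator<< that forwards to it, so a message held through a base-class pointer still prints its dynamic type.

    #include <iostream>
    #include <memory>

    class Message {
      public:
        virtual ~Message() {}
        virtual void print(std::ostream& out) const = 0;
    };

    // One stream operator serves every Message subtype.
    inline std::ostream& operator<<(std::ostream& out, const Message& msg)
    {
        msg.print(out);
        return out << std::flush;
    }

    class DemoRequest : public Message {
      public:
        explicit DemoRequest(int size) : m_Size(size) {}
        void print(std::ostream& out) const
        {
            out << "[DemoRequest: Size = " << m_Size << "]";
        }
      private:
        int m_Size;
    };

    int main()
    {
        std::unique_ptr<Message> msg(new DemoRequest(8));
        std::cout << *msg << std::endl;   // dispatches to the virtual print()
        return 0;
    }
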
diff --git a/src/mem/ruby/slicc_interface/RubyRequest.hh b/src/mem/ruby/slicc_interface/RubyRequest.hh
index d7acfd578..06ca0de1c 100644
--- a/src/mem/ruby/slicc_interface/RubyRequest.hh
+++ b/src/mem/ruby/slicc_interface/RubyRequest.hh
@@ -40,40 +40,102 @@
typedef void* RubyPortHandle;
-class RubyRequest
+class RubyRequest : public Message
{
public:
- uint64_t paddr;
+ Address m_PhysicalAddress;
+ Address m_LineAddress;
+ RubyRequestType m_Type;
+ Address m_ProgramCounter;
+ RubyAccessMode m_AccessMode;
+ int m_Size;
+ PrefetchBit m_Prefetch;
uint8_t* data;
- int len;
- uint64_t pc;
- RubyRequestType type;
- RubyAccessMode access_mode;
PacketPtr pkt;
unsigned proc_id;
RubyRequest() {}
- RubyRequest(uint64_t _paddr,
- uint8_t* _data,
- int _len,
- uint64_t _pc,
- RubyRequestType _type,
- RubyAccessMode _access_mode,
- PacketPtr _pkt,
- unsigned _proc_id = 100)
- : paddr(_paddr),
+ RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc,
+ RubyRequestType _type, RubyAccessMode _access_mode,
+ PacketPtr _pkt, PrefetchBit _pb = PrefetchBit_No,
+ unsigned _proc_id = 100)
+ : m_PhysicalAddress(_paddr),
+ m_Type(_type),
+ m_ProgramCounter(_pc),
+ m_AccessMode(_access_mode),
+ m_Size(_len),
+ m_Prefetch(_pb),
data(_data),
- len(_len),
- pc(_pc),
- type(_type),
- access_mode(_access_mode),
pkt(_pkt),
proc_id(_proc_id)
- {}
+ {
+ m_LineAddress = m_PhysicalAddress;
+ m_LineAddress.makeLineAddress();
+ }
+
+ static RubyRequest*
+ create()
+ {
+ return new RubyRequest();
+ }
+
+ RubyRequest*
+ clone() const
+ {
+ return new RubyRequest(*this);
+ }
+
+ const Address&
+ getLineAddress() const
+ {
+ return m_LineAddress;
+ }
+
+ const Address&
+ getPhysicalAddress() const
+ {
+ return m_PhysicalAddress;
+ }
+
+ const RubyRequestType&
+ getType() const
+ {
+ return m_Type;
+ }
+
+ const Address&
+ getProgramCounter() const
+ {
+ return m_ProgramCounter;
+ }
+
+ const RubyAccessMode&
+ getAccessMode() const
+ {
+ return m_AccessMode;
+ }
+
+ const int&
+ getSize() const
+ {
+ return m_Size;
+ }
+
+ const PrefetchBit&
+ getPrefetch() const
+ {
+ return m_Prefetch;
+ }
void print(std::ostream& out) const;
};
-std::ostream& operator<<(std::ostream& out, const RubyRequest& obj);
+inline std::ostream&
+operator<<(std::ostream& out, const RubyRequest& obj)
+{
+ obj.print(out);
+ out << std::flush;
+ return out;
+}
#endif
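
Besides the accessors, the class gains the create()/clone() pair shown above; Ruby's message buffers duplicate messages through a base-class pointer, so clone() must return a fresh copy of the dynamic type. A standalone sketch of that prototype pattern, with illustrative names only:

    #include <cassert>
    #include <memory>

    class Message {
      public:
        virtual ~Message() {}
        virtual Message* clone() const = 0;   // copy through a base pointer
    };

    class DemoRequest : public Message {
      public:
        explicit DemoRequest(int size) : m_Size(size) {}

        static DemoRequest* create() { return new DemoRequest(0); }
        DemoRequest* clone() const { return new DemoRequest(*this); }  // covariant return

        int getSize() const { return m_Size; }
      private:
        int m_Size;
    };

    int main()
    {
        std::unique_ptr<Message> original(new DemoRequest(64));
        std::unique_ptr<Message> copy(original->clone());   // copies the derived object
        assert(dynamic_cast<DemoRequest*>(copy.get())->getSize() == 64);
        return 0;
    }
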
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh
index cabba286a..f23e15c91 100644
--- a/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh
@@ -56,8 +56,8 @@ void profile_request(const std::string& L1CacheStateStr,
const std::string& L2CacheStateStr,
const std::string& directoryStateStr,
const std::string& requestTypeStr);
-void profile_miss(const CacheMsg& msg, NodeID id);
-void profile_L1Cache_miss(const CacheMsg& msg, NodeID id);
+void profile_miss(const RubyRequest& msg, NodeID id);
+void profile_L1Cache_miss(const RubyRequest& msg, NodeID id);
void profile_token_retry(const Address& addr, AccessType type, int count);
void profile_filter_action(int action);
void profile_persistent_prediction(const Address& addr, AccessType type);
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
index bb23406b9..058a51bae 100644
--- a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
@@ -36,8 +36,6 @@
#include <cassert>
#include "mem/protocol/AccessType.hh"
-#include "mem/protocol/CacheMsg.hh"
-#include "mem/protocol/RubyRequestType.hh"
#include "mem/protocol/Directory_State.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/L1Cache_State.hh"
diff --git a/src/mem/ruby/system/CacheMemory.cc b/src/mem/ruby/system/CacheMemory.cc
index ea5054e4c..192fa8c87 100644
--- a/src/mem/ruby/system/CacheMemory.cc
+++ b/src/mem/ruby/system/CacheMemory.cc
@@ -344,7 +344,7 @@ CacheMemory::setMRU(const Address& address)
}
void
-CacheMemory::profileMiss(const CacheMsg& msg)
+CacheMemory::profileMiss(const RubyRequest& msg)
{
m_profiler_ptr->addCacheStatSample(msg.getType(),
msg.getAccessMode(),
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
index 4e7acd4ec..197ac9f40 100644
--- a/src/mem/ruby/system/CacheMemory.hh
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -35,7 +35,7 @@
#include "base/hashmap.hh"
#include "mem/protocol/AccessPermission.hh"
-#include "mem/protocol/CacheMsg.hh"
+#include "mem/protocol/RubyRequest.hh"
#include "mem/protocol/RubyRequestType.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/MachineType.hh"
@@ -107,7 +107,7 @@ class CacheMemory : public SimObject
// Set this address to most recently used
void setMRU(const Address& address);
- void profileMiss(const CacheMsg & msg);
+ void profileMiss(const RubyRequest & msg);
void profileGenericRequest(GenericRequestType requestType,
RubyAccessMode accessType,
diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc
index 772bc5142..2889c0c57 100644
--- a/src/mem/ruby/system/DMASequencer.cc
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -53,11 +53,11 @@ DMASequencer::makeRequest(const RubyRequest &request)
return RequestStatus_BufferFull;
}
- uint64_t paddr = request.paddr;
+ uint64_t paddr = request.m_PhysicalAddress.getAddress();
uint8_t* data = request.data;
- int len = request.len;
+ int len = request.m_Size;
bool write = false;
- switch(request.type) {
+ switch(request.m_Type) {
case RubyRequestType_LD:
write = false;
break;
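
Callers such as DMASequencer now read the address through m_PhysicalAddress.getAddress() and the length through m_Size instead of the old raw paddr/len fields. A rough sketch of the Address behaviour those calls assume, limited to the three operations this patch uses (the real gem5 Address class does considerably more) and with the block size fixed at 64 bytes for the example:

    #include <cassert>
    #include <cstdint>

    // Toy stand-in for Ruby's Address: a physical address plus line-granularity helpers.
    class DemoAddress {
      public:
        explicit DemoAddress(uint64_t paddr = 0) : m_address(paddr) {}

        uint64_t getAddress() const { return m_address; }                // raw value
        uint64_t getOffset()  const { return m_address % kBlockBytes; }  // offset within block
        void     makeLineAddress()  { m_address -= getOffset(); }        // align to block start

      private:
        static constexpr uint64_t kBlockBytes = 64;  // assumed block size for this sketch
        uint64_t m_address;
    };

    int main()
    {
        DemoAddress addr(0x1234);
        assert(addr.getOffset() == 0x34);
        addr.makeLineAddress();
        assert(addr.getAddress() == 0x1200);
        return 0;
    }
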
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index c79154566..92627740f 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -253,7 +253,7 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
pkt->getSize(), pc, type,
RubyAccessMode_Supervisor, pkt);
- assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <=
+ assert(ruby_request.m_PhysicalAddress.getOffset() + ruby_request.m_Size <=
RubySystem::getBlockSizeBytes());
// Submit the ruby request
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index a5f1a06fa..7eb46e006 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -29,7 +29,6 @@
#include "base/str.hh"
#include "base/misc.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
-#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
@@ -104,7 +103,7 @@ Sequencer::wakeup()
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_readRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
- request->ruby_request.paddr, m_readRequestTable.size(),
+ request->ruby_request.m_PhysicalAddress, m_readRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
@@ -119,7 +118,7 @@ Sequencer::wakeup()
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_writeRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
- request->ruby_request.paddr, m_writeRequestTable.size(),
+ request->ruby_request.m_PhysicalAddress, m_writeRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
@@ -227,15 +226,15 @@ Sequencer::insertRequest(SequencerRequest* request)
schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
}
- Address line_addr(request->ruby_request.paddr);
+ Address line_addr(request->ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
- if ((request->ruby_request.type == RubyRequestType_ST) ||
- (request->ruby_request.type == RubyRequestType_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_RMW_Write) ||
- (request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
+ if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+ (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+ (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
pair<RequestTable::iterator, bool> r =
m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
@@ -288,15 +287,15 @@ Sequencer::removeRequest(SequencerRequest* srequest)
m_writeRequestTable.size() + m_readRequestTable.size());
const RubyRequest & ruby_request = srequest->ruby_request;
- Address line_addr(ruby_request.paddr);
+ Address line_addr(ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
- if ((ruby_request.type == RubyRequestType_ST) ||
- (ruby_request.type == RubyRequestType_RMW_Read) ||
- (ruby_request.type == RubyRequestType_RMW_Write) ||
- (ruby_request.type == RubyRequestType_Load_Linked) ||
- (ruby_request.type == RubyRequestType_Store_Conditional) ||
- (ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
- (ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
+ if ((ruby_request.m_Type == RubyRequestType_ST) ||
+ (ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+ (ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+ (ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+ (ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+ (ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+ (ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
m_writeRequestTable.erase(line_addr);
} else {
m_readRequestTable.erase(line_addr);
@@ -314,7 +313,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
// longer locked.
//
bool success = true;
- if (request->ruby_request.type == RubyRequestType_Store_Conditional) {
+ if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
if (!m_dataCache_ptr->isLocked(address, m_version)) {
//
// For failed SC requests, indicate the failure to the cpu by
@@ -333,7 +332,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
// Independent of success, all SC operations must clear the lock
//
m_dataCache_ptr->clearLocked(address);
- } else if (request->ruby_request.type == RubyRequestType_Load_Linked) {
+ } else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
//
// Note: To fully follow Alpha LLSC semantics, should the LL clear any
// previously locked cache lines?
@@ -380,13 +379,13 @@ Sequencer::writeCallback(const Address& address,
m_writeRequestTable.erase(i);
markRemoved();
- assert((request->ruby_request.type == RubyRequestType_ST) ||
- (request->ruby_request.type == RubyRequestType_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_RMW_Write) ||
- (request->ruby_request.type == RubyRequestType_Load_Linked) ||
- (request->ruby_request.type == RubyRequestType_Store_Conditional) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
- (request->ruby_request.type == RubyRequestType_Locked_RMW_Write));
+ assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
+ (request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
+ (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
+ (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write));
//
// For Alpha, properly handle LL, SC, and write requests with respect to
@@ -398,9 +397,9 @@ Sequencer::writeCallback(const Address& address,
if(!m_usingNetworkTester)
success = handleLlsc(address, request);
- if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
+ if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
- } else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
+ } else if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) {
m_controller->unblock(address);
}
@@ -440,8 +439,8 @@ Sequencer::readCallback(const Address& address,
m_readRequestTable.erase(i);
markRemoved();
- assert((request->ruby_request.type == RubyRequestType_LD) ||
- (request->ruby_request.type == RubyRequestType_IFETCH));
+ assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
+ (request->ruby_request.m_Type == RubyRequestType_IFETCH));
hitCallback(request, mach, data, true,
initialRequestTime, forwardRequestTime, firstResponseTime);
@@ -457,10 +456,10 @@ Sequencer::hitCallback(SequencerRequest* srequest,
Time firstResponseTime)
{
const RubyRequest & ruby_request = srequest->ruby_request;
- Address request_address(ruby_request.paddr);
- Address request_line_address(ruby_request.paddr);
+ Address request_address(ruby_request.m_PhysicalAddress);
+ Address request_line_address(ruby_request.m_PhysicalAddress);
request_line_address.makeLineAddress();
- RubyRequestType type = ruby_request.type;
+ RubyRequestType type = ruby_request.m_Type;
Time issued_time = srequest->issue_time;
// Set this cache entry to the most recently used
@@ -498,7 +497,7 @@ Sequencer::hitCallback(SequencerRequest* srequest,
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
g_eventQueue_ptr->getTime(), m_version, "Seq",
success ? "Done" : "SC_Failed", "", "",
- Address(ruby_request.paddr), miss_latency);
+ ruby_request.m_PhysicalAddress, miss_latency);
}
#if 0
if (request.getPrefetch() == PrefetchBit_Yes) {
@@ -514,11 +513,11 @@ Sequencer::hitCallback(SequencerRequest* srequest,
(type == RubyRequestType_Locked_RMW_Read) ||
(type == RubyRequestType_Load_Linked)) {
memcpy(ruby_request.data,
- data.getData(request_address.getOffset(), ruby_request.len),
- ruby_request.len);
+ data.getData(request_address.getOffset(), ruby_request.m_Size),
+ ruby_request.m_Size);
} else {
data.setData(ruby_request.data, request_address.getOffset(),
- ruby_request.len);
+ ruby_request.m_Size);
}
} else {
DPRINTF(MemoryAccess,
@@ -548,21 +547,21 @@ RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
bool is_outstanding_store =
- !!m_writeRequestTable.count(line_address(Address(request.paddr)));
+ !!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
bool is_outstanding_load =
- !!m_readRequestTable.count(line_address(Address(request.paddr)));
+ !!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
if (is_outstanding_store) {
- if ((request.type == RubyRequestType_LD) ||
- (request.type == RubyRequestType_IFETCH) ||
- (request.type == RubyRequestType_RMW_Read)) {
+ if ((request.m_Type == RubyRequestType_LD) ||
+ (request.m_Type == RubyRequestType_IFETCH) ||
+ (request.m_Type == RubyRequestType_RMW_Read)) {
m_store_waiting_on_load_cycles++;
} else {
m_store_waiting_on_store_cycles++;
}
return RequestStatus_Aliased;
} else if (is_outstanding_load) {
- if ((request.type == RubyRequestType_ST) ||
- (request.type == RubyRequestType_RMW_Write)) {
+ if ((request.m_Type == RubyRequestType_ST) ||
+ (request.m_Type == RubyRequestType_RMW_Write)) {
m_load_waiting_on_store_cycles++;
} else {
m_load_waiting_on_load_cycles++;
@@ -586,7 +585,7 @@ Sequencer::empty() const
RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
- assert(Address(request.paddr).getOffset() + request.len <=
+ assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
RubySystem::getBlockSizeBytes());
RequestStatus status = getRequestStatus(request);
if (status != RequestStatus_Ready)
@@ -610,11 +609,10 @@ Sequencer::makeRequest(const RubyRequest &request)
void
Sequencer::issueRequest(const RubyRequest& request)
{
- // TODO: get rid of CacheMsg, RubyRequestType, and
- // AccessModeTYpe, & have SLICC use RubyRequest and subtypes
- // natively
+ // TODO: Eliminate RubyRequest being copied again.
+
RubyRequestType ctype;
- switch(request.type) {
+ switch(request.m_Type) {
case RubyRequestType_IFETCH:
ctype = RubyRequestType_IFETCH;
break;
@@ -651,7 +649,7 @@ Sequencer::issueRequest(const RubyRequest& request)
}
RubyAccessMode amtype;
- switch(request.access_mode){
+ switch(request.m_AccessMode){
case RubyAccessMode_User:
amtype = RubyAccessMode_User;
break;
@@ -665,19 +663,21 @@ Sequencer::issueRequest(const RubyRequest& request)
assert(0);
}
- Address line_addr(request.paddr);
+ Address line_addr(request.m_PhysicalAddress);
line_addr.makeLineAddress();
- CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
- Address(request.pc), amtype, request.len, PrefetchBit_No,
- request.proc_id);
+ RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
+ request.data, request.m_Size,
+ request.m_ProgramCounter.getAddress(),
+ ctype, amtype, request.pkt,
+ PrefetchBit_No, request.proc_id);
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
- Address(request.paddr), RubyRequestType_to_string(request.type));
+ request.m_PhysicalAddress, RubyRequestType_to_string(request.m_Type));
Time latency = 0; // initialzed to an null value
- if (request.type == RubyRequestType_IFETCH)
+ if (request.m_Type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
else
latency = m_dataCache_ptr->getLatency();
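
The Sequencer hunks above repeat the same seven-way type test wherever a request must be classified into the write table (insertRequest, removeRequest, and the writeCallback assertion). For reference, the classification they encode, written once as a standalone predicate; the enum values mirror the RubyRequestType names used in the diff, and this helper is illustrative rather than part of the patch.

    // Request kinds the Sequencer tracks in m_writeRequestTable; everything
    // else (LD, IFETCH, ...) goes to m_readRequestTable.
    enum class DemoRequestType {
        LD, IFETCH, ST,
        RMW_Read, RMW_Write,
        Load_Linked, Store_Conditional,
        Locked_RMW_Read, Locked_RMW_Write
    };

    inline bool isWriteTableRequest(DemoRequestType type)
    {
        switch (type) {
          case DemoRequestType::ST:
          case DemoRequestType::RMW_Read:
          case DemoRequestType::RMW_Write:
          case DemoRequestType::Load_Linked:
          case DemoRequestType::Store_Conditional:
          case DemoRequestType::Locked_RMW_Read:
          case DemoRequestType::Locked_RMW_Write:
            return true;
          default:
            return false;
        }
    }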