author     Nilay Vaish, Malek Musleh <malek.musleh@gmail.com> <nilay@cs.wisc.edu>    2013-05-21 11:31:31 -0500
committer  Nilay Vaish, Malek Musleh <malek.musleh@gmail.com> <nilay@cs.wisc.edu>    2013-05-21 11:31:31 -0500
commit     59a7abff29aa5a687e1693f003c20d7e2000c40a (patch)
tree       e1cf2cf822cf5b1002a6b72d8d613f65e0e1df8d /src/mem/protocol
parent     d3c33d91b68e917478dba48c03a674b21ebd2747 (diff)
download   gem5-59a7abff29aa5a687e1693f003c20d7e2000c40a.tar.xz
ruby: add stats to .sm files, remove cache profiler
This patch changes the way cache statistics are collected in Ruby. As of now, a separate entity called CacheProfiler holds the statistical variables for the caches. The CacheMemory class defines various functions for accessing the CacheProfiler, and these functions are then invoked in the .sm files. I find this approach opaque and prone to error; moreover, we should not be paying the cost of a function call just to record a statistic.

Instead, this patch makes the statistical variables accessible directly in the .sm files. Collection becomes transparent and happens in place, with no function calls. The patch also removes the CacheProfiler class.

--HG--
rename : src/mem/slicc/ast/InfixOperatorExprAST.py => src/mem/slicc/ast/OperatorExprAST.py
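To make the change concrete, the pattern this patch introduces looks like the following. This is an illustrative sketch condensed from the MESI_CMP_directory-L1cache.sm hunks further down; the action names and the demand_hits/demand_misses fields are taken directly from this patch.

    // Before: the miss is recorded through a CacheProfiler call.
    action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
      peek(mandatoryQueue_in, RubyRequest) {
        L1DcacheMemory.profileMiss(in_msg);
      }
    }

    // After: the controller increments the CacheMemory statistic in place.
    action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
      ++L1DcacheMemory.demand_misses;
    }

    // Hit profiling becomes a one-line action as well, and the transitions
    // that service a request from the cache invoke it explicitly, e.g.:
    transition({S,E,M}, Load) {
      h_load_hit;
      uu_profileDataHit;    // ++L1DcacheMemory.demand_hits
      k_popMandatoryQueue;
    }

The demand_misses and demand_hits counters are exported to SLICC through the new Scalar type added to RubySlicc_Types.sm in the last hunk, so the increment updates the cache's statistic directly instead of going through a profiler function.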
Diffstat (limited to 'src/mem/protocol')
-rw-r--r--  src/mem/protocol/MESI_CMP_directory-L1cache.sm    46
-rw-r--r--  src/mem/protocol/MESI_CMP_directory-L2cache.sm    28
-rw-r--r--  src/mem/protocol/MI_example-cache.sm              12
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm   58
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L2cache.sm   22
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L1cache.sm       98
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L2cache.sm       30
-rw-r--r--  src/mem/protocol/MOESI_hammer-cache.sm           155
-rw-r--r--  src/mem/protocol/RubySlicc_Exports.sm             33
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm               10
10 files changed, 287 insertions, 205 deletions
diff --git a/src/mem/protocol/MESI_CMP_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
index f8d731ee1..12e3a618b 100644
--- a/src/mem/protocol/MESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
@@ -874,16 +874,20 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
wakeUpBuffers(address);
}
- action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- L1IcacheMemory.profileMiss(in_msg);
- }
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1IcacheMemory.demand_misses;
}
- action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- L1DcacheMemory.profileMiss(in_msg);
- }
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1IcacheMemory.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1DcacheMemory.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1DcacheMemory.demand_hits;
}
action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
@@ -1024,8 +1028,15 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
// Transitions from Shared
- transition(S, {Load,Ifetch}) {
+ transition({S,E,M}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S,E,M}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
k_popMandatoryQueue;
}
@@ -1049,13 +1060,9 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Transitions from Exclusive
- transition(E, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(E, Store, M) {
+ transition({E,M}, Store, M) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
@@ -1087,15 +1094,6 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
// Transitions from Modified
- transition(M, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(M, Store) {
- hh_store_hit;
- k_popMandatoryQueue;
- }
transition(M, L1_Replacement, M_I) {
forward_eviction_to_cpu;
diff --git a/src/mem/protocol/MESI_CMP_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
index 122faaaf1..dda0d0286 100644
--- a/src/mem/protocol/MESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
@@ -720,26 +720,12 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
}
- GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:GET_INSTR) {
- return GenericRequestType:GET_INSTR;
- } else if(type == CoherenceRequestType:UPGRADE) {
- return GenericRequestType:UPGRADE;
- } else {
- DPRINTF(RubySlicc, "%s\n", type);
- error("Invalid CoherenceRequestType\n");
- }
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cacheMemory.demand_misses;
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
- in_msg.AccessMode, in_msg.Prefetch);
- }
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cacheMemory.demand_hits;
}
action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
@@ -922,6 +908,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
ds_sendSharedDataToRequestor;
nn_addSharer;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
@@ -931,6 +918,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// fw_sendFwdInvToSharers;
fwm_sendFwdInvToSharersMinusRequestor;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
@@ -938,6 +926,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
fwm_sendFwdInvToSharersMinusRequestor;
ts_sendInvAckToUpgrader;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
@@ -957,6 +946,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
transition(M, L1_GETX, MT_MB) {
d_sendDataToRequestor;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
@@ -964,12 +954,14 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
d_sendDataToRequestor;
nn_addSharer;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
transition(M, L1_GETS, MT_MB) {
dd_sendExclusiveDataToRequestor;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index f0bd7b99e..a0a23c308 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -335,10 +335,12 @@ machine(L1Cache, "MI Example L1 Cache")
profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
}
- action(p_profileMiss, "p", desc="Profile cache miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- cacheMemory.profileMiss(in_msg);
- }
+ action(p_profileMiss, "pi", desc="Profile cache miss") {
+ ++cacheMemory.demand_misses;
+ }
+
+ action(p_profileHit, "ph", desc="Profile cache hit") {
+ ++cacheMemory.demand_hits;
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
@@ -427,11 +429,13 @@ machine(L1Cache, "MI Example L1 Cache")
transition(M, Store) {
s_store_hit;
+ p_profileHit;
m_popMandatoryQueue;
}
transition(M, {Load, Ifetch}) {
r_load_hit;
+ p_profileHit;
m_popMandatoryQueue;
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index 101b7abd6..341deba4a 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -859,10 +858,20 @@ machine(L1Cache, "Directory protocol")
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- // profile_miss(in_msg);
- }
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1Icache.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1Dcache.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1Dcache.demand_hits;
}
action(z_recycleRequestQueue, "z", desc="Send the head of the mandatory queue to the back of the queue.") {
@@ -899,7 +908,7 @@ machine(L1Cache, "Directory protocol")
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -907,7 +916,7 @@ machine(L1Cache, "Directory protocol")
jj_allocateL1ICacheBlock;
i_allocateTBE;
a_issueGETS;
- // uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
@@ -915,7 +924,7 @@ machine(L1Cache, "Directory protocol")
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -928,16 +937,23 @@ machine(L1Cache, "Directory protocol")
l_popForwardQueue;
}
- // Transitions from Shared
- transition({S, SM}, {Load, Ifetch}) {
+ transition({S, SM, O, OM, MM, MM_W, M, M_W}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
k_popMandatoryQueue;
}
+ // Transitions from Shared
transition(S, Store, SM) {
i_allocateTBE;
b_issueGETX;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -966,15 +982,10 @@ machine(L1Cache, "Directory protocol")
}
// Transitions from Owned
- transition({O, OM}, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
transition(O, Store, OM) {
i_allocateTBE;
b_issueGETX;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -1003,13 +1014,9 @@ machine(L1Cache, "Directory protocol")
}
// Transitions from MM
- transition({MM, MM_W}, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
transition({MM, MM_W}, Store) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
@@ -1039,18 +1046,15 @@ machine(L1Cache, "Directory protocol")
}
// Transitions from M
- transition({M, M_W}, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
transition(M, Store, MM) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M_W, Store, MM_W) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index 6c61d3eb6..53a7ee027 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -1475,11 +1475,12 @@ machine(L2Cache, "Token protocol")
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1requestNetwork_in, RequestMsg) {
- // AccessModeType not implemented
- // profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
- }
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cache.demand_misses;
+ }
+
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cache.demand_hits;
}
action(y_copyCacheStateToDir, "y", desc="Copy cache state to directory state") {
@@ -1909,7 +1910,7 @@ machine(L2Cache, "Token protocol")
y_copyCacheStateToDir;
r_setMRU;
rr_deallocateL2CacheBlock;
- uu_profileMiss;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1922,6 +1923,7 @@ machine(L2Cache, "Token protocol")
transition(OLSX, L1_GETS, OLSXS) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -2311,6 +2313,7 @@ machine(L2Cache, "Token protocol")
transition(SLS, L1_GETS, SLSS ) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -2333,6 +2336,7 @@ machine(L2Cache, "Token protocol")
transition(OLS, L1_GETS, OLSS) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -2361,10 +2365,11 @@ machine(L2Cache, "Token protocol")
i_allocateTBE;
// should count 0 of course
h_countLocalSharersExceptRequestor;
- d_sendDataToL1GETX
+ d_sendDataToL1GETX;
y_copyCacheStateToDir;
rr_deallocateL2CacheBlock;
s_deallocateTBE;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -2380,12 +2385,14 @@ machine(L2Cache, "Token protocol")
d_sendDataToL1GETX;
r_setMRU;
s_deallocateTBE;
+ uu_profileHit;
o_popL1RequestQueue;
}
transition(S, L1_GETS, SS) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -2397,6 +2404,7 @@ machine(L2Cache, "Token protocol")
transition(O, L1_GETS, OO) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 02737a4f6..f1931264e 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -1,6 +1,5 @@
-
/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -665,7 +664,8 @@ machine(L1Cache, "Token protocol")
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry, tbe);
} else {
@@ -695,7 +695,8 @@ machine(L1Cache, "Token protocol")
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry, tbe);
} else {
@@ -1534,14 +1535,20 @@ machine(L1Cache, "Token protocol")
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory.profileMiss(in_msg);
- } else {
- L1IcacheMemory.profileMiss(in_msg);
- }
- }
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1IcacheMemory.demand_misses;
+ }
+
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1IcacheMemory.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1DcacheMemory.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1DcacheMemory.demand_hits;
}
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
@@ -1594,7 +1601,7 @@ machine(L1Cache, "Token protocol")
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -1602,7 +1609,7 @@ machine(L1Cache, "Token protocol")
pp_allocateL1ICacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
@@ -1610,7 +1617,7 @@ machine(L1Cache, "Token protocol")
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -1631,21 +1638,21 @@ machine(L1Cache, "Token protocol")
transition(I, Load, IS) {
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
transition(I, Ifetch, IS) {
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
transition(I, {Store, Atomic}, IM) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -1698,15 +1705,22 @@ machine(L1Cache, "Token protocol")
}
// Transitions from Shared
- transition({S, SM, S_L, SM_L}, {Load, Ifetch}) {
+ transition({S, SM, S_L, SM_L}, Load) {
h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, S_L, SM_L}, Ifetch) {
+ h_load_hit;
+ uu_profileInstHit;
k_popMandatoryQueue;
}
transition(S, {Store, Atomic}, SM) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -1779,15 +1793,22 @@ machine(L1Cache, "Token protocol")
}
// Transitions from Owned
- transition({O, OM}, {Load, Ifetch}) {
+ transition({O, OM}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({O, OM}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(O, {Store, Atomic}, OM) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -1862,23 +1883,33 @@ machine(L1Cache, "Token protocol")
}
// Transitions from Modified
- transition({MM, MM_W}, {Load, Ifetch}) {
+ transition({MM, MM_W}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, MM_W}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition({MM_W}, {Store, Atomic}) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(MM, Store) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(MM, Atomic, M) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
@@ -1927,28 +1958,39 @@ machine(L1Cache, "Token protocol")
}
// Transitions from Dirty Exclusive
- transition({M, M_W}, {Load, Ifetch}) {
+ transition({M, M_W}, Ifetch) {
+ h_load_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({M, M_W}, Load) {
h_load_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M, Store, MM) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M, Atomic) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M_W, Store, MM_W) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M_W, Atomic) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
@@ -2243,7 +2285,7 @@ machine(L1Cache, "Token protocol")
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -2251,7 +2293,7 @@ machine(L1Cache, "Token protocol")
pp_allocateL1ICacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
@@ -2259,7 +2301,7 @@ machine(L1Cache, "Token protocol")
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
@@ -2269,7 +2311,7 @@ machine(L1Cache, "Token protocol")
transition(S_L, {Store, Atomic}, SM_L) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
index da8bcc6fa..fe953a82e 100644
--- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -296,17 +296,6 @@ machine(L2Cache, "Token protocol")
}
}
- GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else {
- DPRINTF(RubySlicc, "%s\n", type);
- error("invalid CoherenceRequestType");
- }
- }
-
// ** OUT_PORTS **
out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
@@ -976,14 +965,13 @@ machine(L2Cache, "Token protocol")
unset_cache_entry();
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1requestNetwork_in, RequestMsg) {
- L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
- in_msg.AccessMode,
- in_msg.Prefetch);
- }
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cacheMemory.demand_misses;
}
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cacheMemory.demand_hits;
+ }
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
peek(responseNetwork_in, ResponseMsg) {
@@ -1257,6 +1245,7 @@ machine(L2Cache, "Token protocol")
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1265,6 +1254,7 @@ machine(L2Cache, "Token protocol")
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1351,6 +1341,7 @@ machine(L2Cache, "Token protocol")
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1358,6 +1349,7 @@ machine(L2Cache, "Token protocol")
k_dataOwnerFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1394,6 +1386,7 @@ machine(L2Cache, "Token protocol")
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1401,6 +1394,7 @@ machine(L2Cache, "Token protocol")
k_dataAndAllTokensFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1471,6 +1465,7 @@ machine(L2Cache, "Token protocol")
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
@@ -1478,6 +1473,7 @@ machine(L2Cache, "Token protocol")
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index bc3b700d3..3680294de 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -1231,17 +1231,28 @@ machine(L1Cache, "AMD Hammer-like protocol")
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- if (L1IcacheMemory.isTagPresent(address)) {
- L1IcacheMemory.profileMiss(in_msg);
- } else if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory.profileMiss(in_msg);
- }
- if (L2cacheMemory.isTagPresent(address) == false) {
- L2cacheMemory.profileMiss(in_msg);
- }
- }
+ action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1DcacheMemory.demand_misses;
+ }
+
+ action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
+ ++L1DcacheMemory.demand_hits;
+ }
+
+ action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1IcacheMemory.demand_misses;
+ }
+
+ action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
+ ++L1IcacheMemory.demand_hits;
+ }
+
+ action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
+ ++L2cacheMemory.demand_misses;
+ }
+
+ action(uu_profileL2Hit, "\uh", desc="Profile the demand hits ") {
+ ++L2cacheMemory.demand_hits;
}
action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
@@ -1317,7 +1328,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1; // Not really needed for state I
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1328,7 +1338,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1339,7 +1348,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1350,7 +1358,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1361,7 +1368,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1372,7 +1378,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1383,7 +1388,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1394,7 +1398,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1405,7 +1408,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1416,7 +1418,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
@@ -1447,34 +1448,36 @@ machine(L1Cache, "AMD Hammer-like protocol")
}
// Transitions from Idle
- transition({I, IR}, Load, IS) {
+ transition({I,IR}, Load, IS) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
- transition({I, IR}, Ifetch, IS) {
+ transition({I,IR}, Ifetch, IS) {
jj_allocateL1ICacheBlock;
i_allocateTBE;
a_issueGETS;
- uu_profileMiss;
+ uu_profileL1InstMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
- transition({I, IR}, Store, IM) {
+ transition({I,IR}, Store, IM) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
transition({I, IR}, Flush_line, IM_F) {
it_allocateTBE;
bf_issueGETF;
- uu_profileMiss;
k_popMandatoryQueue;
}
@@ -1489,28 +1492,45 @@ machine(L1Cache, "AMD Hammer-like protocol")
}
// Transitions from Shared
- transition({S, SM, ISM}, {Load, Ifetch}) {
+ transition({S, SM, ISM}, Load) {
+ h_load_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, ISM}, Ifetch) {
h_load_hit;
+ uu_profileL1InstHit;
k_popMandatoryQueue;
}
- transition(SR, {Load, Ifetch}, S) {
+ transition(SR, Load, S) {
h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
- transition({S, SR}, Store, SM) {
+ transition(SR, Ifetch, S) {
+ h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition({S,SR}, Store, SM) {
i_allocateTBE;
b_issueGETX;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
transition({S, SR}, Flush_line, SM_F) {
i_allocateTBE;
bf_issueGETF;
- uu_profileMiss;
forward_eviction_to_cpu;
gg_deallocateL1CacheBlock;
k_popMandatoryQueue;
@@ -1534,29 +1554,47 @@ machine(L1Cache, "AMD Hammer-like protocol")
}
// Transitions from Owned
- transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
+ transition({O, OM, SS, MM_W, M_W}, {Load}) {
h_load_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
- transition(OR, {Load, Ifetch}, O) {
+ transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
h_load_hit;
+ uu_profileL1InstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(OR, Load, O) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(OR, Ifetch, O) {
+ h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
- transition({O, OR}, Store, OM) {
+ transition({O,OR}, Store, OM) {
i_allocateTBE;
b_issueGETX;
p_decrementNumberOfMessagesByOne;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
+
transition({O, OR}, Flush_line, OM_F) {
i_allocateTBE;
bf_issueGETF;
p_decrementNumberOfMessagesByOne;
- uu_profileMiss;
forward_eviction_to_cpu;
gg_deallocateL1CacheBlock;
k_popMandatoryQueue;
@@ -1587,24 +1625,44 @@ machine(L1Cache, "AMD Hammer-like protocol")
}
// Transitions from Modified
- transition({MM, M}, {Load, Ifetch}) {
+ transition({MM, M}, {Ifetch}) {
h_load_hit;
+ uu_profileL1InstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, M}, {Load}) {
+ h_load_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
transition(MM, Store) {
hh_store_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MMR, Load, MM) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
- transition(MMR, {Load, Ifetch}, MM) {
+ transition(MMR, Ifetch, MM) {
h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
transition(MMR, Store, MM) {
hh_store_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
@@ -1662,17 +1720,30 @@ machine(L1Cache, "AMD Hammer-like protocol")
// Transitions from Dirty Exclusive
transition(M, Store, MM) {
hh_store_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
- transition(MR, {Load, Ifetch}, M) {
+ transition(MR, Load, M) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MR, Ifetch, M) {
h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
transition(MR, Store, MM) {
hh_store_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
@@ -1947,6 +2018,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
transition(MM_W, Store) {
hh_store_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
@@ -1972,6 +2044,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
transition(M_W, Store, MM_W) {
hh_store_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index 036419095..015ae8cb3 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -144,39 +144,6 @@ enumeration(SequencerRequestType, desc="...", default="SequencerRequestType_NULL
NULL, desc="Invalid request type";
}
-enumeration(GenericRequestType, desc="...", default="GenericRequestType_NULL") {
- GETS, desc="gets request";
- GET_INSTR, desc="get instr request";
- GETX, desc="getx request";
- UPGRADE, desc="upgrade request";
- DOWNGRADE, desc="downgrade request";
- INV, desc="invalidate request";
- INV_S, desc="invalidate shared copy request";
- PUTS, desc="puts request";
- PUTO, desc="puto request";
- PUTX, desc="putx request";
- L2_PF, desc="L2 prefetch";
- LD, desc="Load";
- ST, desc="Store";
- ATOMIC, desc="Atomic Load/Store";
- IFETCH, desc="Instruction fetch";
- IO, desc="I/O";
- NACK, desc="Nack";
- REPLACEMENT, desc="Replacement";
- WB_ACK, desc="WriteBack ack";
- EXE_ACK, desc="Execlusive ack";
- COMMIT, desc="Commit version";
- LD_XACT, desc="Transactional Load";
- LDX_XACT, desc="Transactional Load-Intend-Modify";
- ST_XACT, desc="Transactional Store";
- BEGIN_XACT, desc="Begin Transaction";
- COMMIT_XACT, desc="Commit Transaction";
- ABORT_XACT, desc="Abort Transaction";
- DMA_READ, desc="DMA READ";
- DMA_WRITE, desc="DMA WRITE";
- NULL, desc="null request type";
-}
-
enumeration(CacheRequestType, desc="...", default="CacheRequestType_NULL") {
DataArrayRead, desc="Read access to the cache's data array";
DataArrayWrite, desc="Write access to the cache's data array";
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index c94020792..acd86a8fe 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -37,6 +37,7 @@
external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
external_type(OutPort, primitive="yes");
+external_type(Scalar, primitive="yes");
structure(InPort, external = "yes", primitive="yes") {
bool isReady();
@@ -148,15 +149,12 @@ structure (CacheMemory, external = "yes") {
void deallocate(Address);
AbstractCacheEntry lookup(Address);
bool isTagPresent(Address);
- void profileMiss(RubyRequest);
-
- void profileGenericRequest(GenericRequestType,
- RubyAccessMode,
- PrefetchBit);
-
void setMRU(Address);
void recordRequestType(CacheRequestType);
bool checkResourceAvailable(CacheResourceType, Address);
+
+ Scalar demand_misses;
+ Scalar demand_hits;
}
structure (WireBuffer, inport="yes", outport="yes", external = "yes") {