path: root/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
author     Nilay Vaish <nilay@cs.wisc.edu>    2011-02-12 11:41:20 -0600
committer  Nilay Vaish <nilay@cs.wisc.edu>    2011-02-12 11:41:20 -0600
commit     0cede15d6c5213d83e4cd143014321b2ee7ec5eb
tree       2bff094d29f493d61a63cba2fb9a04f409d9e120  /src/mem/protocol/MOESI_CMP_directory-L1cache.sm
parent     2971b8401a4a76a774962900d9aed6e9eb4b2950
download   gem5-0cede15d6c5213d83e4cd143014321b2ee7ec5eb.tar.xz
Ruby: Reorder Cache Lookup in Protocol Files
This patch changes the order in which the L1 dcache and icache are looked up when a request comes in. Previously, when a request arrived for an instruction fetch, the dcache was looked up before the icache so that self-modifying code is handled correctly. In the common case, however, the dcache reports a miss and the subsequent icache lookup reports a hit.

Given the invariant that caches under the same controller track disjoint sets of cache blocks, the icache lookup can be moved ahead of the dcache lookup. On an icache hit, the invariant tells us the dcache would have reported a miss. On an icache miss, the icache would have missed even if the dcache had been looked up first. The behavior is therefore unchanged, but in the common case we expect a reduction in the number of lookups. This was confirmed empirically for MOESI hammer: the ratio of lookups to access requests is now about 1.1 to 1.
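To make the equivalence argument concrete, here is a minimal standalone C++ sketch (hypothetical types, not gem5/SLICC code) that models the two L1 caches as disjoint sets of block addresses and checks that the old lookup order (dcache first) and the new order (icache first) classify an instruction fetch the same way.

    #include <cassert>
    #include <cstdint>
    #include <unordered_set>

    // Hypothetical stand-in for an L1 cache: just the set of resident block
    // addresses. The protocol invariant is that the icache and dcache sets
    // under one controller are disjoint.
    struct Cache {
        std::unordered_set<uint64_t> blocks;
        bool isValid(uint64_t addr) const { return blocks.count(addr) != 0; }
    };

    // What the mandatory-queue logic would do with an instruction fetch.
    enum class Action { IcacheHit, ReplaceBlockInWrongL1, AllocateInIcache };

    // Old order: probe the dcache first (wrong-L1 check), then the icache.
    Action fetchOldOrder(const Cache& icache, const Cache& dcache, uint64_t addr) {
        if (dcache.isValid(addr))
            return Action::ReplaceBlockInWrongL1;  // block lives in the other L1
        if (icache.isValid(addr))
            return Action::IcacheHit;
        return Action::AllocateInIcache;
    }

    // New order: probe the icache first; the dcache is only probed on an icache
    // miss. Disjointness means an icache hit already implies a dcache miss, so
    // the outcome is identical while the common case saves one lookup.
    Action fetchNewOrder(const Cache& icache, const Cache& dcache, uint64_t addr) {
        if (icache.isValid(addr))
            return Action::IcacheHit;
        if (dcache.isValid(addr))
            return Action::ReplaceBlockInWrongL1;
        return Action::AllocateInIcache;
    }

    int main() {
        Cache icache{{0x40, 0x80}};
        Cache dcache{{0x100}};  // disjoint from the icache by construction
        for (uint64_t addr : {0x40ULL, 0x100ULL, 0x200ULL})
            assert(fetchOldOrder(icache, dcache, addr) ==
                   fetchNewOrder(icache, dcache, addr));
        return 0;
    }

The actual SLICC change below applies the same reordering in both the instruction-fetch and data-access paths of the mandatory queue.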
Diffstat (limited to 'src/mem/protocol/MOESI_CMP_directory-L1cache.sm')
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm  |  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index 4082f23c9..7f0ab62a8 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -338,14 +338,6 @@ machine(L1Cache, "Directory protocol")
if (in_msg.Type == CacheRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- // Check to see if it is in the OTHER L1
- if (is_valid(L1Dcache_entry)) {
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
- TBEs[in_msg.LineAddress]);
- }
-
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
@@ -353,6 +345,14 @@ machine(L1Cache, "Directory protocol")
in_msg.LineAddress, L1Icache_entry,
TBEs[in_msg.LineAddress]);
} else {
+
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ // Check to see if it is in the OTHER L1
+ if (is_valid(L1Dcache_entry)) {
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
+ TBEs[in_msg.LineAddress]);
+ }
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
@@ -369,14 +369,6 @@ machine(L1Cache, "Directory protocol")
} else {
// *** DATA ACCESS ***
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- // Check to see if it is in the OTHER L1
- if (is_valid(L1Icache_entry)) {
- // The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- }
-
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 ask the L2 for it
@@ -384,6 +376,14 @@ machine(L1Cache, "Directory protocol")
in_msg.LineAddress, L1Dcache_entry,
TBEs[in_msg.LineAddress]);
} else {
+
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ // Check to see if it is in the OTHER L1
+ if (is_valid(L1Icache_entry)) {
+ // The block is in the wrong L1, put the request on the queue to the shared L2
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ }
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),