summaryrefslogtreecommitdiff
path: root/src/mem/protocol
diff options
context:
space:
mode:
authorNilay Vaish <nilay@cs.wisc.edu>2015-09-16 11:59:56 -0500
committerNilay Vaish <nilay@cs.wisc.edu>2015-09-16 11:59:56 -0500
commitcd9e4458139658c4ce8f038e3a44bdecd17fa75d (patch)
treec7403c142a9bf36869f75016c683b9c7ef731399 /src/mem/protocol
parent78a1245b4115373856514eacf2264141e6cd4aca (diff)
downloadgem5-cd9e4458139658c4ce8f038e3a44bdecd17fa75d.tar.xz
ruby: message buffer, timer table: significant changes
This patch changes MessageBuffer and TimerTable, two structures used for buffering messages by components in ruby. These structures would no longer maintain pointers to clock objects. Functions in these structures have been changed to take as input current time in Tick. Similarly, these structures will not operate on Cycle valued latencies for different operations. The corresponding functions would need to be provided with these latencies by components invoking the relevant functions. These latencies should also be in Ticks. I felt the need for these changes while trying to speed up ruby. The ultimate aim is to eliminate the Consumer class and replace it with an EventManager object in the MessageBuffer and TimerTable classes. This object would be used for scheduling events. The event itself would contain information on the object and function to be invoked. In hindsight, it seems I should have done this while I was moving away from use of a single global clock in the memory system. That change led to the introduction of clock objects that replaced the global clock object. It never crossed my mind that having clock object pointers is not a good design. And now I really don't like the fact that we have separate consumer, receiver and sender pointers in message buffers.
Diffstat (limited to 'src/mem/protocol')
-rw-r--r--src/mem/protocol/MESI_Three_Level-L0cache.sm14
-rw-r--r--src/mem/protocol/MESI_Three_Level-L1cache.sm16
-rw-r--r--src/mem/protocol/MESI_Two_Level-L1cache.sm20
-rw-r--r--src/mem/protocol/MESI_Two_Level-L2cache.sm21
-rw-r--r--src/mem/protocol/MESI_Two_Level-dir.sm18
-rw-r--r--src/mem/protocol/MESI_Two_Level-dma.sm9
-rw-r--r--src/mem/protocol/MI_example-cache.sm16
-rw-r--r--src/mem/protocol/MI_example-dir.sm19
-rw-r--r--src/mem/protocol/MI_example-dma.sm11
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-L1cache.sm33
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-L2cache.sm24
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-dir.sm16
-rw-r--r--src/mem/protocol/MOESI_CMP_directory-dma.sm13
-rw-r--r--src/mem/protocol/MOESI_CMP_token-L1cache.sm77
-rw-r--r--src/mem/protocol/MOESI_CMP_token-L2cache.sm17
-rw-r--r--src/mem/protocol/MOESI_CMP_token-dir.sm43
-rw-r--r--src/mem/protocol/MOESI_CMP_token-dma.sm10
-rw-r--r--src/mem/protocol/MOESI_hammer-cache.sm17
-rw-r--r--src/mem/protocol/MOESI_hammer-dir.sm25
-rw-r--r--src/mem/protocol/MOESI_hammer-dma.sm10
-rw-r--r--src/mem/protocol/Network_test-cache.sm5
-rw-r--r--src/mem/protocol/Network_test-dir.sm16
-rw-r--r--src/mem/protocol/RubySlicc_Defines.sm2
-rw-r--r--src/mem/protocol/RubySlicc_Exports.sm1
-rw-r--r--src/mem/protocol/RubySlicc_Types.sm12
25 files changed, 265 insertions, 200 deletions
diff --git a/src/mem/protocol/MESI_Three_Level-L0cache.sm b/src/mem/protocol/MESI_Three_Level-L0cache.sm
index 7e8626dc9..3f22a4906 100644
--- a/src/mem/protocol/MESI_Three_Level-L0cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L0cache.sm
@@ -135,6 +135,8 @@ machine(L0Cache, "MESI Directory L0 Cache")
TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -255,7 +257,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
// Messages for this L0 cache from the L1 cache
in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
- if (messgeBuffer_in.isReady()) {
+ if (messgeBuffer_in.isReady(clockEdge())) {
peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
assert(in_msg.Dest == machineID);
@@ -289,7 +291,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
// Mandatory Queue betweens Node's CPU and it's L0 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -513,17 +515,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, messgeBuffer_in.dequeue());
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, messgeBuffer_in.dequeue());
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
diff --git a/src/mem/protocol/MESI_Three_Level-L1cache.sm b/src/mem/protocol/MESI_Three_Level-L1cache.sm
index 6c8df8d75..0eb9a43b5 100644
--- a/src/mem/protocol/MESI_Three_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L1cache.sm
@@ -151,6 +151,8 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -266,7 +268,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Response From the L2 Cache to this L1 cache
in_port(responseNetwork_in, ResponseMsg, responseFromL2, rank = 3) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -303,7 +305,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Request to this L1 cache from the shared L2
in_port(requestNetwork_in, RequestMsg, requestFromL2, rank = 2) {
- if(requestNetwork_in.isReady()) {
+ if(requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -340,7 +342,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Requests to this L1 cache from the L0 cache.
in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
- if (messageBufferFromL0_in.isReady()) {
+ if (messageBufferFromL0_in.isReady(clockEdge())) {
peek(messageBufferFromL0_in, CoherenceMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -634,17 +636,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
action(k_popL0RequestQueue, "k", desc="Pop mandatory queue.") {
- messageBufferFromL0_in.dequeue();
+ messageBufferFromL0_in.dequeue(clockEdge());
}
action(l_popL2RequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestNetwork_in.dequeue());
+ Tick delay := requestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popL2ResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseNetwork_in.dequeue());
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
diff --git a/src/mem/protocol/MESI_Two_Level-L1cache.sm b/src/mem/protocol/MESI_Two_Level-L1cache.sm
index b9be4663f..c40a47cae 100644
--- a/src/mem/protocol/MESI_Two_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L1cache.sm
@@ -156,6 +156,8 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -296,7 +298,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// searches of all entries in the queue, not just the head msg. All
// msgs in the structure can be invalidated if a demand miss matches.
in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
- if (optionalQueue_in.isReady()) {
+ if (optionalQueue_in.isReady(clockEdge())) {
peek(optionalQueue_in, RubyRequest) {
// Instruction Prefetch
if (in_msg.Type == RubyRequestType:IFETCH) {
@@ -373,7 +375,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Response L1 Network - response msg to this L1 cache
in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
- if (responseL1Network_in.isReady()) {
+ if (responseL1Network_in.isReady(clockEdge())) {
peek(responseL1Network_in, ResponseMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -413,7 +415,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Request InterChip network - request from this L1 cache to the shared L2
in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
- if(requestL1Network_in.isReady()) {
+ if(requestL1Network_in.isReady(clockEdge())) {
peek(requestL1Network_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -439,7 +441,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Mandatory Queue betweens Node's CPU and it's L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -866,17 +868,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestL1Network_in.dequeue());
+ Tick delay := requestL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseL1Network_in.dequeue());
+ Tick delay := responseL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
@@ -963,7 +967,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
- optionalQueue_in.dequeue();
+ optionalQueue_in.dequeue(clockEdge());
}
action(mp_markPrefetched, "mp", desc="Write data from response queue to cache") {
diff --git a/src/mem/protocol/MESI_Two_Level-L2cache.sm b/src/mem/protocol/MESI_Two_Level-L2cache.sm
index e4f719d9f..4134b7964 100644
--- a/src/mem/protocol/MESI_Two_Level-L2cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L2cache.sm
@@ -148,6 +148,10 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ Cycles ticksToCycles(Tick t);
+
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -285,7 +289,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
- if(L1unblockNetwork_in.isReady()) {
+ if(L1unblockNetwork_in.isReady(clockEdge())) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -307,7 +311,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// Response L2 Network - response msg to this particular L2 bank
in_port(responseL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
- if (responseL2Network_in.isReady()) {
+ if (responseL2Network_in.isReady(clockEdge())) {
peek(responseL2Network_in, ResponseMsg) {
// test wether it's from a local L1 or an off chip source
assert(in_msg.Destination.isElement(machineID));
@@ -348,7 +352,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// L1 Request
in_port(L1RequestL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
- if(L1RequestL2Network_in.isReady()) {
+ if(L1RequestL2Network_in.isReady(clockEdge())) {
peek(L1RequestL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -604,15 +608,18 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
- profileMsgDelay(0, L1RequestL2Network_in.dequeue());
+ Tick delay := L1RequestL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
}
action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
- profileMsgDelay(0, L1unblockNetwork_in.dequeue());
+ Tick delay := L1unblockNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
- profileMsgDelay(1, responseL2Network_in.dequeue());
+ Tick delay := responseL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
@@ -769,7 +776,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
action(zn_recycleResponseNetwork, "zn", desc="recycle memory request") {
- responseL2Network_in.recycle();
+ responseL2Network_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
diff --git a/src/mem/protocol/MESI_Two_Level-dir.sm b/src/mem/protocol/MESI_Two_Level-dir.sm
index 7484d001c..c9fbe3875 100644
--- a/src/mem/protocol/MESI_Two_Level-dir.sm
+++ b/src/mem/protocol/MESI_Two_Level-dir.sm
@@ -98,6 +98,8 @@ machine(Directory, "MESI Two Level directory protocol")
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE tbe);
void unset_tbe();
void wakeUpBuffers(Addr a);
@@ -190,7 +192,7 @@ machine(Directory, "MESI Two Level directory protocol")
// ** IN_PORTS **
in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
@@ -210,7 +212,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
@@ -227,7 +229,7 @@ machine(Directory, "MESI Two Level directory protocol")
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -286,15 +288,15 @@ machine(Directory, "MESI Two Level directory protocol")
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming request queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
@@ -322,7 +324,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
@@ -359,7 +361,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
diff --git a/src/mem/protocol/MESI_Two_Level-dma.sm b/src/mem/protocol/MESI_Two_Level-dma.sm
index cbd32cd44..84774ede8 100644
--- a/src/mem/protocol/MESI_Two_Level-dma.sm
+++ b/src/mem/protocol/MESI_Two_Level-dma.sm
@@ -51,6 +51,7 @@ machine(DMA, "DMA Controller")
}
State cur_state;
+ Tick clockEdge();
State getState(Addr addr) {
return cur_state;
@@ -78,7 +79,7 @@ machine(DMA, "DMA Controller")
out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -92,7 +93,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, makeLineAddress(in_msg.addr));
@@ -142,11 +143,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 334106615..0a1570494 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -103,6 +103,8 @@ machine(L1Cache, "MI Example L1 Cache")
TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
// PROTOTYPES
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -200,7 +202,7 @@ machine(L1Cache, "MI Example L1 Cache")
out_port(responseNetwork_out, ResponseMsg, responseFromCache);
in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
- if (forwardRequestNetwork_in.isReady()) {
+ if (forwardRequestNetwork_in.isReady(clockEdge())) {
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -226,7 +228,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -244,7 +246,7 @@ machine(L1Cache, "MI Example L1 Cache")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
@@ -330,15 +332,17 @@ machine(L1Cache, "MI Example L1 Cache")
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop the response queue") {
- profileMsgDelay(1, responseNetwork_in.dequeue());
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
- profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
+ Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(p_profileMiss, "pi", desc="Profile cache miss") {
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index bb4373901..f12e474b0 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -108,6 +108,9 @@ machine(Directory, "Directory protocol")
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
@@ -204,7 +207,7 @@ machine(Directory, "Directory protocol")
// ** IN_PORTS **
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
@@ -219,7 +222,7 @@ machine(Directory, "Directory protocol")
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETS) {
@@ -242,7 +245,7 @@ machine(Directory, "Directory protocol")
//added by SS
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
@@ -392,11 +395,11 @@ machine(Directory, "Directory protocol")
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
@@ -432,11 +435,11 @@ machine(Directory, "Directory protocol")
}
action(z_recycleRequestQueue, "z", desc="recycle request queue") {
- requestQueue_in.recycle();
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle();
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
@@ -476,7 +479,7 @@ machine(Directory, "Directory protocol")
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
// TRANSITIONS
diff --git a/src/mem/protocol/MI_example-dma.sm b/src/mem/protocol/MI_example-dma.sm
index ce7b44630..76d87516a 100644
--- a/src/mem/protocol/MI_example-dma.sm
+++ b/src/mem/protocol/MI_example-dma.sm
@@ -52,6 +52,9 @@ machine(DMA, "DMA Controller")
State cur_state;
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+
State getState(Addr addr) {
return cur_state;
}
@@ -78,7 +81,7 @@ machine(DMA, "DMA Controller")
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -92,7 +95,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
@@ -148,11 +151,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index 2ef80efd2..1b1fd4ac7 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -133,6 +133,8 @@ machine(L1Cache, "Directory protocol")
bool isPresent(Addr);
}
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -266,16 +268,16 @@ machine(L1Cache, "Directory protocol")
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable) {
- if (useTimerTable_in.isReady()) {
- trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()),
- TBEs[useTimerTable.readyAddress()]);
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
+ TBEs.lookup(readyAddress));
}
}
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.addr,
@@ -291,7 +293,7 @@ machine(L1Cache, "Directory protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
@@ -331,7 +333,7 @@ machine(L1Cache, "Directory protocol")
// Response Network
in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
- if (responseToL1Cache_in.isReady()) {
+ if (responseToL1Cache_in.isReady(clockEdge())) {
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr,
@@ -352,7 +354,7 @@ machine(L1Cache, "Directory protocol")
// Nothing from the unblock network
// Mandatory Queue betweens Node's CPU and it's L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -684,7 +686,7 @@ machine(L1Cache, "Directory protocol")
}
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
@@ -692,11 +694,11 @@ machine(L1Cache, "Directory protocol")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
@@ -715,7 +717,7 @@ machine(L1Cache, "Directory protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToL1Cache_in.dequeue();
+ responseToL1Cache_in.dequeue(clockEdge());
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
@@ -729,7 +731,8 @@ machine(L1Cache, "Directory protocol")
}
action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
- useTimerTable.set(address, use_timeout_latency);
+ useTimerTable.set(address,
+ clockEdge() + cyclesToTicks(use_timeout_latency));
}
action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
@@ -908,11 +911,11 @@ machine(L1Cache, "Directory protocol")
}
action(z_recycleRequestQueue, "z", desc="Send the head of the mandatory queue to the back of the queue.") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- mandatoryQueue_in.recycle();
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
//*****************************************************
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index 0b288709e..84fb276e3 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -227,6 +227,8 @@ machine(L2Cache, "Token protocol")
TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -577,7 +579,7 @@ machine(L2Cache, "Token protocol")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr,
@@ -592,7 +594,7 @@ machine(L2Cache, "Token protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
@@ -625,7 +627,7 @@ machine(L2Cache, "Token protocol")
}
in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady()) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
@@ -660,7 +662,7 @@ machine(L2Cache, "Token protocol")
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:ACK) {
@@ -1366,7 +1368,7 @@ machine(L2Cache, "Token protocol")
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(m_decrementNumberOfMessagesInt, "\m", desc="Decrement the number of messages for which we're waiting") {
@@ -1391,15 +1393,15 @@ machine(L2Cache, "Token protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(n_popTriggerQueue, "\n", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue();
+ L1requestNetwork_in.dequeue(clockEdge());
}
@@ -1538,21 +1540,21 @@ machine(L2Cache, "Token protocol")
peek(L1requestNetwork_in, RequestMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
}
- L1requestNetwork_in.recycle();
+ L1requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleRequestQueue, "\zz", desc="Send the head of the mandatory queue to the back of the queue.") {
peek(requestNetwork_in, RequestMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
}
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleResponseQueue, "\z\z", desc="Send the head of the mandatory queue to the back of the queue.") {
peek(responseNetwork_in, ResponseMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Sender);
}
- responseNetwork_in.recycle();
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(da_sendDmaAckUnblock, "da", desc="Send dma ack to global directory") {
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index 6ee7cd260..7175edc8d 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -119,6 +119,8 @@ machine(Directory, "Directory protocol")
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
@@ -228,7 +230,7 @@ machine(Directory, "Directory protocol")
// ** IN_PORTS **
in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
- if (unblockNetwork_in.isReady()) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
peek(unblockNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
@@ -261,7 +263,7 @@ machine(Directory, "Directory protocol")
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
@@ -288,7 +290,7 @@ machine(Directory, "Directory protocol")
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -438,11 +440,11 @@ machine(Directory, "Directory protocol")
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue();
+ unblockNetwork_in.dequeue(clockEdge());
}
action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
@@ -461,7 +463,7 @@ machine(Directory, "Directory protocol")
}
action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
@@ -501,7 +503,7 @@ machine(Directory, "Directory protocol")
}
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
- requestQueue_in.recycle();
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
index 10fc94abe..72dec6466 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -74,6 +74,7 @@ machine(DMA, "DMA Controller")
TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
State cur_state;
+ Tick clockEdge();
void set_tbe(TBE b);
void unset_tbe();
@@ -104,7 +105,7 @@ machine(DMA, "DMA Controller")
out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
@@ -120,7 +121,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
@@ -141,7 +142,7 @@ machine(DMA, "DMA Controller")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
@@ -215,15 +216,15 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(t_updateTBEData, "t", desc="Update TBE Data") {
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 230adfc4b..dac2027b9 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -184,6 +184,8 @@ machine(L1Cache, "Token protocol")
int countReadStarvingForAddress(Addr);
}
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -456,25 +458,26 @@ machine(L1Cache, "Token protocol")
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
- if (useTimerTable_in.isReady()) {
- TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
-
- if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
- (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
- if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
- trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ TBE tbe := L1_TBEs.lookup(readyAddress);
+
+ if (persistentTable.isLocked(readyAddress) &&
+ (persistentTable.findSmallest(readyAddress) != machineID)) {
+ if (persistentTable.typeOfSmallest(readyAddress) == AccessType:Write) {
+ trigger(Event:Use_TimeoutStarverX, readyAddress,
+ getCacheEntry(readyAddress), tbe);
} else {
- trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutStarverS, readyAddress,
+ getCacheEntry(readyAddress), tbe);
}
} else {
if (no_mig_atomic && IsAtomic(tbe)) {
- trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutNoStarvers_NoMig, readyAddress,
+ getCacheEntry(readyAddress), tbe);
} else {
- trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutNoStarvers, readyAddress,
+ getCacheEntry(readyAddress), tbe);
}
}
}
@@ -482,16 +485,17 @@ machine(L1Cache, "Token protocol")
// Reissue Timer
in_port(reissueTimerTable_in, Addr, reissueTimerTable, rank=4) {
- if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- getCacheEntry(reissueTimerTable.readyAddress()),
- L1_TBEs[reissueTimerTable.readyAddress()]);
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, getCacheEntry(addr),
+ L1_TBEs.lookup(addr));
}
}
// Persistent Network
in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -541,7 +545,7 @@ machine(L1Cache, "Token protocol")
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -612,7 +616,7 @@ machine(L1Cache, "Token protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -659,7 +663,7 @@ machine(L1Cache, "Token protocol")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -792,7 +796,8 @@ machine(L1Cache, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
} else {
@@ -844,9 +849,11 @@ machine(L1Cache, "Token protocol")
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
} else {
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
}
@@ -911,7 +918,8 @@ machine(L1Cache, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
} else {
@@ -968,9 +976,11 @@ machine(L1Cache, "Token protocol")
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
} else {
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
}
}
@@ -1376,23 +1386,24 @@ machine(L1Cache, "Token protocol")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
- useTimerTable.set(address, use_timeout_latency);
+ useTimerTable.set(
+ address, clockEdge() + cyclesToTicks(use_timeout_latency));
}
action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
index 52bd19bcc..2ab593394 100644
--- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -149,6 +149,7 @@ machine(L2Cache, "Token protocol")
PersistentTable persistentTable;
PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
@@ -326,7 +327,7 @@ machine(L2Cache, "Token protocol")
// Persistent Network
in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -366,7 +367,7 @@ machine(L2Cache, "Token protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -389,7 +390,7 @@ machine(L2Cache, "Token protocol")
}
in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady()) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -413,7 +414,7 @@ machine(L2Cache, "Token protocol")
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -870,19 +871,19 @@ machine(L2Cache, "Token protocol")
}
action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue();
+ L1requestNetwork_in.dequeue(clockEdge());
}
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
index ffef01eb0..63790531f 100644
--- a/src/mem/protocol/MOESI_CMP_token-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -172,6 +172,8 @@ machine(Directory, "Token protocol")
bool starving, default="false";
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
@@ -276,7 +278,7 @@ machine(Directory, "Token protocol")
// ** IN_PORTS **
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -292,14 +294,15 @@ machine(Directory, "Token protocol")
// Reissue Timer
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
- if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- TBEs[reissueTimerTable.readyAddress()]);
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
@@ -338,7 +341,7 @@ machine(Directory, "Token protocol")
}
in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -400,7 +403,7 @@ machine(Directory, "Token protocol")
}
in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
@@ -415,7 +418,7 @@ machine(Directory, "Token protocol")
}
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
@@ -490,7 +493,7 @@ machine(Directory, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
}
}
@@ -558,7 +561,7 @@ machine(Directory, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
}
}
@@ -752,35 +755,35 @@ machine(Directory, "Token protocol")
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(z_recycleRequest, "z", desc="Recycle the request queue") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
- responseNetwork_in.recycle();
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle();
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
@@ -804,7 +807,7 @@ machine(Directory, "Token protocol")
//
if (reissueTimerTable.isSet(address)) {
reissueTimerTable.unset(address);
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
}
}
@@ -812,7 +815,7 @@ machine(Directory, "Token protocol")
//
// currently only support a fixed timeout latency
//
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
}
action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
diff --git a/src/mem/protocol/MOESI_CMP_token-dma.sm b/src/mem/protocol/MOESI_CMP_token-dma.sm
index 4bb80d4ba..efe3db3cd 100644
--- a/src/mem/protocol/MOESI_CMP_token-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dma.sm
@@ -54,6 +54,8 @@ machine(DMA, "DMA Controller")
State cur_state;
+ Tick clockEdge();
+
State getState(Addr addr) {
return cur_state;
}
@@ -80,7 +82,7 @@ machine(DMA, "DMA Controller")
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -94,7 +96,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
@@ -150,11 +152,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index 88b7308ed..5d2383541 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -181,6 +181,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -329,7 +330,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -352,7 +353,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Response Network
in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
- if (responseToCache_in.isReady()) {
+ if (responseToCache_in.isReady(clockEdge())) {
peek(responseToCache_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -377,7 +378,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Forward Network
in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
- if (forwardToCache_in.isReady()) {
+ if (forwardToCache_in.isReady(clockEdge())) {
peek(forwardToCache_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -421,7 +422,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -950,15 +951,15 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
- forwardToCache_in.dequeue();
+ forwardToCache_in.dequeue(clockEdge());
}
action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
@@ -1017,7 +1018,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToCache_in.dequeue();
+ responseToCache_in.dequeue(clockEdge());
}
action(ll_L2toL1Transfer, "ll", desc="") {
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index 4948a8108..4f5b00658 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -184,6 +184,7 @@ machine(Directory, "AMD Hammer-like protocol")
bool isPresent(Addr);
}
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -314,7 +315,7 @@ machine(Directory, "AMD Hammer-like protocol")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -338,7 +339,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
- if (unblockNetwork_in.isReady()) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -367,7 +368,7 @@ machine(Directory, "AMD Hammer-like protocol")
// Response Network
in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
- if (responseToDir_in.isReady()) {
+ if (responseToDir_in.isReady(clockEdge())) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -390,7 +391,7 @@ machine(Directory, "AMD Hammer-like protocol")
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -407,7 +408,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -441,7 +442,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
TBE tbe := TBEs[in_msg.LineAddress];
@@ -682,7 +683,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToDir_in.dequeue();
+ responseToDir_in.dequeue(clockEdge());
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
@@ -1115,14 +1116,14 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
peek(unblockNetwork_in, ResponseMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Sender);
}
- unblockNetwork_in.dequeue();
+ unblockNetwork_in.dequeue(clockEdge());
}
action(k_wakeUpDependents, "k", desc="wake-up dependents") {
@@ -1130,15 +1131,15 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
diff --git a/src/mem/protocol/MOESI_hammer-dma.sm b/src/mem/protocol/MOESI_hammer-dma.sm
index 4691e2490..7157082c4 100644
--- a/src/mem/protocol/MOESI_hammer-dma.sm
+++ b/src/mem/protocol/MOESI_hammer-dma.sm
@@ -52,6 +52,8 @@ machine(DMA, "DMA Controller")
State cur_state;
+ Tick clockEdge();
+
State getState(Addr addr) {
return cur_state;
}
@@ -77,7 +79,7 @@ machine(DMA, "DMA Controller")
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -91,7 +93,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
@@ -147,11 +149,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/Network_test-cache.sm b/src/mem/protocol/Network_test-cache.sm
index 82829a6ea..dab8f1089 100644
--- a/src/mem/protocol/Network_test-cache.sm
+++ b/src/mem/protocol/Network_test-cache.sm
@@ -68,6 +68,7 @@ machine(L1Cache, "Network_test L1 Cache")
}
// FUNCTIONS
+ Tick clockEdge();
// cpu/testers/networktest/networktest.cc generates packets of the type
// ReadReq, INST_FETCH, and WriteReq.
@@ -129,7 +130,7 @@ machine(L1Cache, "Network_test L1 Cache")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest) {
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
@@ -174,7 +175,7 @@ machine(L1Cache, "Network_test L1 Cache")
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
diff --git a/src/mem/protocol/Network_test-dir.sm b/src/mem/protocol/Network_test-dir.sm
index d618e98ff..6bd6920b3 100644
--- a/src/mem/protocol/Network_test-dir.sm
+++ b/src/mem/protocol/Network_test-dir.sm
@@ -60,7 +60,9 @@ machine(Directory, "Network_test Directory")
DataBlock DataBlk, desc="data for the block";
}
- // ** OBJECTS **
+ // ** FUNCTIONS **
+ Tick clockEdge();
+
State getState(Addr addr) {
return State:I;
}
@@ -87,7 +89,7 @@ machine(Directory, "Network_test Directory")
// ** IN_PORTS **
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Request, in_msg.addr);
@@ -98,7 +100,7 @@ machine(Directory, "Network_test Directory")
}
}
in_port(forwardQueue_in, RequestMsg, forwardToDir) {
- if (forwardQueue_in.isReady()) {
+ if (forwardQueue_in.isReady(clockEdge())) {
peek(forwardQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Forward, in_msg.addr);
@@ -109,7 +111,7 @@ machine(Directory, "Network_test Directory")
}
}
in_port(responseQueue_in, RequestMsg, responseToDir) {
- if (responseQueue_in.isReady()) {
+ if (responseQueue_in.isReady(clockEdge())) {
peek(responseQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Response, in_msg.addr);
@@ -123,15 +125,15 @@ machine(Directory, "Network_test Directory")
// Actions
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
- forwardQueue_in.dequeue();
+ forwardQueue_in.dequeue(clockEdge());
}
action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
- responseQueue_in.dequeue();
+ responseQueue_in.dequeue(clockEdge());
}
// TRANSITIONS
diff --git a/src/mem/protocol/RubySlicc_Defines.sm b/src/mem/protocol/RubySlicc_Defines.sm
index d4f7fa58f..eb235f8f3 100644
--- a/src/mem/protocol/RubySlicc_Defines.sm
+++ b/src/mem/protocol/RubySlicc_Defines.sm
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -31,6 +30,7 @@
NodeID version;
MachineID machineID;
NodeID clusterID;
+Cycles recycle_latency;
// Functions implemented in the AbstractController class for
// making timing access to the memory maintained by the
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index eeec185fc..7c2069203 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -37,6 +37,7 @@ external_type(PacketPtr, primitive="yes");
external_type(Packet, primitive="yes");
external_type(Addr, primitive="yes");
external_type(Cycles, primitive="yes", default="Cycles(0)");
+external_type(Tick, primitive="yes", default="0");
structure(DataBlock, external = "yes", desc="..."){
void clear();
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index f464b3c7d..a8bf93bcc 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -41,9 +41,9 @@ external_type(OutPort, primitive="yes");
external_type(Scalar, primitive="yes");
structure(InPort, external = "yes", primitive="yes") {
- bool isReady();
- Cycles dequeue();
- void recycle();
+ bool isReady(Tick current_time);
+ Tick dequeue(Tick current_time);
+ void recycle(Tick current_time, Tick recycle_latency);
bool isEmpty();
bool isStallMapEmpty();
int getStallMapSize();
@@ -179,9 +179,9 @@ structure (DMASequencer, external = "yes") {
}
structure (TimerTable, inport="yes", external = "yes") {
- bool isReady();
- Addr readyAddress();
- void set(Addr, Cycles);
+ bool isReady(Tick);
+ Addr nextAddress();
+ void set(Addr, Tick);
void unset(Addr);
bool isSet(Addr);
}