-rw-r--r--  src/mem/protocol/MESI_Three_Level-L0cache.sm | 14
-rw-r--r--  src/mem/protocol/MESI_Three_Level-L1cache.sm | 16
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L1cache.sm | 20
-rw-r--r--  src/mem/protocol/MESI_Two_Level-L2cache.sm | 21
-rw-r--r--  src/mem/protocol/MESI_Two_Level-dir.sm | 18
-rw-r--r--  src/mem/protocol/MESI_Two_Level-dma.sm | 9
-rw-r--r--  src/mem/protocol/MI_example-cache.sm | 16
-rw-r--r--  src/mem/protocol/MI_example-dir.sm | 19
-rw-r--r--  src/mem/protocol/MI_example-dma.sm | 11
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm | 33
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L2cache.sm | 24
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dir.sm | 16
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dma.sm | 13
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L1cache.sm | 77
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-L2cache.sm | 17
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-dir.sm | 43
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-dma.sm | 10
-rw-r--r--  src/mem/protocol/MOESI_hammer-cache.sm | 17
-rw-r--r--  src/mem/protocol/MOESI_hammer-dir.sm | 25
-rw-r--r--  src/mem/protocol/MOESI_hammer-dma.sm | 10
-rw-r--r--  src/mem/protocol/Network_test-cache.sm | 5
-rw-r--r--  src/mem/protocol/Network_test-dir.sm | 16
-rw-r--r--  src/mem/protocol/RubySlicc_Defines.sm | 2
-rw-r--r--  src/mem/protocol/RubySlicc_Exports.sm | 1
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm | 12
-rw-r--r--  src/mem/ruby/network/MessageBuffer.cc | 104
-rw-r--r--  src/mem/ruby/network/MessageBuffer.hh | 57
-rw-r--r--  src/mem/ruby/network/MessageBuffer.py | 1
-rw-r--r--  src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc | 16
-rw-r--r--  src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc | 15
-rw-r--r--  src/mem/ruby/network/simple/PerfectSwitch.cc | 12
-rw-r--r--  src/mem/ruby/network/simple/SimpleNetwork.py | 6
-rw-r--r--  src/mem/ruby/network/simple/Switch.cc | 13
-rw-r--r--  src/mem/ruby/network/simple/Throttle.cc | 20
-rw-r--r--  src/mem/ruby/slicc_interface/AbstractController.cc | 13
-rw-r--r--  src/mem/ruby/structures/TBETable.hh | 4
-rw-r--r--  src/mem/ruby/structures/TimerTable.cc | 17
-rw-r--r--  src/mem/ruby/structures/TimerTable.hh | 21
-rw-r--r--  src/mem/ruby/system/DMASequencer.cc | 5
-rw-r--r--  src/mem/ruby/system/RubyPort.cc | 1
-rw-r--r--  src/mem/ruby/system/Sequencer.cc | 2
-rw-r--r--  src/mem/slicc/ast/EnqueueStatementAST.py | 5
-rw-r--r--  src/mem/slicc/ast/ObjDeclAST.py | 2
-rw-r--r--  src/mem/slicc/ast/PeekStatementAST.py | 2
-rw-r--r--  src/mem/slicc/ast/StallAndWaitStatementAST.py | 2
-rw-r--r--  src/mem/slicc/symbols/StateMachine.py | 19
46 files changed, 383 insertions, 419 deletions
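
The recurring pattern across all of the protocol files below: MessageBuffer and TimerTable no longer read their controller's clock internally, so every isReady()/dequeue()/recycle() call site now passes the current time explicitly, dequeue() returns the message's queueing delay in ticks, and each SLICC machine declares clockEdge()/cyclesToTicks()/ticksToCycles() prototypes to reach the controller's clock helpers. A minimal sketch of the consumer-facing MessageBuffer interface implied by these call sites (the real header is src/mem/ruby/network/MessageBuffer.hh, touched by this diff but not shown in this excerpt, so treat the signatures as inferred):

    // Inferred time-based MessageBuffer interface (sketch, not the exact header):
    class MessageBuffer
    {
      public:
        // A message is consumable once the current tick reaches its ready time.
        bool isReady(Tick current_time) const;

        // Pop the head message; returns how long it waited in the buffer, in ticks.
        Tick dequeue(Tick current_time);

        // Requeue the head message, ready again recycle_latency ticks from now.
        void recycle(Tick current_time, Tick recycle_latency);
    };
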
diff --git a/src/mem/protocol/MESI_Three_Level-L0cache.sm b/src/mem/protocol/MESI_Three_Level-L0cache.sm
index 7e8626dc9..3f22a4906 100644
--- a/src/mem/protocol/MESI_Three_Level-L0cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L0cache.sm
@@ -135,6 +135,8 @@ machine(L0Cache, "MESI Directory L0 Cache")
TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -255,7 +257,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
// Messages for this L0 cache from the L1 cache
in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
- if (messgeBuffer_in.isReady()) {
+ if (messgeBuffer_in.isReady(clockEdge())) {
peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
assert(in_msg.Dest == machineID);
@@ -289,7 +291,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
// Mandatory Queue betweens Node's CPU and it's L0 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -513,17 +515,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, messgeBuffer_in.dequeue());
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, messgeBuffer_in.dequeue());
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
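
The Tick/Cycles prototypes added at the top of each machine bind to helpers on the controller's clocked-object base; simplified from gem5's clocked-object interface, roughly:

    // Simplified view of the clock helpers the SLICC prototypes resolve to:
    Tick clockEdge(Cycles c = Cycles(0)) const;  // absolute tick of the current
                                                 // (or a future) clock edge
    Tick cyclesToTicks(Cycles c) const { return c * clockPeriod(); }
    Cycles ticksToCycles(Tick t) const { return Cycles(divCeil(t, clockPeriod())); }

This is why the pop-and-profile actions above first capture the Tick delay returned by dequeue() and only then convert it: profileMsgDelay() still accounts in Cycles.
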
diff --git a/src/mem/protocol/MESI_Three_Level-L1cache.sm b/src/mem/protocol/MESI_Three_Level-L1cache.sm
index 6c8df8d75..0eb9a43b5 100644
--- a/src/mem/protocol/MESI_Three_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L1cache.sm
@@ -151,6 +151,8 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -266,7 +268,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Response From the L2 Cache to this L1 cache
in_port(responseNetwork_in, ResponseMsg, responseFromL2, rank = 3) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -303,7 +305,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Request to this L1 cache from the shared L2
in_port(requestNetwork_in, RequestMsg, requestFromL2, rank = 2) {
- if(requestNetwork_in.isReady()) {
+ if(requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -340,7 +342,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Requests to this L1 cache from the L0 cache.
in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
- if (messageBufferFromL0_in.isReady()) {
+ if (messageBufferFromL0_in.isReady(clockEdge())) {
peek(messageBufferFromL0_in, CoherenceMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -634,17 +636,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
action(k_popL0RequestQueue, "k", desc="Pop mandatory queue.") {
- messageBufferFromL0_in.dequeue();
+ messageBufferFromL0_in.dequeue(clockEdge());
}
action(l_popL2RequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestNetwork_in.dequeue());
+ Tick delay := requestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popL2ResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseNetwork_in.dequeue());
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
diff --git a/src/mem/protocol/MESI_Two_Level-L1cache.sm b/src/mem/protocol/MESI_Two_Level-L1cache.sm
index b9be4663f..c40a47cae 100644
--- a/src/mem/protocol/MESI_Two_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L1cache.sm
@@ -156,6 +156,8 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -296,7 +298,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// searches of all entries in the queue, not just the head msg. All
// msgs in the structure can be invalidated if a demand miss matches.
in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
- if (optionalQueue_in.isReady()) {
+ if (optionalQueue_in.isReady(clockEdge())) {
peek(optionalQueue_in, RubyRequest) {
// Instruction Prefetch
if (in_msg.Type == RubyRequestType:IFETCH) {
@@ -373,7 +375,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Response L1 Network - response msg to this L1 cache
in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
- if (responseL1Network_in.isReady()) {
+ if (responseL1Network_in.isReady(clockEdge())) {
peek(responseL1Network_in, ResponseMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -413,7 +415,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Request InterChip network - request from this L1 cache to the shared L2
in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
- if(requestL1Network_in.isReady()) {
+ if(requestL1Network_in.isReady(clockEdge())) {
peek(requestL1Network_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -439,7 +441,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
// Mandatory Queue betweens Node's CPU and it's L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -866,17 +868,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestL1Network_in.dequeue());
+ Tick delay := requestL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseL1Network_in.dequeue());
+ Tick delay := responseL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
@@ -963,7 +967,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
}
action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
- optionalQueue_in.dequeue();
+ optionalQueue_in.dequeue(clockEdge());
}
action(mp_markPrefetched, "mp", desc="Write data from response queue to cache") {
diff --git a/src/mem/protocol/MESI_Two_Level-L2cache.sm b/src/mem/protocol/MESI_Two_Level-L2cache.sm
index e4f719d9f..4134b7964 100644
--- a/src/mem/protocol/MESI_Two_Level-L2cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L2cache.sm
@@ -148,6 +148,10 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ Cycles ticksToCycles(Tick t);
+
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -285,7 +289,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
- if(L1unblockNetwork_in.isReady()) {
+ if(L1unblockNetwork_in.isReady(clockEdge())) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -307,7 +311,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// Response L2 Network - response msg to this particular L2 bank
in_port(responseL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
- if (responseL2Network_in.isReady()) {
+ if (responseL2Network_in.isReady(clockEdge())) {
peek(responseL2Network_in, ResponseMsg) {
// test wether it's from a local L1 or an off chip source
assert(in_msg.Destination.isElement(machineID));
@@ -348,7 +352,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
// L1 Request
in_port(L1RequestL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
- if(L1RequestL2Network_in.isReady()) {
+ if(L1RequestL2Network_in.isReady(clockEdge())) {
peek(L1RequestL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -604,15 +608,18 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
- profileMsgDelay(0, L1RequestL2Network_in.dequeue());
+ Tick delay := L1RequestL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
}
action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
- profileMsgDelay(0, L1unblockNetwork_in.dequeue());
+ Tick delay := L1unblockNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
- profileMsgDelay(1, responseL2Network_in.dequeue());
+ Tick delay := responseL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
@@ -769,7 +776,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
}
action(zn_recycleResponseNetwork, "zn", desc="recycle memory request") {
- responseL2Network_in.recycle();
+ responseL2Network_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
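
recycle() previously pulled its latency from the buffer itself; callers now supply both the current tick and the delay, converting the protocol's recycle_latency (a Cycles parameter) at each call site. A plausible sketch of the new semantics (the actual body lives in MessageBuffer.cc, which this diff touches but does not show; member names are assumptions):

    // Assumed shape of MessageBuffer::recycle() under the new interface:
    void
    MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
    {
        assert(isReady(current_time));
        MsgPtr node = m_prio_heap.front();
        std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
                      std::greater<MsgPtr>());

        Tick future_time = current_time + recycle_latency;
        node->setLastEnqueueTime(future_time);   // ready again later

        m_prio_heap.back() = node;               // reinsert at the back
        std::push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                       std::greater<MsgPtr>());
        m_consumer->scheduleEventAbsolute(future_time);  // wake consumer then
    }
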
diff --git a/src/mem/protocol/MESI_Two_Level-dir.sm b/src/mem/protocol/MESI_Two_Level-dir.sm
index 7484d001c..c9fbe3875 100644
--- a/src/mem/protocol/MESI_Two_Level-dir.sm
+++ b/src/mem/protocol/MESI_Two_Level-dir.sm
@@ -98,6 +98,8 @@ machine(Directory, "MESI Two Level directory protocol")
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE tbe);
void unset_tbe();
void wakeUpBuffers(Addr a);
@@ -190,7 +192,7 @@ machine(Directory, "MESI Two Level directory protocol")
// ** IN_PORTS **
in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
@@ -210,7 +212,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
@@ -227,7 +229,7 @@ machine(Directory, "MESI Two Level directory protocol")
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -286,15 +288,15 @@ machine(Directory, "MESI Two Level directory protocol")
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming request queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
@@ -322,7 +324,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
@@ -359,7 +361,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
diff --git a/src/mem/protocol/MESI_Two_Level-dma.sm b/src/mem/protocol/MESI_Two_Level-dma.sm
index cbd32cd44..84774ede8 100644
--- a/src/mem/protocol/MESI_Two_Level-dma.sm
+++ b/src/mem/protocol/MESI_Two_Level-dma.sm
@@ -51,6 +51,7 @@ machine(DMA, "DMA Controller")
}
State cur_state;
+ Tick clockEdge();
State getState(Addr addr) {
return cur_state;
@@ -78,7 +79,7 @@ machine(DMA, "DMA Controller")
out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -92,7 +93,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, makeLineAddress(in_msg.addr));
@@ -142,11 +143,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 334106615..0a1570494 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -103,6 +103,8 @@ machine(L1Cache, "MI Example L1 Cache")
TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
// PROTOTYPES
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -200,7 +202,7 @@ machine(L1Cache, "MI Example L1 Cache")
out_port(responseNetwork_out, ResponseMsg, responseFromCache);
in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
- if (forwardRequestNetwork_in.isReady()) {
+ if (forwardRequestNetwork_in.isReady(clockEdge())) {
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -226,7 +228,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -244,7 +246,7 @@ machine(L1Cache, "MI Example L1 Cache")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
@@ -330,15 +332,17 @@ machine(L1Cache, "MI Example L1 Cache")
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop the response queue") {
- profileMsgDelay(1, responseNetwork_in.dequeue());
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
- profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
+ Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(p_profileMiss, "pi", desc="Profile cache miss") {
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index bb4373901..f12e474b0 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -108,6 +108,9 @@ machine(Directory, "Directory protocol")
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
@@ -204,7 +207,7 @@ machine(Directory, "Directory protocol")
// ** IN_PORTS **
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
@@ -219,7 +222,7 @@ machine(Directory, "Directory protocol")
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETS) {
@@ -242,7 +245,7 @@ machine(Directory, "Directory protocol")
//added by SS
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
@@ -392,11 +395,11 @@ machine(Directory, "Directory protocol")
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
@@ -432,11 +435,11 @@ machine(Directory, "Directory protocol")
}
action(z_recycleRequestQueue, "z", desc="recycle request queue") {
- requestQueue_in.recycle();
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle();
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
@@ -476,7 +479,7 @@ machine(Directory, "Directory protocol")
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
// TRANSITIONS
diff --git a/src/mem/protocol/MI_example-dma.sm b/src/mem/protocol/MI_example-dma.sm
index ce7b44630..76d87516a 100644
--- a/src/mem/protocol/MI_example-dma.sm
+++ b/src/mem/protocol/MI_example-dma.sm
@@ -52,6 +52,9 @@ machine(DMA, "DMA Controller")
State cur_state;
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+
State getState(Addr addr) {
return cur_state;
}
@@ -78,7 +81,7 @@ machine(DMA, "DMA Controller")
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -92,7 +95,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
@@ -148,11 +151,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index 2ef80efd2..1b1fd4ac7 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -133,6 +133,8 @@ machine(L1Cache, "Directory protocol")
bool isPresent(Addr);
}
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -266,16 +268,16 @@ machine(L1Cache, "Directory protocol")
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable) {
- if (useTimerTable_in.isReady()) {
- trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()),
- TBEs[useTimerTable.readyAddress()]);
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
+ TBEs.lookup(readyAddress));
}
}
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.addr,
@@ -291,7 +293,7 @@ machine(L1Cache, "Directory protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
@@ -331,7 +333,7 @@ machine(L1Cache, "Directory protocol")
// Response Network
in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
- if (responseToL1Cache_in.isReady()) {
+ if (responseToL1Cache_in.isReady(clockEdge())) {
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr,
@@ -352,7 +354,7 @@ machine(L1Cache, "Directory protocol")
// Nothing from the unblock network
// Mandatory Queue betweens Node's CPU and it's L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -684,7 +686,7 @@ machine(L1Cache, "Directory protocol")
}
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
@@ -692,11 +694,11 @@ machine(L1Cache, "Directory protocol")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
@@ -715,7 +717,7 @@ machine(L1Cache, "Directory protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToL1Cache_in.dequeue();
+ responseToL1Cache_in.dequeue(clockEdge());
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
@@ -729,7 +731,8 @@ machine(L1Cache, "Directory protocol")
}
action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
- useTimerTable.set(address, use_timeout_latency);
+ useTimerTable.set(address,
+ clockEdge() + cyclesToTicks(use_timeout_latency));
}
action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
@@ -908,11 +911,11 @@ machine(L1Cache, "Directory protocol")
}
action(z_recycleRequestQueue, "z", desc="Send the head of the mandatory queue to the back of the queue.") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- mandatoryQueue_in.recycle();
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
//*****************************************************
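
The timer-table changes in this file follow the same scheme: readyAddress() becomes nextAddress(), isReady() takes the current tick, and set() now takes an absolute deadline rather than a relative cycle count, which is why the call sites compute clockEdge() + cyclesToTicks(latency). An inferred sketch of the reworked interface (TimerTable.hh appears in the diffstat but not in this excerpt; names are assumptions based on the call sites):

    // Inferred TimerTable interface after this change (sketch):
    class TimerTable
    {
      public:
        bool isReady(Tick current_time) const;    // has any timer expired by now?
        Addr nextAddress();                       // address whose timer expires soonest
        void set(Addr address, Tick ready_time);  // absolute deadline, in ticks
        void unset(Addr address);
        bool isSet(Addr address) const;
    };
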
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index 0b288709e..84fb276e3 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -227,6 +227,8 @@ machine(L2Cache, "Token protocol")
TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -577,7 +579,7 @@ machine(L2Cache, "Token protocol")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr,
@@ -592,7 +594,7 @@ machine(L2Cache, "Token protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
@@ -625,7 +627,7 @@ machine(L2Cache, "Token protocol")
}
in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady()) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
@@ -660,7 +662,7 @@ machine(L2Cache, "Token protocol")
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:ACK) {
@@ -1366,7 +1368,7 @@ machine(L2Cache, "Token protocol")
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(m_decrementNumberOfMessagesInt, "\m", desc="Decrement the number of messages for which we're waiting") {
@@ -1391,15 +1393,15 @@ machine(L2Cache, "Token protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(n_popTriggerQueue, "\n", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue();
+ L1requestNetwork_in.dequeue(clockEdge());
}
@@ -1538,21 +1540,21 @@ machine(L2Cache, "Token protocol")
peek(L1requestNetwork_in, RequestMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
}
- L1requestNetwork_in.recycle();
+ L1requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleRequestQueue, "\zz", desc="Send the head of the mandatory queue to the back of the queue.") {
peek(requestNetwork_in, RequestMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
}
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleResponseQueue, "\z\z", desc="Send the head of the mandatory queue to the back of the queue.") {
peek(responseNetwork_in, ResponseMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Sender);
}
- responseNetwork_in.recycle();
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(da_sendDmaAckUnblock, "da", desc="Send dma ack to global directory") {
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index 6ee7cd260..7175edc8d 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -119,6 +119,8 @@ machine(Directory, "Directory protocol")
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
@@ -228,7 +230,7 @@ machine(Directory, "Directory protocol")
// ** IN_PORTS **
in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
- if (unblockNetwork_in.isReady()) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
peek(unblockNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
@@ -261,7 +263,7 @@ machine(Directory, "Directory protocol")
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
@@ -288,7 +290,7 @@ machine(Directory, "Directory protocol")
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -438,11 +440,11 @@ machine(Directory, "Directory protocol")
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue();
+ unblockNetwork_in.dequeue(clockEdge());
}
action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
@@ -461,7 +463,7 @@ machine(Directory, "Directory protocol")
}
action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
@@ -501,7 +503,7 @@ machine(Directory, "Directory protocol")
}
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
- requestQueue_in.recycle();
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
index 10fc94abe..72dec6466 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -74,6 +74,7 @@ machine(DMA, "DMA Controller")
TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
State cur_state;
+ Tick clockEdge();
void set_tbe(TBE b);
void unset_tbe();
@@ -104,7 +105,7 @@ machine(DMA, "DMA Controller")
out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
@@ -120,7 +121,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
@@ -141,7 +142,7 @@ machine(DMA, "DMA Controller")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
@@ -215,15 +216,15 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(t_updateTBEData, "t", desc="Update TBE Data") {
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 230adfc4b..dac2027b9 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -184,6 +184,8 @@ machine(L1Cache, "Token protocol")
int countReadStarvingForAddress(Addr);
}
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -456,25 +458,26 @@ machine(L1Cache, "Token protocol")
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
- if (useTimerTable_in.isReady()) {
- TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
-
- if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
- (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
- if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
- trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ TBE tbe := L1_TBEs.lookup(readyAddress);
+
+ if (persistentTable.isLocked(readyAddress) &&
+ (persistentTable.findSmallest(readyAddress) != machineID)) {
+ if (persistentTable.typeOfSmallest(readyAddress) == AccessType:Write) {
+ trigger(Event:Use_TimeoutStarverX, readyAddress,
+ getCacheEntry(readyAddress), tbe);
} else {
- trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutStarverS, readyAddress,
+ getCacheEntry(readyAddress), tbe);
}
} else {
if (no_mig_atomic && IsAtomic(tbe)) {
- trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutNoStarvers_NoMig, readyAddress,
+ getCacheEntry(readyAddress), tbe);
} else {
- trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutNoStarvers, readyAddress,
+ getCacheEntry(readyAddress), tbe);
}
}
}
@@ -482,16 +485,17 @@ machine(L1Cache, "Token protocol")
// Reissue Timer
in_port(reissueTimerTable_in, Addr, reissueTimerTable, rank=4) {
- if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- getCacheEntry(reissueTimerTable.readyAddress()),
- L1_TBEs[reissueTimerTable.readyAddress()]);
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, getCacheEntry(addr),
+ L1_TBEs.lookup(addr));
}
}
// Persistent Network
in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -541,7 +545,7 @@ machine(L1Cache, "Token protocol")
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -612,7 +616,7 @@ machine(L1Cache, "Token protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
@@ -659,7 +663,7 @@ machine(L1Cache, "Token protocol")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -792,7 +796,8 @@ machine(L1Cache, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
} else {
@@ -844,9 +849,11 @@ machine(L1Cache, "Token protocol")
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
} else {
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
}
@@ -911,7 +918,8 @@ machine(L1Cache, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
} else {
@@ -968,9 +976,11 @@ machine(L1Cache, "Token protocol")
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
} else {
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
}
}
@@ -1376,23 +1386,24 @@ machine(L1Cache, "Token protocol")
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
- useTimerTable.set(address, use_timeout_latency);
+ useTimerTable.set(
+ address, clockEdge() + cyclesToTicks(use_timeout_latency));
}
action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
index 52bd19bcc..2ab593394 100644
--- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -149,6 +149,7 @@ machine(L2Cache, "Token protocol")
PersistentTable persistentTable;
PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
@@ -326,7 +327,7 @@ machine(L2Cache, "Token protocol")
// Persistent Network
in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -366,7 +367,7 @@ machine(L2Cache, "Token protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -389,7 +390,7 @@ machine(L2Cache, "Token protocol")
}
in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady()) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -413,7 +414,7 @@ machine(L2Cache, "Token protocol")
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -870,19 +871,19 @@ machine(L2Cache, "Token protocol")
}
action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue();
+ L1requestNetwork_in.dequeue(clockEdge());
}
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
index ffef01eb0..63790531f 100644
--- a/src/mem/protocol/MOESI_CMP_token-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -172,6 +172,8 @@ machine(Directory, "Token protocol")
bool starving, default="false";
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
@@ -276,7 +278,7 @@ machine(Directory, "Token protocol")
// ** IN_PORTS **
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
@@ -292,14 +294,15 @@ machine(Directory, "Token protocol")
// Reissue Timer
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
- if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- TBEs[reissueTimerTable.readyAddress()]);
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
@@ -338,7 +341,7 @@ machine(Directory, "Token protocol")
}
in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));
@@ -400,7 +403,7 @@ machine(Directory, "Token protocol")
}
in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
@@ -415,7 +418,7 @@ machine(Directory, "Token protocol")
}
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
@@ -490,7 +493,7 @@ machine(Directory, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
}
}
@@ -558,7 +561,7 @@ machine(Directory, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
}
}
@@ -752,35 +755,35 @@ machine(Directory, "Token protocol")
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(z_recycleRequest, "z", desc="Recycle the request queue") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
- responseNetwork_in.recycle();
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle();
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
@@ -804,7 +807,7 @@ machine(Directory, "Token protocol")
//
if (reissueTimerTable.isSet(address)) {
reissueTimerTable.unset(address);
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
}
}
@@ -812,7 +815,7 @@ machine(Directory, "Token protocol")
//
// currently only support a fixed timeout latency
//
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
}
action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
diff --git a/src/mem/protocol/MOESI_CMP_token-dma.sm b/src/mem/protocol/MOESI_CMP_token-dma.sm
index 4bb80d4ba..efe3db3cd 100644
--- a/src/mem/protocol/MOESI_CMP_token-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dma.sm
@@ -54,6 +54,8 @@ machine(DMA, "DMA Controller")
State cur_state;
+ Tick clockEdge();
+
State getState(Addr addr) {
return cur_state;
}
@@ -80,7 +82,7 @@ machine(DMA, "DMA Controller")
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -94,7 +96,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
@@ -150,11 +152,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index 88b7308ed..5d2383541 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -181,6 +181,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
@@ -329,7 +330,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -352,7 +353,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Response Network
in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
- if (responseToCache_in.isReady()) {
+ if (responseToCache_in.isReady(clockEdge())) {
peek(responseToCache_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -377,7 +378,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Forward Network
in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
- if (forwardToCache_in.isReady()) {
+ if (forwardToCache_in.isReady(clockEdge())) {
peek(forwardToCache_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
@@ -421,7 +422,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@@ -950,15 +951,15 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
- forwardToCache_in.dequeue();
+ forwardToCache_in.dequeue(clockEdge());
}
action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
@@ -1017,7 +1018,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToCache_in.dequeue();
+ responseToCache_in.dequeue(clockEdge());
}
action(ll_L2toL1Transfer, "ll", desc="") {
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index 4948a8108..4f5b00658 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -184,6 +184,7 @@ machine(Directory, "AMD Hammer-like protocol")
bool isPresent(Addr);
}
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE a);
@@ -314,7 +315,7 @@ machine(Directory, "AMD Hammer-like protocol")
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -338,7 +339,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
- if (unblockNetwork_in.isReady()) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -367,7 +368,7 @@ machine(Directory, "AMD Hammer-like protocol")
// Response Network
in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
- if (responseToDir_in.isReady()) {
+ if (responseToDir_in.isReady(clockEdge())) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -390,7 +391,7 @@ machine(Directory, "AMD Hammer-like protocol")
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -407,7 +408,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
@@ -441,7 +442,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
TBE tbe := TBEs[in_msg.LineAddress];
@@ -682,7 +683,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToDir_in.dequeue();
+ responseToDir_in.dequeue(clockEdge());
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
@@ -1115,14 +1116,14 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
peek(unblockNetwork_in, ResponseMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Sender);
}
- unblockNetwork_in.dequeue();
+ unblockNetwork_in.dequeue(clockEdge());
}
action(k_wakeUpDependents, "k", desc="Wake up dependents") {
@@ -1130,15 +1131,15 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(p_popDmaRequestQueue, "pd", desc="Pop DMA request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait on the DMA request queue") {
diff --git a/src/mem/protocol/MOESI_hammer-dma.sm b/src/mem/protocol/MOESI_hammer-dma.sm
index 4691e2490..7157082c4 100644
--- a/src/mem/protocol/MOESI_hammer-dma.sm
+++ b/src/mem/protocol/MOESI_hammer-dma.sm
@@ -52,6 +52,8 @@ machine(DMA, "DMA Controller")
State cur_state;
+ Tick clockEdge();
+
State getState(Addr addr) {
return cur_state;
}
@@ -77,7 +79,7 @@ machine(DMA, "DMA Controller")
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD) {
trigger(Event:ReadRequest, in_msg.LineAddress);
@@ -91,7 +93,7 @@ machine(DMA, "DMA Controller")
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek(dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
@@ -147,11 +149,11 @@ machine(DMA, "DMA Controller")
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop response queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
diff --git a/src/mem/protocol/Network_test-cache.sm b/src/mem/protocol/Network_test-cache.sm
index 82829a6ea..dab8f1089 100644
--- a/src/mem/protocol/Network_test-cache.sm
+++ b/src/mem/protocol/Network_test-cache.sm
@@ -68,6 +68,7 @@ machine(L1Cache, "Network_test L1 Cache")
}
// FUNCTIONS
+ Tick clockEdge();
// cpu/testers/networktest/networktest.cc generates packets of type
// ReadReq, INST_FETCH, and WriteReq.
@@ -129,7 +130,7 @@ machine(L1Cache, "Network_test L1 Cache")
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest) {
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
@@ -174,7 +175,7 @@ machine(L1Cache, "Network_test L1 Cache")
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
diff --git a/src/mem/protocol/Network_test-dir.sm b/src/mem/protocol/Network_test-dir.sm
index d618e98ff..6bd6920b3 100644
--- a/src/mem/protocol/Network_test-dir.sm
+++ b/src/mem/protocol/Network_test-dir.sm
@@ -60,7 +60,9 @@ machine(Directory, "Network_test Directory")
DataBlock DataBlk, desc="data for the block";
}
- // ** OBJECTS **
+ // ** FUNCTIONS **
+ Tick clockEdge();
+
State getState(Addr addr) {
return State:I;
}
@@ -87,7 +89,7 @@ machine(Directory, "Network_test Directory")
// ** IN_PORTS **
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Request, in_msg.addr);
@@ -98,7 +100,7 @@ machine(Directory, "Network_test Directory")
}
}
in_port(forwardQueue_in, RequestMsg, forwardToDir) {
- if (forwardQueue_in.isReady()) {
+ if (forwardQueue_in.isReady(clockEdge())) {
peek(forwardQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Forward, in_msg.addr);
@@ -109,7 +111,7 @@ machine(Directory, "Network_test Directory")
}
}
in_port(responseQueue_in, RequestMsg, responseToDir) {
- if (responseQueue_in.isReady()) {
+ if (responseQueue_in.isReady(clockEdge())) {
peek(responseQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Response, in_msg.addr);
@@ -123,15 +125,15 @@ machine(Directory, "Network_test Directory")
// Actions
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
- forwardQueue_in.dequeue();
+ forwardQueue_in.dequeue(clockEdge());
}
action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
- responseQueue_in.dequeue();
+ responseQueue_in.dequeue(clockEdge());
}
// TRANSITIONS
diff --git a/src/mem/protocol/RubySlicc_Defines.sm b/src/mem/protocol/RubySlicc_Defines.sm
index d4f7fa58f..eb235f8f3 100644
--- a/src/mem/protocol/RubySlicc_Defines.sm
+++ b/src/mem/protocol/RubySlicc_Defines.sm
@@ -1,4 +1,3 @@
-
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -31,6 +30,7 @@
NodeID version;
MachineID machineID;
NodeID clusterID;
+Cycles recycle_latency;
// Functions implemented in the AbstractController class for
// making timing access to the memory maintained by the
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index eeec185fc..7c2069203 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -37,6 +37,7 @@ external_type(PacketPtr, primitive="yes");
external_type(Packet, primitive="yes");
external_type(Addr, primitive="yes");
external_type(Cycles, primitive="yes", default="Cycles(0)");
+external_type(Tick, primitive="yes", default="0");
structure(DataBlock, external = "yes", desc="..."){
void clear();
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index f464b3c7d..a8bf93bcc 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -41,9 +41,9 @@ external_type(OutPort, primitive="yes");
external_type(Scalar, primitive="yes");
structure(InPort, external = "yes", primitive="yes") {
- bool isReady();
- Cycles dequeue();
- void recycle();
+ bool isReady(Tick current_time);
+ Tick dequeue(Tick current_time);
+ void recycle(Tick current_time, Tick recycle_latency);
bool isEmpty();
bool isStallMapEmpty();
int getStallMapSize();
@@ -179,9 +179,9 @@ structure (DMASequencer, external = "yes") {
}
structure (TimerTable, inport="yes", external = "yes") {
- bool isReady();
- Addr readyAddress();
- void set(Addr, Cycles);
+ bool isReady(Tick);
+ Addr nextAddress();
+ void set(Addr, Tick);
void unset(Addr);
bool isSet(Addr);
}
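Note: the SLICC-visible InPort/MessageBuffer interface above now threads the
caller's current time through every query instead of consulting a stored
receiver pointer. A minimal, self-contained sketch of the calling pattern;
ToyBuffer, Msg, and clock_edge are invented stand-ins, not gem5 code:

    #include <cstdint>
    #include <iostream>
    #include <queue>
    #include <vector>

    using Tick = uint64_t;

    struct Msg { Tick arrival; int payload; };
    struct Later {
        bool operator()(const Msg &a, const Msg &b) const
        { return a.arrival > b.arrival; }   // min-heap on arrival tick
    };

    class ToyBuffer {
        std::priority_queue<Msg, std::vector<Msg>, Later> heap;
      public:
        void enqueue(int payload, Tick now, Tick delta)
        { heap.push({now + delta, payload}); }
        bool isReady(Tick now) const        // head visible at time `now`?
        { return !heap.empty() && heap.top().arrival <= now; }
        int dequeue(Tick)
        { int p = heap.top().payload; heap.pop(); return p; }
    };

    int main() {
        ToyBuffer buf;
        Tick clock_edge = 1000;             // what clockEdge() would return
        buf.enqueue(42, clock_edge, 500);   // dequeueable from tick 1500 on
        for (Tick t = 1000; t <= 1500; t += 500)
            while (buf.isReady(t))          // the isReady(clockEdge()) pattern
                std::cout << "t=" << t << " msg=" << buf.dequeue(t) << "\n";
    }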
diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc
index b07bdbdca..35850f61e 100644
--- a/src/mem/ruby/network/MessageBuffer.cc
+++ b/src/mem/ruby/network/MessageBuffer.cc
@@ -40,7 +40,7 @@ using namespace std;
using m5::stl_helpers::operator<<;
MessageBuffer::MessageBuffer(const Params *p)
- : SimObject(p), m_recycle_latency(p->recycle_latency),
+ : SimObject(p),
m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
m_time_last_time_enqueue(0), m_time_last_time_pop(0),
m_last_arrival_time(0), m_strict_fifo(p->ordered),
@@ -48,9 +48,6 @@ MessageBuffer::MessageBuffer(const Params *p)
{
m_msg_counter = 0;
m_consumer = NULL;
- m_sender = NULL;
- m_receiver = NULL;
-
m_size_last_time_size_checked = 0;
m_size_at_cycle_start = 0;
m_msgs_this_cycle = 0;
@@ -63,10 +60,10 @@ MessageBuffer::MessageBuffer(const Params *p)
}
unsigned int
-MessageBuffer::getSize()
+MessageBuffer::getSize(Tick curTime)
{
- if (m_time_last_time_size_checked != m_receiver->curCycle()) {
- m_time_last_time_size_checked = m_receiver->curCycle();
+ if (m_time_last_time_size_checked != curTime) {
+ m_time_last_time_size_checked = curTime;
m_size_last_time_size_checked = m_prio_heap.size();
}
@@ -74,7 +71,7 @@ MessageBuffer::getSize()
}
bool
-MessageBuffer::areNSlotsAvailable(unsigned int n)
+MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{
// fast path when message buffers have infinite size
@@ -88,11 +85,11 @@ MessageBuffer::areNSlotsAvailable(unsigned int n)
// size immediately
unsigned int current_size = 0;
- if (m_time_last_time_pop < m_sender->clockEdge()) {
+ if (m_time_last_time_pop < current_time) {
// no pops this cycle - heap size is correct
current_size = m_prio_heap.size();
} else {
- if (m_time_last_time_enqueue < m_sender->curCycle()) {
+ if (m_time_last_time_enqueue < current_time) {
// no enqueues this cycle - m_size_at_cycle_start is correct
current_size = m_size_at_cycle_start;
} else {
@@ -118,8 +115,6 @@ const Message*
MessageBuffer::peek() const
{
DPRINTF(RubyQueue, "Peeking at head of queue.\n");
- assert(isReady());
-
const Message* msg_ptr = m_prio_heap.front().get();
assert(msg_ptr);
@@ -128,24 +123,24 @@ MessageBuffer::peek() const
}
// FIXME - move me somewhere else
-Cycles
+Tick
random_time()
{
- Cycles time(1);
- time += Cycles(random_mt.random(0, 3)); // [0...3]
+ Tick time = 1;
+ time += random_mt.random(0, 3); // [0...3]
if (random_mt.random(0, 7) == 0) { // 1 in 8 chance
- time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15]
+ time += 100 + random_mt.random(1, 15); // 100 + [1...15]
}
return time;
}
void
-MessageBuffer::enqueue(MsgPtr message, Cycles delta)
+MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
{
// record current time in case we have a pop that also adjusts my size
- if (m_time_last_time_enqueue < m_sender->curCycle()) {
+ if (m_time_last_time_enqueue < current_time) {
m_msgs_this_cycle = 0; // first msg this cycle
- m_time_last_time_enqueue = m_sender->curCycle();
+ m_time_last_time_enqueue = current_time;
}
m_msg_counter++;
@@ -154,23 +149,20 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
// Calculate the arrival time of the message, that is, the first
// tick at which the message can be dequeued.
assert(delta > 0);
- Tick current_time = m_sender->clockEdge();
Tick arrival_time = 0;
if (!RubySystem::getRandomization() || !m_randomization) {
// No randomization
- arrival_time = current_time + delta * m_sender->clockPeriod();
+ arrival_time = current_time + delta;
} else {
// Randomization - ignore delta
if (m_strict_fifo) {
if (m_last_arrival_time < current_time) {
m_last_arrival_time = current_time;
}
- arrival_time = m_last_arrival_time +
- random_time() * m_sender->clockPeriod();
+ arrival_time = m_last_arrival_time + random_time();
} else {
- arrival_time = current_time +
- random_time() * m_sender->clockPeriod();
+ arrival_time = current_time + random_time();
}
}
@@ -180,9 +172,8 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
if (arrival_time < m_last_arrival_time) {
panic("FIFO ordering violated: %s name: %s current time: %d "
"delta: %d arrival_time: %d last arrival_time: %d\n",
- *this, name(), current_time,
- delta * m_sender->clockPeriod(),
- arrival_time, m_last_arrival_time);
+ *this, name(), current_time, delta, arrival_time,
+ m_last_arrival_time);
}
}
@@ -195,10 +186,10 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
Message* msg_ptr = message.get();
assert(msg_ptr != NULL);
- assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
+ assert(current_time >= msg_ptr->getLastEnqueueTime() &&
"ensure we aren't dequeued early");
- msg_ptr->updateDelayedTicks(m_sender->clockEdge());
+ msg_ptr->updateDelayedTicks(current_time);
msg_ptr->setLastEnqueueTime(arrival_time);
msg_ptr->setMsgCounter(m_msg_counter);
@@ -215,32 +206,30 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
m_consumer->storeEventInfo(m_vnet_id);
}
-Cycles
-MessageBuffer::dequeue()
+Tick
+MessageBuffer::dequeue(Tick current_time)
{
DPRINTF(RubyQueue, "Popping\n");
- assert(isReady());
+ assert(isReady(current_time));
// get MsgPtr of the message about to be dequeued
MsgPtr message = m_prio_heap.front();
// get the delay cycles
- message->updateDelayedTicks(m_receiver->clockEdge());
- Cycles delayCycles =
- m_receiver->ticksToCycles(message->getDelayedTicks());
+ message->updateDelayedTicks(current_time);
+ Tick delay = message->getDelayedTicks();
// record previous size and time so the current buffer size isn't
// adjusted until the scheduled cycle
- if (m_time_last_time_pop < m_receiver->clockEdge()) {
+ if (m_time_last_time_pop < current_time) {
m_size_at_cycle_start = m_prio_heap.size();
- m_time_last_time_pop = m_receiver->clockEdge();
+ m_time_last_time_pop = current_time;
}
- pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
- greater<MsgPtr>());
+ pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
m_prio_heap.pop_back();
- return delayCycles;
+ return delay;
}
void
@@ -249,25 +238,26 @@ MessageBuffer::clear()
m_prio_heap.clear();
m_msg_counter = 0;
- m_time_last_time_enqueue = Cycles(0);
+ m_time_last_time_enqueue = 0;
m_time_last_time_pop = 0;
m_size_at_cycle_start = 0;
m_msgs_this_cycle = 0;
}
void
-MessageBuffer::recycle()
+MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
DPRINTF(RubyQueue, "Recycling.\n");
- assert(isReady());
+ assert(isReady(current_time));
MsgPtr node = m_prio_heap.front();
pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
- node->setLastEnqueueTime(m_receiver->clockEdge(m_recycle_latency));
+ Tick future_time = current_time + recycle_latency;
+ node->setLastEnqueueTime(future_time);
+
m_prio_heap.back() = node;
push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
- m_consumer->
- scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
+ m_consumer->scheduleEventAbsolute(future_time);
}
void
@@ -289,11 +279,10 @@ MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
}
void
-MessageBuffer::reanalyzeMessages(Addr addr)
+MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", addr);
assert(m_stall_msg_map.count(addr) > 0);
- Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages associated with this address back on the
@@ -301,15 +290,14 @@ MessageBuffer::reanalyzeMessages(Addr addr)
// scheduled for the current cycle so that the previously stalled messages
// will be observed before any younger messages that may arrive this cycle
//
- reanalyzeList(m_stall_msg_map[addr], curTick);
+ reanalyzeList(m_stall_msg_map[addr], current_time);
m_stall_msg_map.erase(addr);
}
void
-MessageBuffer::reanalyzeAllMessages()
+MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
- Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages, regardless of address, back on the
@@ -319,20 +307,20 @@ MessageBuffer::reanalyzeAllMessages()
//
for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
map_iter != m_stall_msg_map.end(); ++map_iter) {
- reanalyzeList(map_iter->second, curTick);
+ reanalyzeList(map_iter->second, current_time);
}
m_stall_msg_map.clear();
}
void
-MessageBuffer::stallMessage(Addr addr)
+MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
- assert(isReady());
+ assert(isReady(current_time));
assert(getOffset(addr) == 0);
MsgPtr message = m_prio_heap.front();
- dequeue();
+ dequeue(current_time);
//
// Note: no event is scheduled to analyze the map at a later time.
@@ -356,10 +344,10 @@ MessageBuffer::print(ostream& out) const
}
bool
-MessageBuffer::isReady() const
+MessageBuffer::isReady(Tick current_time) const
{
return ((m_prio_heap.size() > 0) &&
- (m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
+ (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
}
bool
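Note: with the sender pointer gone, enqueue() takes the sender's tick plus a
delta already converted to ticks. A compact sketch of the arrival-time
computation above, including the randomized mode that ignores delta but
preserves FIFO order; all names and constants here are illustrative:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    using Tick = uint64_t;

    Tick random_time() {                        // same shape as the helper above
        Tick t = 1 + (Tick)(rand() % 4);        // 1 + [0...3]
        if (rand() % 8 == 0)                    // 1 in 8 chance
            t += 100 + (Tick)(1 + rand() % 15); // 100 + [1...15]
        return t;
    }

    Tick last_arrival = 0;
    bool randomization = true, strict_fifo = true;

    Tick arrivalTime(Tick current_time, Tick delta) {
        Tick arrival;
        if (!randomization) {
            arrival = current_time + delta;         // delta already in ticks
        } else if (strict_fifo) {
            if (last_arrival < current_time)
                last_arrival = current_time;
            arrival = last_arrival + random_time(); // never reorders the queue
        } else {
            arrival = current_time + random_time();
        }
        assert(!strict_fifo || arrival >= last_arrival);
        last_arrival = std::max(last_arrival, arrival);
        return arrival;
    }

    int main() {
        for (Tick t = 1000; t < 1003; t++)
            std::cout << arrivalTime(t, 5) << "\n"; // strictly increasing
    }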
diff --git a/src/mem/ruby/network/MessageBuffer.hh b/src/mem/ruby/network/MessageBuffer.hh
index 4209aea0f..4fdf4978d 100644
--- a/src/mem/ruby/network/MessageBuffer.hh
+++ b/src/mem/ruby/network/MessageBuffer.hh
@@ -55,24 +55,24 @@ class MessageBuffer : public SimObject
typedef MessageBufferParams Params;
MessageBuffer(const Params *p);
- void reanalyzeMessages(Addr addr);
- void reanalyzeAllMessages();
- void stallMessage(Addr addr);
+ void reanalyzeMessages(Addr addr, Tick current_time);
+ void reanalyzeAllMessages(Tick current_time);
+ void stallMessage(Addr addr, Tick current_time);
// TRUE if head of queue timestamp <= SystemTime
- bool isReady() const;
+ bool isReady(Tick current_time) const;
void
- delayHead()
+ delayHead(Tick current_time, Tick delta)
{
MsgPtr m = m_prio_heap.front();
std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
std::greater<MsgPtr>());
m_prio_heap.pop_back();
- enqueue(m, Cycles(1));
+ enqueue(m, current_time, delta);
}
- bool areNSlotsAvailable(unsigned int n);
+ bool areNSlotsAvailable(unsigned int n, Tick curTime);
int getPriority() { return m_priority_rank; }
void setPriority(int rank) { m_priority_rank = rank; }
void setConsumer(Consumer* consumer)
@@ -86,20 +86,6 @@ class MessageBuffer : public SimObject
m_consumer = consumer;
}
- void setSender(ClockedObject* obj)
- {
- DPRINTF(RubyQueue, "Setting sender: %s\n", obj->name());
- assert(m_sender == NULL || m_sender == obj);
- m_sender = obj;
- }
-
- void setReceiver(ClockedObject* obj)
- {
- DPRINTF(RubyQueue, "Setting receiver: %s\n", obj->name());
- assert(m_receiver == NULL || m_receiver == obj);
- m_receiver = obj;
- }
-
Consumer* getConsumer() { return m_consumer; }
bool getOrdered() { return m_strict_fifo; }
@@ -108,26 +94,20 @@ class MessageBuffer : public SimObject
//! message queue. The function assumes that the queue is nonempty.
const Message* peek() const;
- const MsgPtr&
- peekMsgPtr() const
- {
- assert(isReady());
- return m_prio_heap.front();
- }
+ const MsgPtr &peekMsgPtr() const { return m_prio_heap.front(); }
- void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
- void enqueue(MsgPtr message, Cycles delta);
+ void enqueue(MsgPtr message, Tick curTime, Tick delta);
//! Updates the delayed ticks of the message at the head of the queue,
//! removes it from the queue and returns its total delay.
- Cycles dequeue();
+ Tick dequeue(Tick current_time);
- void recycle();
+ void recycle(Tick current_time, Tick recycle_latency);
bool isEmpty() const { return m_prio_heap.size() == 0; }
bool isStallMapEmpty() { return m_stall_msg_map.size() == 0; }
unsigned int getStallMapSize() { return m_stall_msg_map.size(); }
- unsigned int getSize();
+ unsigned int getSize(Tick curTime);
void clear();
void print(std::ostream& out) const;
@@ -148,17 +128,10 @@ class MessageBuffer : public SimObject
uint32_t functionalWrite(Packet *pkt);
private:
- //added by SS
- const Cycles m_recycle_latency;
-
void reanalyzeList(std::list<MsgPtr> &, Tick);
private:
// Data Members (m_ prefix)
- //! The two ends of the buffer.
- ClockedObject* m_sender;
- ClockedObject* m_receiver;
-
//! Consumer to signal a wakeup(), can be NULL
Consumer* m_consumer;
std::vector<MsgPtr> m_prio_heap;
@@ -170,12 +143,12 @@ class MessageBuffer : public SimObject
StallMsgMapType m_stall_msg_map;
const unsigned int m_max_size;
- Cycles m_time_last_time_size_checked;
+ Tick m_time_last_time_size_checked;
unsigned int m_size_last_time_size_checked;
// variables used so enqueues appear to happen immediately, while
// pops happen the next cycle
- Cycles m_time_last_time_enqueue;
+ Tick m_time_last_time_enqueue;
Tick m_time_last_time_pop;
Tick m_last_arrival_time;
@@ -193,7 +166,7 @@ class MessageBuffer : public SimObject
int m_vnet_id;
};
-Cycles random_time();
+Tick random_time();
inline std::ostream&
operator<<(std::ostream& out, const MessageBuffer& obj)
diff --git a/src/mem/ruby/network/MessageBuffer.py b/src/mem/ruby/network/MessageBuffer.py
index 88c528e30..d8a028532 100644
--- a/src/mem/ruby/network/MessageBuffer.py
+++ b/src/mem/ruby/network/MessageBuffer.py
@@ -37,7 +37,6 @@ class MessageBuffer(SimObject):
ordered = Param.Bool(False, "Whether the buffer is ordered")
buffer_size = Param.Unsigned(0, "Maximum number of entries to buffer \
(0 allows infinite entries)")
- recycle_latency = Param.Cycles(Parent.recycle_latency, "")
randomization = Param.Bool(False, "")
master = MasterPort("Master port to MessageBuffer receiver")
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
index c7bd6178a..e350eba6b 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
@@ -115,13 +115,6 @@ NetworkInterface_d::addNode(vector<MessageBuffer *>& in,
for (auto& it : in) {
if (it != nullptr) {
it->setConsumer(this);
- it->setReceiver(this);
- }
- }
-
- for (auto& it : out) {
- if (it != nullptr) {
- it->setSender(this);
}
}
}
@@ -222,6 +215,7 @@ NetworkInterface_d::wakeup()
DPRINTF(RubyNetwork, "m_id: %d woke up at time: %lld", m_id, curCycle());
MsgPtr msg_ptr;
+ Tick curTime = clockEdge();
// Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
@@ -231,10 +225,10 @@ NetworkInterface_d::wakeup()
continue;
}
- while (b->isReady()) { // Is there a message waiting
+ while (b->isReady(curTime)) { // Is there a message waiting
msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- b->dequeue();
+ b->dequeue(curTime);
} else {
break;
}
@@ -253,7 +247,7 @@ NetworkInterface_d::wakeup()
free_signal = true;
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), Cycles(1));
+ t_flit->get_msg_ptr(), curTime, cyclesToTicks(Cycles(1)));
}
// Simply send a credit back since we are not buffering
// this flit in the NI
@@ -363,7 +357,7 @@ NetworkInterface_d::checkReschedule()
continue;
}
- while (it->isReady()) { // Is there a message waiting
+ while (it->isReady(clockEdge())) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
index d834ea1a3..3d75ef8c2 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
@@ -99,13 +99,6 @@ NetworkInterface::addNode(vector<MessageBuffer*>& in,
for (auto& it: in) {
if (it != nullptr) {
it->setConsumer(this);
- it->setReceiver(this);
- }
- }
-
- for (auto& it : out) {
- if (it != nullptr) {
- it->setSender(this);
}
}
}
@@ -250,10 +243,10 @@ NetworkInterface::wakeup()
continue;
}
- while (b->isReady()) { // Is there a message waiting
+ while (b->isReady(clockEdge())) { // Is there a message waiting
msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- b->dequeue();
+ b->dequeue(clockEdge());
} else {
break;
}
@@ -272,7 +265,7 @@ NetworkInterface::wakeup()
m_id, curCycle());
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), Cycles(1));
+ t_flit->get_msg_ptr(), clockEdge(), cyclesToTicks(Cycles(1)));
// signal the upstream router that this vc can be freed now
inNetLink->release_vc_link(t_flit->get_vc(),
@@ -334,7 +327,7 @@ NetworkInterface::checkReschedule()
continue;
}
- while (it->isReady()) { // Is there a message waiting
+ while (it->isReady(clockEdge())) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 697357ccb..301d453c5 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -144,8 +144,9 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
// temporary vectors to store the routing results
vector<LinkID> output_links;
vector<NetDest> output_link_destinations;
+ Tick current_time = m_switch->clockEdge();
- while (buffer->isReady()) {
+ while (buffer->isReady(current_time)) {
DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
// Peek at message
@@ -176,7 +177,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
for (int out = 0; out < m_out.size(); out++) {
int out_queue_length = 0;
for (int v = 0; v < m_virtual_networks; v++) {
- out_queue_length += m_out[out][v]->getSize();
+ out_queue_length += m_out[out][v]->getSize(current_time);
}
int value =
(out_queue_length << 8) |
@@ -220,7 +221,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
for (int i = 0; i < output_links.size(); i++) {
int outgoing = output_links[i];
- if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+ if (!m_out[outgoing][vnet]->areNSlotsAvailable(1, current_time))
enough = false;
DPRINTF(RubyNetwork, "Checking if node is blocked ..."
@@ -251,7 +252,7 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
}
// Dequeue msg
- buffer->dequeue();
+ buffer->dequeue(current_time);
m_pending_message_count[vnet]--;
// Enqueue it - for all outgoing queues
@@ -273,7 +274,8 @@ PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
"inport[%d][%d] to outport [%d][%d].\n",
incoming, vnet, outgoing, vnet);
- m_out[outgoing][vnet]->enqueue(msg_ptr);
+ m_out[outgoing][vnet]->enqueue(msg_ptr, current_time,
+ m_switch->cyclesToTicks(Cycles(1)));
}
}
}
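Note: getSize() needs the switch's time only for its once-per-cycle size
cache; the value feeds the adaptive-routing tie-break shown above. A toy of
that packing, occupancy in the high bits and a random low byte, using made-up
queue lengths in place of m_out[out][v]->getSize(current_time):

    #include <climits>
    #include <cstdlib>
    #include <iostream>
    #include <vector>

    int main() {
        // Stand-ins for per-link occupancy summed over virtual networks.
        std::vector<int> out_queue_length = {3, 1, 1};
        int best = -1, best_value = INT_MAX;
        for (int out = 0; out < (int)out_queue_length.size(); out++) {
            // Occupancy dominates; the random byte only breaks ties.
            int value = (out_queue_length[out] << 8) | (rand() % 256);
            if (value < best_value) { best_value = value; best = out; }
        }
        std::cout << "route via link " << best << "\n";  // link 1 or 2
    }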
diff --git a/src/mem/ruby/network/simple/SimpleNetwork.py b/src/mem/ruby/network/simple/SimpleNetwork.py
index f4ec440a3..87de0fb46 100644
--- a/src/mem/ruby/network/simple/SimpleNetwork.py
+++ b/src/mem/ruby/network/simple/SimpleNetwork.py
@@ -41,9 +41,6 @@ class SimpleNetwork(RubyNetwork):
endpoint_bandwidth = Param.Int(1000, "bandwidth adjustment factor");
adaptive_routing = Param.Bool(False, "enable adaptive routing");
int_link_buffers = VectorParam.MessageBuffer("Buffers for int_links")
- # int_links do not recycle buffers, so this parameter is not used.
- # TODO: Move recycle_latency out of MessageBuffers and into controllers
- recycle_latency = Param.Cycles(0, "")
def setup_buffers(self):
# Note that all SimpleNetwork MessageBuffers are currently ordered
@@ -82,6 +79,3 @@ class Switch(BasicRouter):
virt_nets = Param.Int(Parent.number_of_virtual_networks,
"number of virtual networks")
port_buffers = VectorParam.MessageBuffer("Port buffers")
- # Ports do not recycle buffers, so this parameter is not used.
- # TODO: Move recycle_latency out of MessageBuffers and into controllers
- recycle_latency = Param.Cycles(0, "")
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index b9d0b8010..0951ef138 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -69,12 +69,6 @@ void
Switch::addInPort(const vector<MessageBuffer*>& in)
{
m_perfect_switch->addInPort(in);
-
- for (auto& it : in) {
- if (it != nullptr) {
- it->setReceiver(this);
- }
- }
}
void
@@ -95,17 +89,10 @@ Switch::addOutPort(const vector<MessageBuffer*>& out,
vector<MessageBuffer*> intermediateBuffers;
for (int i = 0; i < out.size(); ++i) {
- if (out[i] != nullptr) {
- out[i]->setSender(this);
- }
-
assert(m_num_connected_buffers < m_port_buffers.size());
MessageBuffer* buffer_ptr = m_port_buffers[m_num_connected_buffers];
m_num_connected_buffers++;
intermediateBuffers.push_back(buffer_ptr);
-
- buffer_ptr->setSender(this);
- buffer_ptr->setReceiver(this);
}
// Hook the queues to the PerfectSwitch
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index 01d1f6fbe..3863ab944 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -94,14 +94,16 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
if (out == nullptr || in == nullptr) {
return;
}
- assert(m_units_remaining[vnet] >= 0);
- while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
- out->areNSlotsAvailable(1)) {
+ assert(m_units_remaining[vnet] >= 0);
+ Tick current_time = m_switch->clockEdge();
+ while (bw_remaining > 0 && (in->isReady(current_time) ||
+ m_units_remaining[vnet] > 0) &&
+ out->areNSlotsAvailable(1, current_time)) {
// See if we are done transferring the previous message on
// this virtual network
- if (m_units_remaining[vnet] == 0 && in->isReady()) {
+ if (m_units_remaining[vnet] == 0 && in->isReady(current_time)) {
// Find the size of the message we are moving
MsgPtr msg_ptr = in->peekMsgPtr();
Message *net_msg_ptr = msg_ptr.get();
@@ -114,8 +116,9 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
m_ruby_system->curCycle());
// Move the message
- in->dequeue();
- out->enqueue(msg_ptr, m_link_latency);
+ in->dequeue(current_time);
+ out->enqueue(msg_ptr, current_time,
+ m_switch->cyclesToTicks(m_link_latency));
// Count the message
m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
@@ -128,8 +131,9 @@ Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
bw_remaining = max(0, -diff);
}
- if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
- !out->areNSlotsAvailable(1)) {
+ if (bw_remaining > 0 && (in->isReady(current_time) ||
+ m_units_remaining[vnet] > 0) &&
+ !out->areNSlotsAvailable(1, current_time)) {
DPRINTF(RubyNetwork, "vnet: %d", vnet);
// schedule me to wake up again because I'm waiting for my
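Note: a message wider than the per-cycle bandwidth is spread across wakeups
via m_units_remaining; the loop above only starts a new message when the head
is ready at the switch's clockEdge() and the output has a slot. A minimal
model of just the bandwidth accounting, with invented sizes:

    #include <algorithm>
    #include <iostream>

    int main() {
        int units_remaining = 0;          // flits of the current message left
        const int bw_per_cycle = 2, msg_size = 5;    // hypothetical numbers
        bool msg_waiting = true;          // models in->isReady(current_time)

        for (int cycle = 0; msg_waiting || units_remaining > 0; cycle++) {
            int bw_remaining = bw_per_cycle;
            while (bw_remaining > 0 && (msg_waiting || units_remaining > 0)) {
                if (units_remaining == 0 && msg_waiting) {
                    units_remaining = msg_size;  // in->dequeue(current_time)
                    msg_waiting = false;         // then out->enqueue(...)
                }
                int diff = units_remaining - bw_remaining;
                units_remaining = std::max(0, diff);
                bw_remaining = std::max(0, -diff);
            }
            std::cout << "cycle " << cycle << ": "
                      << units_remaining << " units left\n";
        }
    }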
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index 5bd38195a..be6438711 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -60,9 +60,6 @@ AbstractController::init()
m_delayVCHistogram.push_back(new Stats::Histogram());
m_delayVCHistogram[i]->init(10);
}
- if (getMemoryQueue()) {
- getMemoryQueue()->setSender(this);
- }
}
void
@@ -118,7 +115,8 @@ AbstractController::wakeUpBuffers(Addr addr)
in_port_rank >= 0;
in_port_rank--) {
if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
- (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
+ (*(m_waiting_buffers[addr]))[in_port_rank]->
+ reanalyzeMessages(addr, clockEdge());
}
}
delete m_waiting_buffers[addr];
@@ -138,7 +136,8 @@ AbstractController::wakeUpAllBuffers(Addr addr)
in_port_rank >= 0;
in_port_rank--) {
if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
- (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
+ (*(m_waiting_buffers[addr]))[in_port_rank]->
+ reanalyzeMessages(addr, clockEdge());
}
}
delete m_waiting_buffers[addr];
@@ -168,7 +167,7 @@ AbstractController::wakeUpAllBuffers()
//
if (*vec_iter != NULL &&
(wokeUpMsgBufs.count(*vec_iter) == 0)) {
- (*vec_iter)->reanalyzeAllMessages();
+ (*vec_iter)->reanalyzeAllMessages(clockEdge());
wokeUpMsgBufs.insert(*vec_iter);
}
}
@@ -328,7 +327,7 @@ AbstractController::recvTimingResp(PacketPtr pkt)
panic("Incorrect packet type received from memory controller!");
}
- getMemoryQueue()->enqueue(msg);
+ getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
delete pkt;
}
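Note: the wake-up paths now pass the controller's clockEdge() down to
reanalyzeMessages(). The rank ordering matters: higher-rank ports are
re-analyzed first. A toy of that scan; the buffer names and ranks are
invented:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    using Tick = uint64_t;
    using Addr = uint64_t;

    struct Buffer {
        const char *name;
        void reanalyzeMessages(Addr a, Tick t)
        { std::cout << name << ": reanalyze 0x" << std::hex << a
                    << " at tick " << std::dec << t << "\n"; }
    };

    int main() {
        Buffer request{"requestQueue"}, response{"responseQueue"};
        // Waiting-buffer vector indexed by in_port rank, as in the controller.
        std::vector<Buffer*> waiting = {&request, &response};
        Tick clock_edge = 4000;             // stands in for clockEdge()
        for (int rank = (int)waiting.size() - 1; rank >= 0; rank--)
            if (waiting[rank])              // highest rank woken first
                waiting[rank]->reanalyzeMessages(0x80, clock_edge);
    }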
diff --git a/src/mem/ruby/structures/TBETable.hh b/src/mem/ruby/structures/TBETable.hh
index cbc51dae5..4a24a5b13 100644
--- a/src/mem/ruby/structures/TBETable.hh
+++ b/src/mem/ruby/structures/TBETable.hh
@@ -47,12 +47,12 @@ class TBETable
void allocate(Addr address);
void deallocate(Addr address);
bool
- areNSlotsAvailable(int n) const
+ areNSlotsAvailable(int n, Tick current_time) const
{
return (m_number_of_TBEs - m_map.size()) >= n;
}
- ENTRY* lookup(Addr address);
+ ENTRY *lookup(Addr address);
// Print cache contents
void print(std::ostream& out) const;
diff --git a/src/mem/ruby/structures/TimerTable.cc b/src/mem/ruby/structures/TimerTable.cc
index 17dac6fc0..4809c8a47 100644
--- a/src/mem/ruby/structures/TimerTable.cc
+++ b/src/mem/ruby/structures/TimerTable.cc
@@ -34,14 +34,12 @@ TimerTable::TimerTable()
: m_next_time(0)
{
m_consumer_ptr = NULL;
- m_clockobj_ptr = NULL;
-
m_next_valid = false;
m_next_address = 0;
}
bool
-TimerTable::isReady() const
+TimerTable::isReady(Tick curTime) const
{
if (m_map.empty())
return false;
@@ -50,14 +48,12 @@ TimerTable::isReady() const
updateNext();
}
assert(m_next_valid);
- return (m_clockobj_ptr->curCycle() >= m_next_time);
+ return (curTime >= m_next_time);
}
Addr
-TimerTable::readyAddress() const
+TimerTable::nextAddress() const
{
- assert(isReady());
-
if (!m_next_valid) {
updateNext();
}
@@ -66,17 +62,14 @@ TimerTable::readyAddress() const
}
void
-TimerTable::set(Addr address, Cycles relative_latency)
+TimerTable::set(Addr address, Tick ready_time)
{
assert(address == makeLineAddress(address));
- assert(relative_latency > 0);
assert(!m_map.count(address));
- Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency;
m_map[address] = ready_time;
assert(m_consumer_ptr != NULL);
- m_consumer_ptr->
- scheduleEventAbsolute(m_clockobj_ptr->clockPeriod() * ready_time);
+ m_consumer_ptr->scheduleEventAbsolute(ready_time);
m_next_valid = false;
// Don't always recalculate the next ready address
diff --git a/src/mem/ruby/structures/TimerTable.hh b/src/mem/ruby/structures/TimerTable.hh
index 606201eb4..9efe7ca04 100644
--- a/src/mem/ruby/structures/TimerTable.hh
+++ b/src/mem/ruby/structures/TimerTable.hh
@@ -49,25 +49,16 @@ class TimerTable
m_consumer_ptr = consumer_ptr;
}
- void setClockObj(ClockedObject* obj)
- {
- assert(m_clockobj_ptr == NULL);
- m_clockobj_ptr = obj;
- }
-
void
setDescription(const std::string& name)
{
m_name = name;
}
- bool isReady() const;
- Addr readyAddress() const;
+ bool isReady(Tick curTime) const;
+ Addr nextAddress() const;
bool isSet(Addr address) const { return !!m_map.count(address); }
- void set(Addr address, Cycles relative_latency);
- void set(Addr address, uint64_t relative_latency)
- { set(address, Cycles(relative_latency)); }
-
+ void set(Addr address, Tick ready_time);
void unset(Addr address);
void print(std::ostream& out) const;
@@ -82,14 +73,12 @@ class TimerTable
// use a std::map for the address map as this container is sorted
// and ensures a well-defined iteration order
- typedef std::map<Addr, Cycles> AddressMap;
+ typedef std::map<Addr, Tick> AddressMap;
AddressMap m_map;
mutable bool m_next_valid;
- mutable Cycles m_next_time; // Only valid if m_next_valid is true
+ mutable Tick m_next_time; // Only valid if m_next_valid is true
mutable Addr m_next_address; // Only valid if m_next_valid is true
- //! Object used for querying time.
- ClockedObject* m_clockobj_ptr;
//! Consumer to signal a wakeup()
Consumer* m_consumer_ptr;
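Note: the table now stores absolute ready ticks, so callers compute the
deadline themselves (e.g. clockEdge() + cyclesToTicks(latency)) rather than
handing in a relative Cycles count. A sketch of set()/isReady() over a
std::map, with invented values:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <map>

    using Tick = uint64_t;
    using Addr = uint64_t;

    std::map<Addr, Tick> timer_map;    // sorted; well-defined iteration order

    void set(Addr a, Tick ready_time) { timer_map[a] = ready_time; }

    bool isReady(Tick cur) {
        Tick next = UINT64_MAX;        // earliest deadline across entries
        for (const auto &kv : timer_map)
            next = std::min(next, kv.second);
        return !timer_map.empty() && cur >= next;
    }

    int main() {
        Tick clock_edge = 1000, latency_ticks = 500;   // made-up values
        set(0x40, clock_edge + latency_ticks);         // absolute deadline
        std::cout << isReady(1000) << " " << isReady(1500) << "\n";  // 0 1
    }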
diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc
index 85b476cfd..3c895e627 100644
--- a/src/mem/ruby/system/DMASequencer.cc
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -54,7 +54,6 @@ DMASequencer::init()
MemObject::init();
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue();
- m_mandatory_q_ptr->setSender(this);
m_is_busy = false;
m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
@@ -256,7 +255,7 @@ DMASequencer::makeRequest(PacketPtr pkt)
}
assert(m_mandatory_q_ptr != NULL);
- m_mandatory_q_ptr->enqueue(msg);
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
active_request.bytes_issued += msg->getLen();
return RequestStatus_Issued;
@@ -302,7 +301,7 @@ DMASequencer::issueNext()
}
assert(m_mandatory_q_ptr != NULL);
- m_mandatory_q_ptr->enqueue(msg);
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
active_request.bytes_issued += msg->getLen();
DPRINTF(RubyDma,
"DMA request bytes issued %d, bytes completed %d, total len %d\n",
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index c13aed97e..e03d23774 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -81,7 +81,6 @@ RubyPort::init()
{
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue();
- m_mandatory_q_ptr->setSender(this);
}
BaseMasterPort &
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 186e62d55..815e270b6 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -629,7 +629,7 @@ Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
assert(latency > 0);
assert(m_mandatory_q_ptr != NULL);
- m_mandatory_q_ptr->enqueue(msg, latency);
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
}
template <class KEY, class VALUE>
diff --git a/src/mem/slicc/ast/EnqueueStatementAST.py b/src/mem/slicc/ast/EnqueueStatementAST.py
index 930540494..556643e4e 100644
--- a/src/mem/slicc/ast/EnqueueStatementAST.py
+++ b/src/mem/slicc/ast/EnqueueStatementAST.py
@@ -65,9 +65,10 @@ class EnqueueStatementAST(StatementAST):
if self.latexpr != None:
ret_type, rcode = self.latexpr.inline(True)
code("(${{self.queue_name.var.code}}).enqueue(" \
- "out_msg, Cycles($rcode));")
+ "out_msg, clockEdge(), cyclesToTicks(Cycles($rcode)));")
else:
- code("(${{self.queue_name.var.code}}).enqueue(out_msg);")
+ code("(${{self.queue_name.var.code}}).enqueue(out_msg, "\
+ "clockEdge(), cyclesToTicks(Cycles(1)));")
# End scope
self.symtab.popFrame()
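Note: the net effect of this codegen change, sketched as compilable
pseudo-output; the queue name, message type, and tick period are stand-ins,
not what SLICC literally emits for any given protocol:

    #include <cstdint>
    #include <iostream>

    using Tick = uint64_t;
    using Cycles = uint64_t;

    Tick clockEdge()             { return 1000; }
    Tick cyclesToTicks(Cycles c) { return c * 500; }  // assume 500-tick period

    struct OutMsg {};
    struct Queue {
        void enqueue(OutMsg, Tick cur, Tick delta)
        { std::cout << "arrives at tick " << cur + delta << "\n"; }
    } requestQueue;

    int main() {
        OutMsg out_msg;
        Cycles latency = 2;      // the SLICC latency expression, if present
        // old emission: requestQueue.enqueue(out_msg, Cycles(latency));
        requestQueue.enqueue(out_msg, clockEdge(),
                             cyclesToTicks(Cycles(latency)));  // new emission
    }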
diff --git a/src/mem/slicc/ast/ObjDeclAST.py b/src/mem/slicc/ast/ObjDeclAST.py
index 7cea70b32..efc7ef928 100644
--- a/src/mem/slicc/ast/ObjDeclAST.py
+++ b/src/mem/slicc/ast/ObjDeclAST.py
@@ -55,6 +55,8 @@ class ObjDeclAST(DeclAST):
c_code = "m_machineID"
elif self.ident == "clusterID":
c_code = "m_clusterID"
+ elif self.ident == "recycle_latency":
+ c_code = "m_recycle_latency"
else:
c_code = "(*m_%s_ptr)" % (self.ident)
diff --git a/src/mem/slicc/ast/PeekStatementAST.py b/src/mem/slicc/ast/PeekStatementAST.py
index f5ef91daf..00d26e908 100644
--- a/src/mem/slicc/ast/PeekStatementAST.py
+++ b/src/mem/slicc/ast/PeekStatementAST.py
@@ -77,7 +77,7 @@ class PeekStatementAST(StatementAST):
if (m_is_blocking &&
(m_block_map.count(in_msg_ptr->m_$address_field) == 1) &&
(m_block_map[in_msg_ptr->m_$address_field] != &$qcode)) {
- $qcode.delayHead();
+ $qcode.delayHead(clockEdge(), cyclesToTicks(Cycles(1)));
continue;
}
''')
diff --git a/src/mem/slicc/ast/StallAndWaitStatementAST.py b/src/mem/slicc/ast/StallAndWaitStatementAST.py
index b2f622871..6ab2888b7 100644
--- a/src/mem/slicc/ast/StallAndWaitStatementAST.py
+++ b/src/mem/slicc/ast/StallAndWaitStatementAST.py
@@ -45,5 +45,5 @@ class StallAndWaitStatementAST(StatementAST):
address_code = self.address.var.code
code('''
stallBuffer(&($in_port_code), $address_code);
- $in_port_code.stallMessage($address_code);
+ $in_port_code.stallMessage($address_code, clockEdge());
''')
diff --git a/src/mem/slicc/symbols/StateMachine.py b/src/mem/slicc/symbols/StateMachine.py
index 42a81c096..015d902b4 100644
--- a/src/mem/slicc/symbols/StateMachine.py
+++ b/src/mem/slicc/symbols/StateMachine.py
@@ -580,24 +580,10 @@ $c_ident::initNetQueues()
m_net_ptr->set${network}NetQueue(m_version + base, $vid->getOrdered(), $vnet,
"$vnet_type", $vid);
''')
- # Set the end
- if network == "To":
- code('$vid->setSender(this);')
- else:
- code('$vid->setReceiver(this);')
-
# Set Priority
if "rank" in var:
code('$vid->setPriority(${{var["rank"]}})')
- else:
- if var.type_ast.type.c_ident == "MessageBuffer":
- code('$vid->setReceiver(this);')
- if var.ident.find("triggerQueue") >= 0:
- code('$vid->setSender(this);')
- elif var.ident.find("optionalQueue") >= 0:
- code('$vid->setSender(this);')
-
code.dedent()
code('''
}
@@ -637,9 +623,6 @@ $c_ident::init()
comment = "Type %s default" % vtype.ident
code('*$vid = ${{vtype["default"]}}; // $comment')
- if vtype.c_ident == "TimerTable":
- code('$vid->setClockObj(this);')
-
# Set the prefetchers
code()
for prefetcher in self.prefetchers:
@@ -1293,7 +1276,7 @@ ${ident}_Controller::doTransitionWorker(${ident}_Event event,
res = trans.resources
for key,val in res.iteritems():
val = '''
-if (!%s.areNSlotsAvailable(%s))
+if (!%s.areNSlotsAvailable(%s, clockEdge()))
return TransitionResult_ResourceStall;
''' % (key.code, val)
case_sorter.append(val)
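Note: the shape of the per-transition resource check the generator now emits,
as a compilable stub; the queue name and slot counts are invented:

    #include <cstdint>
    #include <iostream>

    using Tick = uint64_t;
    enum TransitionResult { TransitionResult_Valid,
                            TransitionResult_ResourceStall };

    Tick clockEdge() { return 3000; }

    struct Queue {
        // the current time now rides along on every availability check
        bool areNSlotsAvailable(unsigned n, Tick) const { return n <= 1; }
    } responseNetwork;

    TransitionResult doTransitionWorker() {
        if (!responseNetwork.areNSlotsAvailable(2, clockEdge()))
            return TransitionResult_ResourceStall;
        return TransitionResult_Valid;
    }

    int main() { std::cout << doTransitionWorker() << "\n"; }  // prints 1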