Diffstat (limited to 'src')
36 files changed, 284 insertions, 394 deletions
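This changeset replaces Ruby's private DEBUG_MSG/DEBUG_EXPR/DEBUG_OUT macros with gem5's standard DPRINTF tracing, gated by the new per-component trace flags (RubyQueue, RubyNetwork, RubySlicc, and so on) and the compound 'Ruby' flag declared in src/mem/SConscript. As a minimal sketch of the conversion pattern, here is the MessageBuffer::dequeue() hunk from the diff below in its patched form; the include path for DPRINTF and the surrounding class members (m_name, m_prio_heap, pop()) are assumed from the Ruby/gem5 sources of this tree, not defined here:

```cpp
#include "base/trace.hh"   // assumed location of the DPRINTF macro in this tree

void
MessageBuffer::dequeue(MsgPtr& message)
{
    // was: DEBUG_MSG(QUEUE_COMP, MedPrio, "dequeue from " + m_name);
    // now gated at run time by the RubyQueue flag from src/mem/SConscript
    DPRINTF(RubyQueue, "Dequeue from %s\n", m_name);
    message = m_prio_heap.front().m_msgptr;
    pop();
}
```

The same pattern is applied throughout the patch: each DEBUG_MSG/DEBUG_EXPR group collapses into one formatted DPRINTF keyed to its component's flag, and enabling the compound 'Ruby' flag turns on all of the component flags at once.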
diff --git a/src/cpu/testers/rubytest/CheckTable.cc b/src/cpu/testers/rubytest/CheckTable.cc index 728ad0303..4a0c1eadd 100644 --- a/src/cpu/testers/rubytest/CheckTable.cc +++ b/src/cpu/testers/rubytest/CheckTable.cc @@ -111,8 +111,7 @@ CheckTable::getRandomCheck() Check* CheckTable::getCheck(const Address& address) { - DEBUG_MSG(TESTER_COMP, MedPrio, "Looking for check by address"); - DEBUG_EXPR(TESTER_COMP, MedPrio, address); + DPRINTF(RubyTest, "Looking for check by address: %s", address); m5::hash_map<Address, Check*>::iterator i = m_lookup_map.find(address); diff --git a/src/mem/SConscript b/src/mem/SConscript index 52c530732..f0f6facd1 100644 --- a/src/mem/SConscript +++ b/src/mem/SConscript @@ -59,4 +59,18 @@ TraceFlag('BusBridge') TraceFlag('LLSC') TraceFlag('MMU') TraceFlag('MemoryAccess') -TraceFlag('Ruby') + +TraceFlag('RubyCache') +TraceFlag('RubyDma') +TraceFlag('RubyGenerated') +TraceFlag('RubyMemory') +TraceFlag('RubyNetwork') +TraceFlag('RubyQueue') +TraceFlag('RubyPort') +TraceFlag('RubySlicc') +TraceFlag('RubyStorebuffer') +TraceFlag('RubyTester') + +CompoundFlag('Ruby', [ 'RubyQueue', 'RubyNetwork', 'RubyTester', + 'RubyGenerated', 'RubySlicc', 'RubyStorebuffer', 'RubyCache', + 'RubyMemory', 'RubyDma']) diff --git a/src/mem/protocol/MESI_CMP_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_directory-L1cache.sm index 57a147900..fb78623cd 100644 --- a/src/mem/protocol/MESI_CMP_directory-L1cache.sm +++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm @@ -329,8 +329,8 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") out_msg.Requestor := machineID; out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache, l2_select_low_bit, l2_select_num_bits)); - DEBUG_EXPR(address); - //DEBUG_EXPR(out_msg.Destination); + DPRINTF(RubySlicc, "address: %s, destination: %s\n", + address, out_msg.Destination); out_msg.MessageSize := MessageSizeType:Control; out_msg.Prefetch := in_msg.Prefetch; out_msg.AccessMode := in_msg.AccessMode; @@ -346,8 +346,8 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") out_msg.Requestor := machineID; out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache, l2_select_low_bit, l2_select_num_bits)); - DEBUG_EXPR(address); - //DEBUG_EXPR(out_msg.Destination); + DPRINTF(RubySlicc, "address: %s, destination: %s\n", + address, out_msg.Destination); out_msg.MessageSize := MessageSizeType:Control; out_msg.Prefetch := in_msg.Prefetch; out_msg.AccessMode := in_msg.AccessMode; @@ -362,11 +362,11 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") out_msg.Address := address; out_msg.Type := CoherenceRequestType:GETX; out_msg.Requestor := machineID; - //DEBUG_EXPR(machineID); + DPRINTF(RubySlicc, "%s\n", machineID); out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache, l2_select_low_bit, l2_select_num_bits)); - DEBUG_EXPR(address); - //DEBUG_EXPR(out_msg.Destination); + DPRINTF(RubySlicc, "address: %s, destination: %s\n", + address, out_msg.Destination); out_msg.MessageSize := MessageSizeType:Control; out_msg.Prefetch := in_msg.Prefetch; out_msg.AccessMode := in_msg.AccessMode; @@ -382,8 +382,8 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") out_msg.Requestor := machineID; out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache, l2_select_low_bit, l2_select_num_bits)); - DEBUG_EXPR(address); - //DEBUG_EXPR(out_msg.Destination); + DPRINTF(RubySlicc, "address: %s, destination: %s\n", + address, out_msg.Destination); out_msg.MessageSize := MessageSizeType:Control; out_msg.Prefetch := in_msg.Prefetch; out_msg.AccessMode 
:= in_msg.AccessMode; @@ -522,7 +522,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache, l2_select_low_bit, l2_select_num_bits)); out_msg.MessageSize := MessageSizeType:Response_Control; - DEBUG_EXPR(address); + DPRINTF(RubySlicc, "%s\n", address); } } @@ -535,7 +535,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache, l2_select_low_bit, l2_select_num_bits)); out_msg.MessageSize := MessageSizeType:Response_Control; - DEBUG_EXPR(address); + DPRINTF(RubySlicc, "%s\n", address); } } @@ -543,12 +543,12 @@ machine(L1Cache, "MSI Directory L1 Cache CMP") action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") { - //DEBUG_EXPR(getL1CacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk); sequencer.readCallback(address, getL1CacheEntry(address).DataBlk); } action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") { - //DEBUG_EXPR(getL1CacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk); sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk); getL1CacheEntry(address).Dirty := true; } diff --git a/src/mem/protocol/MESI_CMP_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_directory-L2cache.sm index 8a8f62314..98502df0f 100644 --- a/src/mem/protocol/MESI_CMP_directory-L2cache.sm +++ b/src/mem/protocol/MESI_CMP_directory-L2cache.sm @@ -188,9 +188,8 @@ machine(L2Cache, "MESI Directory L2 Cache CMP") } void addSharer(Address addr, MachineID requestor) { - //DEBUG_EXPR(machineID); - //DEBUG_EXPR(requestor); - //DEBUG_EXPR(addr); + DPRINTF(RubySlicc, "machineID: %s, requestor: %s, address: %s\n", + machineID, requestor, addr); getL2CacheEntry(addr).Sharers.add(requestor); } @@ -251,8 +250,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP") return Event:L1_PUTX_old; } } else { - DEBUG_EXPR(addr); - DEBUG_EXPR(type); + DPRINTF(RubySlicc, "address: %s, Request Type: %s\n", addr, type); error("Invalid L1 forwarded request type"); } } @@ -267,11 +265,9 @@ machine(L2Cache, "MESI Directory L2 Cache CMP") in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) { if(L1unblockNetwork_in.isReady()) { peek(L1unblockNetwork_in, ResponseMsg) { - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(getState(in_msg.Address)); - DEBUG_EXPR(in_msg.Sender); - DEBUG_EXPR(in_msg.Type); - DEBUG_EXPR(in_msg.Destination); + DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n", + in_msg.Address, getState(in_msg.Address), in_msg.Sender, + in_msg.Type, in_msg.Destination); assert(in_msg.Destination.isElement(machineID)); if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) { @@ -329,12 +325,9 @@ machine(L2Cache, "MESI Directory L2 Cache CMP") in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) { if(L1RequestIntraChipL2Network_in.isReady()) { peek(L1RequestIntraChipL2Network_in, RequestMsg) { - DEBUG_EXPR(in_msg.Address); - //DEBUG_EXPR(id); - DEBUG_EXPR(getState(in_msg.Address)); - //DEBUG_EXPR(in_msg.Requestor); - DEBUG_EXPR(in_msg.Type); - //DEBUG_EXPR(in_msg.Destination); + DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n", + in_msg.Address, getState(in_msg.Address), in_msg.Requestor, + in_msg.Type, in_msg.Destination); assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache); assert(in_msg.Destination.isElement(machineID)); if 
(L2cacheMemory.isTagPresent(in_msg.Address)) { @@ -506,12 +499,11 @@ machine(L2Cache, "MESI Directory L2 Cache CMP") out_msg.Type := CoherenceResponseType:DATA; out_msg.Sender := machineID; out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); - //DEBUG_EXPR(out_msg.Destination); + DPRINTF(RubySlicc, "%s\n", out_msg.Destination); out_msg.DataBlk := getL2CacheEntry(address).DataBlk; out_msg.Dirty := getL2CacheEntry(address).Dirty; - DEBUG_EXPR(out_msg.Address); - //DEBUG_EXPR(out_msg.Destination); - //DEBUG_EXPR(out_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Destination: %s, DataBlock: %s\n", + out_msg.Address, out_msg.Destination, out_msg.DataBlk); out_msg.MessageSize := MessageSizeType:Response_Data; } } diff --git a/src/mem/protocol/MESI_CMP_directory-dir.sm b/src/mem/protocol/MESI_CMP_directory-dir.sm index ceba6c425..8fa8eedc5 100644 --- a/src/mem/protocol/MESI_CMP_directory-dir.sm +++ b/src/mem/protocol/MESI_CMP_directory-dir.sm @@ -167,7 +167,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) { trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address)); } else { - DEBUG_EXPR(in_msg); + DPRINTF(RubySlicc, "%s\n", in_msg); error("Invalid message"); } } @@ -183,7 +183,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") } else if (in_msg.Type == CoherenceResponseType:ACK) { trigger(Event:CleanReplacement, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%s\n", in_msg.Type); error("Invalid message"); } } @@ -199,7 +199,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { trigger(Event:Memory_Ack, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%s\n", in_msg.Type); error("Invalid message"); } } @@ -271,7 +271,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") out_msg.Prefetch := in_msg.Prefetch; out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -287,7 +287,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") out_msg.MessageSize := in_msg.MessageSize; //out_msg.Prefetch := in_msg.Prefetch; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -295,8 +295,8 @@ machine(Directory, "MESI_CMP_filter_directory protocol") action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") { peek(responseNetwork_in, ResponseMsg) { getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk; - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + in_msg.Address, in_msg.DataBlk); } } //added by SS for dma @@ -309,7 +309,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") out_msg.OriginalRequestorMachId := machineID; out_msg.MessageSize := in_msg.MessageSize; out_msg.DataBlk := getDirectoryEntry(address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -349,7 +349,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") out_msg.MessageSize := in_msg.MessageSize; //out_msg.Prefetch := in_msg.Prefetch; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -439,7 +439,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol") out_msg.MessageSize := in_msg.MessageSize; //out_msg.Prefetch := in_msg.Prefetch; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } diff --git a/src/mem/protocol/MI_example-cache.sm 
b/src/mem/protocol/MI_example-cache.sm index 84975ffd5..8232e93e1 100644 --- a/src/mem/protocol/MI_example-cache.sm +++ b/src/mem/protocol/MI_example-cache.sm @@ -273,7 +273,7 @@ machine(L1Cache, "MI Example L1 Cache") } action(r_load_hit, "r", desc="Notify sequencer the load completed.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk); sequencer.readCallback(address, GenericMachineType:L1Cache, getCacheEntry(address).DataBlk); @@ -281,7 +281,7 @@ machine(L1Cache, "MI Example L1 Cache") action(rx_load_hit, "rx", desc="External load completed.") { peek(responseNetwork_in, ResponseMsg) { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk); sequencer.readCallback(address, getNondirectHitMachType(in_msg.Sender), getCacheEntry(address).DataBlk); @@ -289,7 +289,7 @@ machine(L1Cache, "MI Example L1 Cache") } action(s_store_hit, "s", desc="Notify sequencer that store completed.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk); sequencer.writeCallback(address, GenericMachineType:L1Cache, getCacheEntry(address).DataBlk); @@ -297,7 +297,7 @@ machine(L1Cache, "MI Example L1 Cache") action(sx_store_hit, "sx", desc="External store completed.") { peek(responseNetwork_in, ResponseMsg) { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk); sequencer.writeCallback(address, getNondirectHitMachType(in_msg.Sender), getCacheEntry(address).DataBlk); diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm index cb274553e..dfb95aedd 100644 --- a/src/mem/protocol/MI_example-dir.sm +++ b/src/mem/protocol/MI_example-dir.sm @@ -167,7 +167,7 @@ machine(Directory, "Directory protocol") } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { trigger(Event:Memory_Ack, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc,"%s\n", in_msg.Type); error("Invalid message"); } } @@ -369,7 +369,7 @@ machine(Directory, "Directory protocol") out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.MessageSize := in_msg.MessageSize; out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc,"%s\n", out_msg); } } } @@ -383,7 +383,7 @@ machine(Directory, "Directory protocol") //out_msg.OriginalRequestorMachId := machineID; out_msg.MessageSize := in_msg.MessageSize; out_msg.DataBlk := getDirectoryEntry(address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc,"%s\n", out_msg); } } } @@ -399,7 +399,7 @@ machine(Directory, "Directory protocol") out_msg.MessageSize := in_msg.MessageSize; //out_msg.Prefetch := in_msg.Prefetch; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc,"%s\n", out_msg); } } } @@ -416,7 +416,7 @@ machine(Directory, "Directory protocol") out_msg.MessageSize := in_msg.MessageSize; //out_msg.Prefetch := in_msg.Prefetch; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc,"%s\n", out_msg); } } } @@ -434,7 +434,7 @@ machine(Directory, "Directory protocol") out_msg.MessageSize := in_msg.MessageSize; //out_msg.Prefetch := in_msg.Prefetch; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc,"%s\n", out_msg); } } } diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm index 3f9980d67..31de269a9 100644 --- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm +++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm @@ -298,8 +298,7 @@ machine(L1Cache, "Directory protocol") if 
(requestNetwork_in.isReady()) { peek(requestNetwork_in, RequestMsg, block_on="Address") { assert(in_msg.Destination.isElement(machineID)); - DEBUG_EXPR("MRM_DEBUG: L1 received"); - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type); if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) { if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) { trigger(Event:Own_GETX, in_msg.Address); @@ -479,8 +478,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT out_msg.Acks := in_msg.Acks; out_msg.MessageSize := MessageSizeType:Response_Data; } - DEBUG_EXPR("Sending data to L2"); - DEBUG_EXPR(in_msg.Address); + DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Address); } else { enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) { @@ -494,7 +492,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT out_msg.Acks := in_msg.Acks; out_msg.MessageSize := MessageSizeType:ResponseLocal_Data; } - DEBUG_EXPR("Sending data to L1"); + DPRINTF(RubySlicc, "Sending data to L1\n"); } } } @@ -529,7 +527,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT out_msg.Acks := in_msg.Acks; out_msg.MessageSize := MessageSizeType:Response_Data; } - DEBUG_EXPR("Sending exclusive data to L2"); + DPRINTF(RubySlicc, "Sending exclusive data to L2\n"); } else { enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) { @@ -543,7 +541,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT out_msg.Acks := in_msg.Acks; out_msg.MessageSize := MessageSizeType:ResponseLocal_Data; } - DEBUG_EXPR("Sending exclusive data to L1"); + DPRINTF(RubySlicc, "Sending exclusive data to L1\n"); } } } @@ -599,12 +597,12 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT } action(h_load_hit, "h", desc="Notify sequencer the load completed.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); sequencer.readCallback(address, getCacheEntry(address).DataBlk); } action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); sequencer.writeCallback(address, getCacheEntry(address).DataBlk); getCacheEntry(address).Dirty := true; } @@ -634,8 +632,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") { peek(responseToL1Cache_in, ResponseMsg) { - DEBUG_EXPR("MRM_DEBUG: L1 decrementNumberOfMessages"); - DEBUG_EXPR(in_msg.Acks); + DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks); TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks; } } diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm index 0316e2310..7c6021bae 100644 --- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm +++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm @@ -742,8 +742,8 @@ machine(L2Cache, "Token protocol") out_msg.Dirty := false; out_msg.MessageSize := MessageSizeType:Response_Data; } - DEBUG_EXPR(address); - DEBUG_EXPR(L2_TBEs[address].DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, L2_TBEs[address].DataBlk); } action(c_sendDataFromTBEToL1GETX, "\c", desc="Send 
data from TBE to L1 requestors in TBE") { @@ -758,8 +758,8 @@ machine(L2Cache, "Token protocol") out_msg.Acks := L2_TBEs[address].Local_GETX_IntAcks; out_msg.MessageSize := MessageSizeType:Response_Data; } - DEBUG_EXPR(address); - DEBUG_EXPR(L2_TBEs[address].DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, L2_TBEs[address].DataBlk); } action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send data from TBE to L1 requestors in TBE") { @@ -802,8 +802,8 @@ machine(L2Cache, "Token protocol") out_msg.Acks := L2_TBEs[address].Fwd_GETX_ExtAcks; out_msg.MessageSize := MessageSizeType:Response_Data; } - DEBUG_EXPR(address); - DEBUG_EXPR(L2_TBEs[address].DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, L2_TBEs[address].DataBlk); } action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send data from TBE to external GETX") { @@ -818,8 +818,8 @@ machine(L2Cache, "Token protocol") out_msg.Acks := L2_TBEs[address].Fwd_GETX_ExtAcks; out_msg.MessageSize := MessageSizeType:Response_Data; } - DEBUG_EXPR(address); - DEBUG_EXPR(L2_TBEs[address].DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, L2_TBEs[address].DataBlk); } action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") { @@ -836,8 +836,8 @@ machine(L2Cache, "Token protocol") out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data; } } - DEBUG_EXPR(address); - DEBUG_EXPR(getL2CacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getL2CacheEntry(address).DataBlk); } action(d_sendDataToL1GETX, "\d", desc="Send data and a token from TBE to L1 requestor") { @@ -854,8 +854,8 @@ machine(L2Cache, "Token protocol") out_msg.Acks := L2_TBEs[address].Local_GETX_IntAcks; } } - DEBUG_EXPR(address); - DEBUG_EXPR(getL2CacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getL2CacheEntry(address).DataBlk); } action(dd_sendDataToFwdGETX, "dd", desc="send data") { @@ -872,8 +872,8 @@ machine(L2Cache, "Token protocol") out_msg.Acks := in_msg.Acks; } } - DEBUG_EXPR(address); - DEBUG_EXPR(getL2CacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getL2CacheEntry(address).DataBlk); } @@ -891,8 +891,8 @@ machine(L2Cache, "Token protocol") out_msg.MessageSize := MessageSizeType:Response_Data; } } - DEBUG_EXPR(address); - DEBUG_EXPR(getL2CacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getL2CacheEntry(address).DataBlk); } action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="send data") { @@ -950,12 +950,12 @@ machine(L2Cache, "Token protocol") action(ee_sendLocalInv, "\ee", desc="Send local invalidates") { L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address); - DEBUG_EXPR(address); - DEBUG_EXPR(getLocalSharers(address)); - DEBUG_EXPR(L2_TBEs[address].NumIntPendingAcks); + DPRINTF(RubySlicc, "Address: %s, Local Sharers: %s, Pending Acks: %d\n", + address, getLocalSharers(address), + L2_TBEs[address].NumIntPendingAcks); if (isLocalOwnerValid(address)) { L2_TBEs[address].NumIntPendingAcks := L2_TBEs[address].NumIntPendingAcks + 1; - DEBUG_EXPR(getLocalOwner(address)); + DPRINTF(RubySlicc, "%s\n", getLocalOwner(address)); } enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) { diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm index 74a785808..36e96cf41 100644 --- a/src/mem/protocol/MOESI_CMP_directory-dir.sm +++ 
b/src/mem/protocol/MOESI_CMP_directory-dir.sm @@ -256,7 +256,7 @@ machine(Directory, "Directory protocol") } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { trigger(Event:Memory_Ack, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%s\n", in_msg.Type); error("Invalid message"); } } @@ -409,16 +409,16 @@ machine(Directory, "Directory protocol") assert(in_msg.Dirty); assert(in_msg.MessageSize == MessageSizeType:Writeback_Data); getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk; - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + in_msg.Address, in_msg.DataBlk); } } action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") { peek(unblockNetwork_in, ResponseMsg) { getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk; - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + in_msg.Address, in_msg.DataBlk); } } @@ -470,7 +470,7 @@ machine(Directory, "Directory protocol") if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) { out_msg.Acks := out_msg.Acks - 1; } - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -490,7 +490,7 @@ machine(Directory, "Directory protocol") // Not used: out_msg.ReadX := false; out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -508,7 +508,7 @@ machine(Directory, "Directory protocol") // Not used: out_msg.ReadX := false; out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } diff --git a/src/mem/protocol/MOESI_CMP_directory-perfectDir.sm b/src/mem/protocol/MOESI_CMP_directory-perfectDir.sm index 7717434f8..52d6ca48b 100644 --- a/src/mem/protocol/MOESI_CMP_directory-perfectDir.sm +++ b/src/mem/protocol/MOESI_CMP_directory-perfectDir.sm @@ -333,8 +333,8 @@ machine(Directory, "Directory protocol") { assert(in_msg.Dirty); assert(in_msg.MessageSize == MessageSizeType:Writeback_Data); directory[in_msg.Address].DataBlk := in_msg.DataBlk; - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + in_msg.Address, in_msg.DataBlk); } } diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm index 7a234e56f..a810a3e02 100644 --- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm +++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm @@ -190,13 +190,14 @@ machine(L1Cache, "Token protocol") int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_ptr))"; int averageLatencyEstimate() { - DEBUG_EXPR( (averageLatencyCounter >> averageLatencyHysteresis) ); + DPRINTF(RubySlicc, "%d\n", + (averageLatencyCounter >> averageLatencyHysteresis)); //profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) ); return averageLatencyCounter >> averageLatencyHysteresis; } void updateAverageLatencyEstimate(int latency) { - DEBUG_EXPR( latency ); + DPRINTF(RubySlicc, "%d\n", latency); assert(latency >= 0); // By subtracting the current average and then adding the most @@ -890,8 +891,8 @@ machine(L1Cache, "Token protocol") // Increment IssueCount L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1; - DEBUG_EXPR("incremented issue count"); - DEBUG_EXPR(L1_TBEs[address].IssueCount); + DPRINTF(RubySlicc, "incremented issue count to %d\n", + 
L1_TBEs[address].IssueCount); // Set a wakeup timer if (dynamic_timeout_enabled) { @@ -1203,8 +1204,8 @@ machine(L1Cache, "Token protocol") action(h_load_hit, "h", desc="Notify sequencer the load completed.") { - DEBUG_EXPR(address); - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getCacheEntry(address).DataBlk); sequencer.readCallback(address, GenericMachineType:L1Cache, @@ -1213,8 +1214,8 @@ machine(L1Cache, "Token protocol") } action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") { - DEBUG_EXPR(address); - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getCacheEntry(address).DataBlk); peek(responseNetwork_in, ResponseMsg) { sequencer.readCallback(address, @@ -1225,20 +1226,20 @@ machine(L1Cache, "Token protocol") } action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") { - DEBUG_EXPR(address); - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getCacheEntry(address).DataBlk); sequencer.writeCallback(address, GenericMachineType:L1Cache, getCacheEntry(address).DataBlk); getCacheEntry(address).Dirty := true; - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); } action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") { - DEBUG_EXPR(address); - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + address, getCacheEntry(address).DataBlk); peek(responseNetwork_in, ResponseMsg) { sequencer.writeCallback(address, @@ -1247,7 +1248,7 @@ machine(L1Cache, "Token protocol") } getCacheEntry(address).Dirty := true; - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); } action(i_allocateTBE, "i", desc="Allocate TBE") { @@ -1317,11 +1318,10 @@ machine(L1Cache, "Token protocol") action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") { peek(responseNetwork_in, ResponseMsg) { assert(in_msg.Tokens != 0); - DEBUG_EXPR("MRM_DEBUG L1 received tokens"); - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.Tokens); + DPRINTF(RubySlicc, "L1 received tokens for address: %s, tokens: %d\n", + in_msg.Address, in_msg.Tokens); getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens; - DEBUG_EXPR(getCacheEntry(address).Tokens); + DPRINTF(RubySlicc, "%d\n", getCacheEntry(address).Tokens); if (getCacheEntry(address).Dirty == false && in_msg.Dirty) { getCacheEntry(address).Dirty := true; diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm index ae239e3ef..3541da41b 100644 --- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm +++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm @@ -311,7 +311,7 @@ machine(L2Cache, "Token protocol") } else if(type == CoherenceRequestType:GETX) { return GenericRequestType:GETX; } else { - DEBUG_EXPR(type); + DPRINTF(RubySlicc, "%s\n", type); error("invalid CoherenceRequestType"); } } @@ -475,7 +475,7 @@ machine(L2Cache, "Token protocol") } else if (in_msg.Type == CoherenceResponseType:INV) { trigger(Event:L1_INV, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%s\n", in_msg.Type); error("Unexpected message"); } } @@ -765,8 +765,8 @@ machine(L2Cache, "Token protocol") peek(requestNetwork_in, RequestMsg) { if (filtering_enabled == true && 
in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) { //profile_filter_action(1); - DEBUG_EXPR("filtered message"); - DEBUG_EXPR(in_msg.RetryNum); + DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n", + in_msg.RetryNum); } else { enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) { diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm index 79a3839f7..041d9b743 100644 --- a/src/mem/protocol/MOESI_CMP_token-dir.sm +++ b/src/mem/protocol/MOESI_CMP_token-dir.sm @@ -227,7 +227,7 @@ machine(Directory, "Token protocol") } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { trigger(Event:Memory_Ack, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%s\n", in_msg.Type); error("Invalid message"); } } @@ -254,7 +254,7 @@ machine(Directory, "Token protocol") } else if (in_msg.Type == CoherenceResponseType:ACK) { trigger(Event:Ack_All_Tokens, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%s\n", in_msg.Type); error("Invalid message"); } } else { @@ -266,7 +266,7 @@ machine(Directory, "Token protocol") } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) { trigger(Event:Ack_Owner, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%s\n", in_msg.Type); error("Invalid message"); } } @@ -590,7 +590,7 @@ machine(Directory, "Token protocol") out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.MessageSize := in_msg.MessageSize; out_msg.DataBlk := getDirectoryEntry(address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -603,7 +603,7 @@ machine(Directory, "Token protocol") out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address); out_msg.MessageSize := MessageSizeType:Request_Control; out_msg.DataBlk := getDirectoryEntry(address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } @@ -616,7 +616,7 @@ machine(Directory, "Token protocol") out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.MessageSize := in_msg.MessageSize; out_msg.DataBlk := getDirectoryEntry(address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -625,7 +625,7 @@ machine(Directory, "Token protocol") enqueue(memQueue_out, MemoryMsg, latency="1") { out_msg.Address := address; out_msg.Type := MemoryRequestType:MEMORY_WB; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } @@ -637,7 +637,7 @@ machine(Directory, "Token protocol") out_msg.DataBlk := TBEs[address].DataBlk; // then add the dma write data out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len); - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } @@ -647,7 +647,7 @@ machine(Directory, "Token protocol") out_msg.Type := MemoryRequestType:MEMORY_WB; // first, initialize the data blk to the current version of system memory out_msg.DataBlk := TBEs[address].DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } @@ -755,8 +755,8 @@ machine(Directory, "Token protocol") action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") { peek(responseNetwork_in, ResponseMsg) { getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk; - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + in_msg.Address, in_msg.DataBlk); } } diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm index 
f99194a75..02463405b 100644 --- a/src/mem/protocol/MOESI_hammer-cache.sm +++ b/src/mem/protocol/MOESI_hammer-cache.sm @@ -513,7 +513,7 @@ machine(L1Cache, "AMD Hammer-like protocol") out_msg.Sender := machineID; out_msg.Destination.add(in_msg.Requestor); out_msg.DataBlk := getCacheEntry(address).DataBlk; - DEBUG_EXPR(out_msg.DataBlk); + DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk); out_msg.Dirty := getCacheEntry(address).Dirty; if (in_msg.DirectedProbe) { out_msg.Acks := machineCount(MachineType:L1Cache); @@ -535,7 +535,7 @@ machine(L1Cache, "AMD Hammer-like protocol") out_msg.Sender := machineID; out_msg.Destination := in_msg.MergedRequestors; out_msg.DataBlk := getCacheEntry(address).DataBlk; - DEBUG_EXPR(out_msg.DataBlk); + DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk); out_msg.Dirty := getCacheEntry(address).Dirty; out_msg.Acks := machineCount(MachineType:L1Cache); out_msg.MessageSize := MessageSizeType:Response_Data; @@ -609,7 +609,7 @@ machine(L1Cache, "AMD Hammer-like protocol") } action(h_load_hit, "h", desc="Notify sequencer the load completed.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); sequencer.readCallback(address, testAndClearLocalHit(address), @@ -618,7 +618,7 @@ machine(L1Cache, "AMD Hammer-like protocol") } action(hx_external_load_hit, "hx", desc="load required external msgs") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); peek(responseToCache_in, ResponseMsg) { sequencer.readCallback(address, @@ -632,7 +632,7 @@ machine(L1Cache, "AMD Hammer-like protocol") } action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); peek(mandatoryQueue_in, CacheMsg) { sequencer.writeCallback(address, testAndClearLocalHit(address), @@ -646,7 +646,7 @@ machine(L1Cache, "AMD Hammer-like protocol") } action(sx_external_store_hit, "sx", desc="store required external msgs.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); peek(responseToCache_in, ResponseMsg) { sequencer.writeCallback(address, @@ -661,7 +661,7 @@ machine(L1Cache, "AMD Hammer-like protocol") } action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") { - DEBUG_EXPR(getCacheEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk); sequencer.writeCallback(address, getNondirectHitMachType(address, @@ -697,9 +697,9 @@ machine(L1Cache, "AMD Hammer-like protocol") action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") { peek(responseToCache_in, ResponseMsg) { assert(in_msg.Acks > 0); - DEBUG_EXPR(TBEs[address].NumPendingMsgs); + DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs); TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks; - DEBUG_EXPR(TBEs[address].NumPendingMsgs); + DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs); TBEs[address].LastResponder := in_msg.Sender; if (TBEs[address].InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) { assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime); @@ -763,7 +763,7 @@ machine(L1Cache, "AMD Hammer-like protocol") out_msg.Type := CoherenceResponseType:DATA; out_msg.Sender := machineID; out_msg.Destination.add(in_msg.Requestor); - DEBUG_EXPR(out_msg.Destination); + DPRINTF(RubySlicc, "%s\n", 
out_msg.Destination); out_msg.DataBlk := TBEs[address].DataBlk; out_msg.Dirty := TBEs[address].Dirty; if (in_msg.DirectedProbe) { @@ -785,7 +785,7 @@ machine(L1Cache, "AMD Hammer-like protocol") out_msg.Type := CoherenceResponseType:DATA; out_msg.Sender := machineID; out_msg.Destination := in_msg.MergedRequestors; - DEBUG_EXPR(out_msg.Destination); + DPRINTF(RubySlicc, "%s\n", out_msg.Destination); out_msg.DataBlk := TBEs[address].DataBlk; out_msg.Dirty := TBEs[address].Dirty; out_msg.Acks := machineCount(MachineType:L1Cache); @@ -854,8 +854,8 @@ machine(L1Cache, "AMD Hammer-like protocol") action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") { peek(responseToCache_in, ResponseMsg) { - DEBUG_EXPR(getCacheEntry(address).DataBlk); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n", + getCacheEntry(address).DataBlk, in_msg.DataBlk); assert(getCacheEntry(address).DataBlk == in_msg.DataBlk); getCacheEntry(address).DataBlk := in_msg.DataBlk; getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty; diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm index 9f7d08f9d..e6e474e95 100644 --- a/src/mem/protocol/MOESI_hammer-dir.sm +++ b/src/mem/protocol/MOESI_hammer-dir.sm @@ -311,7 +311,7 @@ machine(Directory, "AMD Hammer-like protocol") } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { trigger(Event:Memory_Ack, in_msg.Address); } else { - DEBUG_EXPR(in_msg.Type); + DPRINTF(RubySlicc, "%d\n", in_msg.Type); error("Invalid message"); } } @@ -483,28 +483,28 @@ machine(Directory, "AMD Hammer-like protocol") action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") { peek(responseToDir_in, ResponseMsg) { assert(in_msg.Acks > 0); - DEBUG_EXPR(TBEs[address].NumPendingMsgs); + DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs); // // Note that cache data responses will have an ack count of 2. However, // directory DMA requests must wait for acks from all LLC caches, so // only decrement by 1. // TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1; - DEBUG_EXPR(TBEs[address].NumPendingMsgs); + DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs); } } action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") { peek(unblockNetwork_in, ResponseMsg) { assert(in_msg.Type == CoherenceResponseType:UNBLOCKS); - DEBUG_EXPR(TBEs[address].NumPendingMsgs); + DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs); // // Note that cache data responses will have an ack count of 2. However, // directory DMA requests must wait for acks from all LLC caches, so // only decrement by 1. 
// TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1; - DEBUG_EXPR(TBEs[address].NumPendingMsgs); + DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs); } } @@ -566,10 +566,10 @@ machine(Directory, "AMD Hammer-like protocol") out_msg.Sender := machineID; out_msg.Destination.add(in_msg.OriginalRequestorMachId); out_msg.DataBlk := in_msg.DataBlk; - DEBUG_EXPR(out_msg.DataBlk); + DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk); out_msg.Dirty := false; // By definition, the block is now clean out_msg.Acks := TBEs[address].Acks; - DEBUG_EXPR(out_msg.Acks); + DPRINTF(RubySlicc, "%d\n", out_msg.Acks); assert(out_msg.Acks > 0); out_msg.MessageSize := MessageSizeType:Response_Data; } @@ -656,7 +656,7 @@ machine(Directory, "AMD Hammer-like protocol") out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.MessageSize := in_msg.MessageSize; out_msg.DataBlk := getDirectoryEntry(address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -670,7 +670,7 @@ machine(Directory, "AMD Hammer-like protocol") out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.MessageSize := in_msg.MessageSize; out_msg.DataBlk := getDirectoryEntry(address).DataBlk; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -876,8 +876,8 @@ machine(Directory, "AMD Hammer-like protocol") action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") { peek(responseToDir_in, ResponseMsg) { getDirectoryEntry(address).DataBlk := in_msg.DataBlk; - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + in_msg.Address, in_msg.DataBlk); } } @@ -886,23 +886,23 @@ machine(Directory, "AMD Hammer-like protocol") assert(in_msg.Dirty); assert(in_msg.MessageSize == MessageSizeType:Writeback_Data); getDirectoryEntry(address).DataBlk := in_msg.DataBlk; - DEBUG_EXPR(in_msg.Address); - DEBUG_EXPR(in_msg.DataBlk); + DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n", + in_msg.Address, in_msg.DataBlk); } } action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") { - DEBUG_EXPR(getDirectoryEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk); getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk; - DEBUG_EXPR(getDirectoryEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk); getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len); - DEBUG_EXPR(getDirectoryEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk); } action(wdt_writeDataFromTBE, "wdt", desc="DMA Write data to memory from TBE") { - DEBUG_EXPR(getDirectoryEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk); getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk; - DEBUG_EXPR(getDirectoryEntry(address).DataBlk); + DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk); } action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") { @@ -922,7 +922,7 @@ machine(Directory, "AMD Hammer-like protocol") enqueue(memQueue_out, MemoryMsg, latency="1") { out_msg.Address := address; out_msg.Type := MemoryRequestType:MEMORY_WB; - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } } @@ -935,7 +935,7 @@ machine(Directory, "AMD Hammer-like protocol") out_msg.DataBlk := TBEs[address].DataBlk; // then add the dma write data 
out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len); - DEBUG_EXPR(out_msg); + DPRINTF(RubySlicc, "%s\n", out_msg); } } diff --git a/src/mem/ruby/SConsopts b/src/mem/ruby/SConsopts index 95ca71fdd..7aa3e2c4e 100644 --- a/src/mem/ruby/SConsopts +++ b/src/mem/ruby/SConsopts @@ -32,9 +32,8 @@ Import('*') sticky_vars.AddVariables( BoolVariable('NO_VECTOR_BOUNDS_CHECKS', "Don't do bounds checks", True), - BoolVariable('RUBY_DEBUG', "Add debugging stuff to Ruby", False), ('GEMS_ROOT', "Add debugging stuff to Ruby", Dir('..').srcnode().abspath), ) -export_vars += [ 'NO_VECTOR_BOUNDS_CHECKS', 'RUBY_DEBUG', 'GEMS_ROOT' ] +export_vars += [ 'NO_VECTOR_BOUNDS_CHECKS', 'GEMS_ROOT' ] diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc index 7d28cef22..2a86f1bab 100644 --- a/src/mem/ruby/buffers/MessageBuffer.cc +++ b/src/mem/ruby/buffers/MessageBuffer.cc @@ -99,10 +99,9 @@ MessageBuffer::areNSlotsAvailable(int n) if (current_size + n <= m_max_size) { return true; } else { - DEBUG_MSG(QUEUE_COMP, MedPrio, n); - DEBUG_MSG(QUEUE_COMP, MedPrio, current_size); - DEBUG_MSG(QUEUE_COMP, MedPrio, m_size); - DEBUG_MSG(QUEUE_COMP, MedPrio, m_max_size); + DPRINTF(RubyQueue, "n: %d, current_size: %d, m_size: %d, " + "m_max_size: %d\n", + n, current_size, m_size, m_max_size); m_not_avail_count++; return false; } @@ -119,18 +118,14 @@ MessageBuffer::getMsgPtrCopy() const const Message* MessageBuffer::peekAtHeadOfQueue() const { - DEBUG_NEWLINE(QUEUE_COMP, MedPrio); - - DEBUG_MSG(QUEUE_COMP, MedPrio, - csprintf("Peeking at head of queue %s time: %d.", - m_name, g_eventQueue_ptr->getTime())); + DPRINTF(RubyQueue, "Peeking at head of queue %s time: %lld\n", + m_name, g_eventQueue_ptr->getTime()); assert(isReady()); const Message* msg_ptr = m_prio_heap.front().m_msgptr.get(); assert(msg_ptr); - DEBUG_EXPR(QUEUE_COMP, MedPrio, *msg_ptr); - DEBUG_NEWLINE(QUEUE_COMP, MedPrio); + DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr)); return msg_ptr; } @@ -149,12 +144,8 @@ random_time() void MessageBuffer::enqueue(MsgPtr message, Time delta) { - DEBUG_NEWLINE(QUEUE_COMP, HighPrio); - DEBUG_MSG(QUEUE_COMP, HighPrio, - csprintf("enqueue %s time: %d.", m_name, - g_eventQueue_ptr->getTime())); - DEBUG_EXPR(QUEUE_COMP, MedPrio, message); - DEBUG_NEWLINE(QUEUE_COMP, HighPrio); + DPRINTF(RubyQueue, "Enqueue %s time: %lld, message: %s.\n", + m_name, g_eventQueue_ptr->getTime(), (*(message.get()))); m_msg_counter++; m_size++; @@ -229,12 +220,10 @@ MessageBuffer::enqueue(MsgPtr message, Time delta) push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MessageBufferNode>()); - DEBUG_NEWLINE(QUEUE_COMP, HighPrio); - DEBUG_MSG(QUEUE_COMP, HighPrio, - csprintf("enqueue %s with arrival_time %d cur_time: %d.", - m_name, arrival_time, g_eventQueue_ptr->getTime())); - DEBUG_EXPR(QUEUE_COMP, MedPrio, message); - DEBUG_NEWLINE(QUEUE_COMP, HighPrio); + DPRINTF(RubyQueue, "Enqueue %s with arrival_time %lld cur_time: %lld, " + "message: %s.\n", + m_name, arrival_time, g_eventQueue_ptr->getTime(), + (*(message.get()))); // Schedule the wakeup if (m_consumer_ptr != NULL) { @@ -263,11 +252,11 @@ MessageBuffer::dequeue_getDelayCycles(MsgPtr& message) void MessageBuffer::dequeue(MsgPtr& message) { - DEBUG_MSG(QUEUE_COMP, MedPrio, "dequeue from " + m_name); + DPRINTF(RubyQueue, "Dequeue from %s\n", m_name); message = m_prio_heap.front().m_msgptr; pop(); - DEBUG_EXPR(QUEUE_COMP, MedPrio, message); + DPRINTF(RubyQueue, "Enqueue message is %s\n", 
(*(message.get()))); } int @@ -290,7 +279,7 @@ MessageBuffer::dequeue_getDelayCycles() void MessageBuffer::pop() { - DEBUG_MSG(QUEUE_COMP, MedPrio, "pop from " + m_name); + DPRINTF(RubyQueue, "Pop from %s\n", m_name); assert(isReady()); pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MessageBufferNode>()); @@ -321,7 +310,7 @@ MessageBuffer::clear() void MessageBuffer::recycle() { - DEBUG_MSG(QUEUE_COMP, MedPrio, "recycling " + m_name); + DPRINTF(RubyQueue, "Recycling %s\n", m_name); assert(isReady()); MessageBufferNode node = m_prio_heap.front(); pop_heap(m_prio_heap.begin(), m_prio_heap.end(), @@ -337,7 +326,7 @@ MessageBuffer::recycle() void MessageBuffer::reanalyzeMessages(const Address& addr) { - DEBUG_MSG(QUEUE_COMP, MedPrio, "reanalyzeMessages " + m_name); + DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", m_name); assert(m_stall_msg_map.count(addr) > 0); // @@ -362,7 +351,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr) void MessageBuffer::stallMessage(const Address& addr) { - DEBUG_MSG(QUEUE_COMP, MedPrio, "stalling " + m_name); + DPRINTF(RubyQueue, "Stalling %s\n", m_name); assert(isReady()); assert(addr.getOffset() == 0); MsgPtr message = m_prio_heap.front().m_msgptr; diff --git a/src/mem/ruby/common/Debug.cc b/src/mem/ruby/common/Debug.cc index eb6cc5c47..6995ef637 100644 --- a/src/mem/ruby/common/Debug.cc +++ b/src/mem/ruby/common/Debug.cc @@ -208,13 +208,6 @@ Debug::checkFilterString(const char *filter_str) return false; // no error } - if (RUBY_DEBUG == false) { - cerr << "Error: User specified set of debug components, but the " - << "RUBY_DEBUG compile-time flag is false." << endl - << "Solution: Re-compile with RUBY_DEBUG set to true." << endl; - return true; // error - } - if (string(filter_str) == "all") { return false; // no error } diff --git a/src/mem/ruby/common/Debug.hh b/src/mem/ruby/common/Debug.hh index f8c18a0b5..7005d95f7 100644 --- a/src/mem/ruby/common/Debug.hh +++ b/src/mem/ruby/common/Debug.hh @@ -36,7 +36,6 @@ #include <string> #include <vector> -#include "config/ruby_debug.hh" #include "mem/ruby/common/Global.hh" #include "sim/sim_object.hh" @@ -228,62 +227,6 @@ const bool ASSERT_FLAG = true; } \ } while (0) -#define DEBUG_MSG(module, priority, MESSAGE) do { \ - using namespace std; \ - if (RUBY_DEBUG) { \ - if (g_debug_ptr->validDebug(module, priority)) { \ - (* debug_cout_ptr) << "Debug: in fn " \ - << __PRETTY_FUNCTION__ \ - << " in " << __FILE__ << ":" \ - << __LINE__ << ": " \ - << (MESSAGE) << endl << flush; \ - } \ - } \ -} while (0) - -#define DEBUG_EXPR(module, priority, EXPR) do { \ - using namespace std; \ - if (RUBY_DEBUG) { \ - if (g_debug_ptr->validDebug(module, priority)) { \ - (* debug_cout_ptr) << "Debug: in fn " \ - << __PRETTY_FUNCTION__ \ - << " in " << __FILE__ << ":" \ - << __LINE__ << ": " \ - << #EXPR << " is " \ - << (EXPR) << endl << flush; \ - } \ - } \ -} while (0) - -#define DEBUG_NEWLINE(module, priority) do { \ - using namespace std; \ - if (RUBY_DEBUG) { \ - if (g_debug_ptr->validDebug(module, priority)) { \ - (* debug_cout_ptr) << endl << flush; \ - } \ - } \ -} while (0) - -#define DEBUG_SLICC(priority, LINE, MESSAGE) do { \ - using namespace std; \ - if (RUBY_DEBUG) { \ - if (g_debug_ptr->validDebug(SLICC_COMP, priority)) { \ - (* debug_cout_ptr) << (LINE) << (MESSAGE) << endl << flush; \ - } \ - } \ -} while (0) - -#define DEBUG_OUT(rest... 
) do { \ - using namespace std; \ - if (RUBY_DEBUG) { \ - cout << "Debug: in fn " \ - << __PRETTY_FUNCTION__ \ - << " in " << __FILE__ << ":" \ - << __LINE__ << ": "; \ - g_debug_ptr->debugMsg(rest); \ - } \ -} while (0) - #define ERROR_OUT( rest... ) do { \ using namespace std; \ if (ERROR_MESSAGE_FLAG) { \ diff --git a/src/mem/ruby/common/NetDest.hh b/src/mem/ruby/common/NetDest.hh index 3fe87f69b..dc4a54965 100644 --- a/src/mem/ruby/common/NetDest.hh +++ b/src/mem/ruby/common/NetDest.hh @@ -55,7 +55,7 @@ class NetDest ~NetDest() { - DEBUG_MSG(MEMORY_COMP, LowPrio, "NetDest Destructor"); + DPRINTF(RubyMemory, "NetDest Destructor\n"); } void add(MachineID newElement); diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc index a33776c17..16792ef2b 100644 --- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc +++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc @@ -205,9 +205,8 @@ NetworkInterface_d::calculateVC(int vnet) void NetworkInterface_d::wakeup() { - DEBUG_EXPR(NETWORK_COMP, MedPrio, m_id); - DEBUG_MSG(NETWORK_COMP, MedPrio, "NI WOKE UP"); - DEBUG_EXPR(NETWORK_COMP, MedPrio, g_eventQueue_ptr->getTime()); + DPRINTF(RubyNetwork, "m_id: %d woke up at time: %lld", + m_id, g_eventQueue_ptr->getTime()); MsgPtr msg_ptr; diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc index de57944f1..50aa16cea 100644 --- a/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc +++ b/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc @@ -62,8 +62,8 @@ Switch_d::init() void Switch_d::wakeup() { - DEBUG_MSG(NETWORK_COMP, HighPrio, "Switch woke up"); - DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime()); + DPRINTF(RubyNetwork, "Switch woke up at time: %lld\n", + g_eventQueue_ptr->getTime()); for (int inport = 0; inport < m_num_inports; inport++) { if (!m_switch_buffer[inport]->isReady()) diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc index 86f9483c6..23efaa618 100644 --- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc +++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc @@ -258,9 +258,8 @@ NetworkInterface::wakeup() if (inNetLink->isReady()) { flit *t_flit = inNetLink->consumeLink(); if (t_flit->get_type() == TAIL_ || t_flit->get_type() == HEAD_TAIL_) { - DEBUG_EXPR(NETWORK_COMP, HighPrio, m_id); - DEBUG_MSG(NETWORK_COMP, HighPrio, "Message got delivered"); - DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime()); + DPRINTF(RubyNetwork, "m_id: %d, Message delivered at time: %lld\n", + m_id, g_eventQueue_ptr->getTime()); // When we are doing network only testing, the messages do not // have to be buffered into the message buffers of the protocol diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc index ca75edb58..ce90c9748 100644 --- a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc +++ b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc @@ -309,8 +309,8 @@ Router::wakeup() // checking the incoming link if (m_in_link[incoming_port]->isReady()) { - DEBUG_EXPR(NETWORK_COMP, HighPrio, m_id); - DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime()); + DPRINTF(RubyNetwork, "m_id: %d, Time: %lld\n", + m_id, g_eventQueue_ptr->getTime()); t_flit = 
m_in_link[incoming_port]->peekLink(); routeCompute(t_flit, incoming_port); m_in_link[incoming_port]->consumeLink(); diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc index 5a1ee32ec..7229c724f 100644 --- a/src/mem/ruby/network/simple/PerfectSwitch.cc +++ b/src/mem/ruby/network/simple/PerfectSwitch.cc @@ -123,7 +123,7 @@ PerfectSwitch::~PerfectSwitch() void PerfectSwitch::wakeup() { - DEBUG_EXPR(NETWORK_COMP, MedPrio, m_switch_id); + DPRINTF(RubyNetwork, "m_switch_id: %d\n",m_switch_id); MsgPtr msg_ptr; @@ -168,12 +168,12 @@ PerfectSwitch::wakeup() // Is there a message waiting? while (m_in[incoming][vnet]->isReady()) { - DEBUG_EXPR(NETWORK_COMP, MedPrio, incoming); + DPRINTF(RubyNetwork, "incoming: %d\n", incoming); // Peek at message msg_ptr = m_in[incoming][vnet]->peekMsgPtr(); net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get()); - DEBUG_EXPR(NETWORK_COMP, MedPrio, *net_msg_ptr); + DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr)); output_links.clear(); output_link_destinations.clear(); @@ -216,7 +216,7 @@ PerfectSwitch::wakeup() // pick the next link to look at int link = m_link_order[i].m_link; NetDest dst = m_routing_table[link]; - DEBUG_EXPR(NETWORK_COMP, MedPrio, dst); + DPRINTF(RubyNetwork, "dst: %s\n", dst); if (!msg_dsts.intersectionIsNotEmpty(dst)) continue; @@ -246,19 +246,17 @@ PerfectSwitch::wakeup() int outgoing = output_links[i]; if (!m_out[outgoing][vnet]->areNSlotsAvailable(1)) enough = false; - DEBUG_MSG(NETWORK_COMP, HighPrio, - "checking if node is blocked"); - DEBUG_EXPR(NETWORK_COMP, HighPrio, outgoing); - DEBUG_EXPR(NETWORK_COMP, HighPrio, vnet); - DEBUG_EXPR(NETWORK_COMP, HighPrio, enough); + DPRINTF(RubyNetwork, "Checking if node is blocked\n" + "outgoing: %d, vnet: %d, enough: %d\n", + outgoing, vnet, enough); } // There were not enough resources if (!enough) { g_eventQueue_ptr->scheduleEvent(this, 1); - DEBUG_MSG(NETWORK_COMP, HighPrio, - "Can't deliver message since a node is blocked"); - DEBUG_EXPR(NETWORK_COMP, HighPrio, *net_msg_ptr); + DPRINTF(RubyNetwork, "Can't deliver message since a node " + "is blocked\n" + "Message: %s\n", (*net_msg_ptr)); break; // go to next incoming port } @@ -295,13 +293,10 @@ PerfectSwitch::wakeup() output_link_destinations[i]; // Enqeue msg - DEBUG_NEWLINE(NETWORK_COMP,HighPrio); - DEBUG_MSG(NETWORK_COMP, HighPrio, - csprintf("switch: %d enqueuing net msg from " - "inport[%d][%d] to outport [%d][%d] time: %d.", + DPRINTF(RubyNetwork, "Switch: %d enqueuing net msg from " + "inport[%d][%d] to outport [%d][%d] time: %lld.\n", m_switch_id, incoming, vnet, outgoing, vnet, - g_eventQueue_ptr->getTime())); - DEBUG_NEWLINE(NETWORK_COMP,HighPrio); + g_eventQueue_ptr->getTime()); m_out[outgoing][vnet]->enqueue(msg_ptr); } diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc index a77d40dee..096a8f466 100644 --- a/src/mem/ruby/network/simple/Throttle.cc +++ b/src/mem/ruby/network/simple/Throttle.cc @@ -161,12 +161,10 @@ Throttle::wakeup() m_units_remaining[vnet] += network_message_to_size(net_msg_ptr); - DEBUG_NEWLINE(NETWORK_COMP,HighPrio); - DEBUG_MSG(NETWORK_COMP, HighPrio, - csprintf("throttle: %d my bw %d bw spent enqueueing " - "net msg %d time: %d.", + DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent " + "enqueueing net msg %d time: %lld.\n", m_node, getLinkBandwidth(), m_units_remaining[vnet], - g_eventQueue_ptr->getTime())); + g_eventQueue_ptr->getTime()); // Move the message m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), 
m_link_latency); @@ -175,8 +173,7 @@ Throttle::wakeup() // Count the message m_message_counters[net_msg_ptr->getMessageSize()][vnet]++; - DEBUG_MSG(NETWORK_COMP,LowPrio,*m_out[vnet]); - DEBUG_NEWLINE(NETWORK_COMP,HighPrio); + DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]); } // Calculate the amount of bandwidth we spent on this message @@ -188,7 +185,7 @@ Throttle::wakeup() if (bw_remaining > 0 && (m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) && !m_out[vnet]->areNSlotsAvailable(1)) { - DEBUG_MSG(NETWORK_COMP,LowPrio,vnet); + DPRINTF(RubyNetwork, "vnet: %d", vnet); // schedule me to wakeup again because I'm waiting for my // output queue to become available schedule_wakeup = true; @@ -209,11 +206,9 @@ Throttle::wakeup() // We have extra bandwidth and our output buffer was // available, so we must not have anything else to do until // another message arrives. - DEBUG_MSG(NETWORK_COMP, LowPrio, *this); - DEBUG_MSG(NETWORK_COMP, LowPrio, "not scheduled again"); + DPRINTF(RubyNetwork, "%s not scheduled again\n", *this); } else { - DEBUG_MSG(NETWORK_COMP, LowPrio, *this); - DEBUG_MSG(NETWORK_COMP, LowPrio, "scheduled again"); + DPRINTF(RubyNetwork, "%s scheduled again\n", *this); // We are out of bandwidth for this cycle, so wakeup next // cycle and continue diff --git a/src/mem/ruby/network/simple/Topology.cc b/src/mem/ruby/network/simple/Topology.cc index bd167bd40..5e6bf9939 100644 --- a/src/mem/ruby/network/simple/Topology.cc +++ b/src/mem/ruby/network/simple/Topology.cc @@ -405,13 +405,11 @@ shortest_path_to_node(SwitchID src, SwitchID next, const Matrix& weights, } } - DEBUG_MSG(NETWORK_COMP, MedPrio, "returning shortest path"); - DEBUG_EXPR(NETWORK_COMP, MedPrio, (src-(2*max_machines))); - DEBUG_EXPR(NETWORK_COMP, MedPrio, (next-(2*max_machines))); - DEBUG_EXPR(NETWORK_COMP, MedPrio, src); - DEBUG_EXPR(NETWORK_COMP, MedPrio, next); - DEBUG_EXPR(NETWORK_COMP, MedPrio, result); - DEBUG_NEWLINE(NETWORK_COMP, MedPrio); + DPRINTF(RubyNetwork, "Returning shortest path\n" + "(src-(2*max_machines)): %d, (next-(2*max_machines)): %d, " + "src: %d, next: %d, result: %s\n", + (src-(2*max_machines)), (next-(2*max_machines)), + src, next, result); return result; } diff --git a/src/mem/ruby/storebuffer/storebuffer.cc b/src/mem/ruby/storebuffer/storebuffer.cc index d6ec0959e..1549e33ee 100644 --- a/src/mem/ruby/storebuffer/storebuffer.cc +++ b/src/mem/ruby/storebuffer/storebuffer.cc @@ -43,7 +43,7 @@ hit(int64_t id) { if (request_map.find(id) == request_map.end()) { ERROR_OUT("Request ID not found in the map"); - DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id); + DPRINTF(RubyStorebuffer, "id: %lld\n", id); ASSERT(0); } else { request_map[id]->complete(id); @@ -73,11 +73,6 @@ StoreBuffer::StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size) if (m_storebuffer_size > 0){ m_use_storebuffer = true; } - -#ifdef DEBUG_WRITE_BUFFER - DEBUG_OUT("*******storebuffer_t::Using Write Buffer? 
diff --git a/src/mem/ruby/storebuffer/storebuffer.cc b/src/mem/ruby/storebuffer/storebuffer.cc
index d6ec0959e..1549e33ee 100644
--- a/src/mem/ruby/storebuffer/storebuffer.cc
+++ b/src/mem/ruby/storebuffer/storebuffer.cc
@@ -43,7 +43,7 @@ hit(int64_t id)
 {
     if (request_map.find(id) == request_map.end()) {
         ERROR_OUT("Request ID not found in the map");
-        DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+        DPRINTF(RubyStorebuffer, "id: %lld\n", id);
         ASSERT(0);
     } else {
         request_map[id]->complete(id);
@@ -73,11 +73,6 @@ StoreBuffer::StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size)
     if (m_storebuffer_size > 0){
         m_use_storebuffer = true;
     }
-
-#ifdef DEBUG_WRITE_BUFFER
-    DEBUG_OUT("*******storebuffer_t::Using Write Buffer? %d\n",
-              m_use_storebuffer);
-#endif
 }
 
 StoreBuffer::~StoreBuffer()
 {
 }
@@ -100,7 +95,7 @@ StoreBuffer::addToStoreBuffer(RubyRequest request)
         uint64_t id = libruby_issue_request(m_port, request);
         if (request_map.find(id) != request_map.end()) {
             ERROR_OUT("Request ID is already in the map");
-            DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+            DPRINTF(RubyStorebuffer, "id: %lld\n", id);
             ASSERT(0);
         } else {
             request_map.insert(make_pair(id, this));
@@ -110,12 +105,6 @@ StoreBuffer::addToStoreBuffer(RubyRequest request)
     }
 
-#ifdef DEBUG_WRITE_BUFFER
-    DEBUG_OUT("\n***StoreBuffer: addToStoreBuffer BEGIN, contents:\n");
-    DEBUG_OUT("\n");
-    DEBUG_OUT("\t INSERTING new request\n");
-#endif
-
     buffer.push_front(SBEntry(request, NULL));
     m_buffer_size++;
@@ -128,11 +117,6 @@ ...
     }
 
     iseq++;
-
-#ifdef DEBUG_WRITE_BUFFER
-    DEBUG_OUT("***StoreBuffer: addToStoreBuffer END, contents:\n");
-    DEBUG_OUT("\n");
-#endif
 }
 
@@ -161,7 +145,7 @@ StoreBuffer::handleLoad(RubyRequest request)
         uint64_t id = libruby_issue_request(m_port, request);
         if (request_map.find(id) != request_map.end()) {
             ERROR_OUT("Request ID is already in the map");
-            DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+            DPRINTF(RubyStorebuffer, "id: %lld\n", id);
             ASSERT(0);
         } else {
             request_map.insert(make_pair(id, this));
@@ -285,11 +269,6 @@ StoreBuffer::flushStoreBuffer()
         return;
     }
 
-#ifdef DEBUG_WRITE_BUFFER
-    DEBUG_OUT("\n***StoreBuffer: flushStoreBuffer BEGIN, contents:\n");
-    DEBUG_OUT("\n");
-#endif
-
     m_storebuffer_flushing = (m_buffer_size > 0);
 }
 
@@ -318,10 +297,6 @@ StoreBuffer::complete(uint64_t id)
     physical_address_t physical_address =
         outstanding_requests.find(id)->second.paddr;
     RubyRequestType type = outstanding_requests.find(id)->second.type;
-#ifdef DEBUG_WRITE_BUFFER
-    DEBUG_OUT("\n***StoreBuffer: complete BEGIN, contents:\n");
-    DEBUG_OUT("\n");
-#endif
 
     if (type == RubyRequestType_ST) {
         physical_address_t lineaddr = physical_address & m_block_mask;
@@ -357,10 +332,6 @@ ...
             ASSERT(0);
         }
-#ifdef DEBUG_WRITE_BUFFER
-        DEBUG_OUT("***StoreBuffer: complete END, contents:\n");
-        DEBUG_OUT("\n");
-#endif
     } else if (type == RubyRequestType_LD) {
         m_hit_callback(id);
     }
@@ -372,13 +343,10 @@ void
 StoreBuffer::print()
 {
-    DEBUG_OUT("[%d] StoreBuffer: Total entries: %d Outstanding: %d\n",
-              m_id, m_buffer_size);
+    DPRINTF(RubyStorebuffer, "[%d] StoreBuffer: Total entries: %d "
+            "Outstanding: %d\n",
+            m_id, m_storebuffer_size, m_buffer_size);
     if (!m_use_storebuffer)
-        DEBUG_OUT("\t WRITE BUFFER NOT USED\n");
+        DPRINTF(RubyStorebuffer, "\t WRITE BUFFER NOT USED\n");
 }
-
-
-
-
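Both hit() and addToStoreBuffer() above key outstanding requests by the id returned from libruby_issue_request() and treat a duplicate or missing id as fatal. A minimal sketch of that bookkeeping follows; the class and method names are invented and this is not the libruby interface:

    // Sketch of the id -> handler bookkeeping a store buffer relies on.
    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    class StoreBufferSketch {
      public:
        void issue(uint64_t id)
        {
            // A duplicate id would mean two in-flight requests share a slot.
            assert(request_map.find(id) == request_map.end());
            request_map.emplace(id, this);
            ++outstanding;
        }

        void hit(uint64_t id)
        {
            auto it = request_map.find(id);
            assert(it != request_map.end() && "Request ID not found in the map");
            request_map.erase(it);
            --outstanding;
            std::cout << "completed request " << id << '\n';
        }

        int outstanding = 0;

      private:
        std::unordered_map<uint64_t, StoreBufferSketch *> request_map;
    };

    int main()
    {
        StoreBufferSketch sb;
        sb.issue(42);       // in a real store buffer the id comes from the port
        sb.hit(42);
        return 0;
    }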
diff --git a/src/mem/ruby/system/CacheMemory.cc b/src/mem/ruby/system/CacheMemory.cc
index 59f97e5fe..87baebd0c 100644
--- a/src/mem/ruby/system/CacheMemory.cc
+++ b/src/mem/ruby/system/CacheMemory.cc
@@ -166,7 +166,7 @@ CacheMemory::tryCacheAccess(const Address& address, CacheRequestType type,
                             DataBlock*& data_ptr)
 {
     assert(address == line_address(address));
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+    DPRINTF(RubyCache, "address: %s\n", address);
     Index cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if (loc != -1) {
@@ -194,7 +194,7 @@ CacheMemory::testCacheAccess(const Address& address, CacheRequestType type,
                              DataBlock*& data_ptr)
 {
     assert(address == line_address(address));
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+    DPRINTF(RubyCache, "address: %s\n", address);
     Index cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet,
                            address);
@@ -223,12 +223,10 @@ CacheMemory::isTagPresent(const Address& address) const
     if (loc == -1) {
         // We didn't find the tag
-        DEBUG_EXPR(CACHE_COMP, LowPrio, address);
-        DEBUG_MSG(CACHE_COMP, LowPrio, "No tag match");
+        DPRINTF(RubyCache, "No tag match for address: %s\n", address);
         return false;
     }
-    DEBUG_EXPR(CACHE_COMP, LowPrio, address);
-    DEBUG_MSG(CACHE_COMP, LowPrio, "found");
+    DPRINTF(RubyCache, "address: %s found\n", address);
     return true;
 }
 
@@ -263,7 +261,7 @@ CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
     assert(address == line_address(address));
     assert(!isTagPresent(address));
     assert(cacheAvail(address));
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+    DPRINTF(RubyCache, "address: %s\n", address);
 
     // Find the first open slot
     Index cacheSet = addressToCacheSet(address);
@@ -292,7 +290,7 @@ CacheMemory::deallocate(const Address& address)
 {
     assert(address == line_address(address));
     assert(isTagPresent(address));
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+    DPRINTF(RubyCache, "address: %s\n", address);
     Index cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if (loc != -1) {
diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc
index fbb48d7f5..4a72dce33 100644
--- a/src/mem/ruby/system/DirectoryMemory.cc
+++ b/src/mem/ruby/system/DirectoryMemory.cc
@@ -157,7 +157,7 @@ DirectoryMemory::lookup(PhysAddress address)
     assert(isPresent(address));
     Directory_Entry* entry;
     uint64 idx;
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+    DPRINTF(RubyCache, "address: %s\n", address);
 
     if (m_use_map) {
         if (m_sparseMemory->exist(address)) {
diff --git a/src/mem/ruby/system/SConscript b/src/mem/ruby/system/SConscript
index 6d1aff31d..edc9d451f 100644
--- a/src/mem/ruby/system/SConscript
+++ b/src/mem/ruby/system/SConscript
@@ -50,6 +50,3 @@ Source('RubyPort.cc')
 Source('Sequencer.cc', Werror=False)
 Source('System.cc')
 Source('TimerTable.cc')
-
-TraceFlag('RubyCache')
-TraceFlag('RubyDma')
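The CacheMemory routines traced above (tryCacheAccess, allocate, deallocate) all follow the same two-step lookup: map the line address to a set, then search that set's ways for the tag. A toy version of that split is sketched below; the geometry parameters and names are made up and this is not gem5's CacheMemory:

    // Sketch of set-associative tag lookup: line address -> set -> way.
    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <vector>

    struct CacheSketch {
        static constexpr int kBlockBits = 6;     // 64 B lines (assumed)
        static constexpr int kNumSets   = 256;
        static constexpr int kNumWays   = 4;

        // tags[set][way]; 0 means "invalid" in this toy model
        std::vector<std::vector<uint64_t>> tags =
            std::vector<std::vector<uint64_t>>(
                kNumSets, std::vector<uint64_t>(kNumWays, 0));

        static uint64_t lineAddress(uint64_t addr) { return addr >> kBlockBits; }

        static int addressToCacheSet(uint64_t addr)
        {
            return static_cast<int>(lineAddress(addr) % kNumSets);
        }

        // Returns the way holding the tag, or nullopt on a miss.
        std::optional<int> findTagInSet(int set, uint64_t addr) const
        {
            uint64_t tag = lineAddress(addr);
            for (int way = 0; way < kNumWays; ++way)
                if (tags[set][way] == tag)
                    return way;
            return std::nullopt;
        }
    };

    int main()
    {
        CacheSketch cache;
        uint64_t addr = 0x12340;
        int set = CacheSketch::addressToCacheSet(addr);
        cache.tags[set][1] = CacheSketch::lineAddress(addr);   // fill one way
        std::cout << "hit in way "
                  << cache.findTagInSet(set, addr).value_or(-1) << '\n';
        return 0;
    }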
diff --git a/src/mem/ruby/system/SparseMemory.cc b/src/mem/ruby/system/SparseMemory.cc
index c4f636322..376852826 100644
--- a/src/mem/ruby/system/SparseMemory.cc
+++ b/src/mem/ruby/system/SparseMemory.cc
@@ -112,7 +112,7 @@ SparseMemory::exist(const Address& address) const
     int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
     int lowBit;
     assert(address == line_address(address));
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+    DPRINTF(RubyCache, "address: %s\n", address);
 
     for (int level = 0; level < m_number_of_levels; level++) {
         // Create the appropriate sub address for this level
@@ -122,10 +122,9 @@ SparseMemory::exist(const Address& address) const
         lowBit = highBit - m_number_of_bits_per_level[level];
         curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
 
-        DEBUG_EXPR(CACHE_COMP, HighPrio, level);
-        DEBUG_EXPR(CACHE_COMP, HighPrio, lowBit);
-        DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
-        DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
+        DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
+                "curAddress: %s\n",
+                level, lowBit, highBit - 1, curAddress);
 
         // Adjust the highBit value for the next level
         highBit -= m_number_of_bits_per_level[level];
@@ -135,12 +134,12 @@ SparseMemory::exist(const Address& address) const
         if (curTable->count(curAddress) != 0) {
             curTable = (SparseMapType*)(((*curTable)[curAddress]).entry);
         } else {
-            DEBUG_MSG(CACHE_COMP, HighPrio, "Not found");
+            DPRINTF(RubyCache, "Not found\n");
             return false;
         }
     }
 
-    DEBUG_MSG(CACHE_COMP, HighPrio, "Entry found");
+    DPRINTF(RubyCache, "Entry found\n");
     return true;
 }
 
@@ -224,11 +223,10 @@ SparseMemory::recursivelyRemoveLevels(const Address& address,
     curAddress.setAddress(address.bitSelect(curInfo.lowBit,
                                             curInfo.highBit - 1));
 
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
-    DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.level);
-    DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.lowBit);
-    DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.highBit - 1);
-    DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
+    DPRINTF(RubyCache, "address: %s, curInfo.level: %d, curInfo.lowBit: %d, "
+            "curInfo.highBit - 1: %d, curAddress: %s\n",
+            address, curInfo.level, curInfo.lowBit,
+            curInfo.highBit - 1, curAddress);
 
     assert(curInfo.curTable->count(curAddress) != 0);
@@ -307,7 +305,7 @@ SparseMemory::lookup(const Address& address)
     assert(exist(address));
     assert(address == line_address(address));
 
-    DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+    DPRINTF(RubyCache, "address: %s\n", address);
 
     Address curAddress;
     SparseMapType* curTable = m_map_head;
@@ -327,10 +325,9 @@ SparseMemory::lookup(const Address& address)
         lowBit = highBit - m_number_of_bits_per_level[level];
         curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
 
-        DEBUG_EXPR(CACHE_COMP, HighPrio, level);
-        DEBUG_EXPR(CACHE_COMP, HighPrio, lowBit);
-        DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
-        DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
+        DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
+                "curAddress: %s\n",
+                level, lowBit, highBit - 1, curAddress);
 
         // Adjust the highBit value for the next level
         highBit -= m_number_of_bits_per_level[level];
diff --git a/src/mem/ruby/tester/RaceyPseudoThread.cc b/src/mem/ruby/tester/RaceyPseudoThread.cc
index 79f6d1550..eaae1112f 100644
--- a/src/mem/ruby/tester/RaceyPseudoThread.cc
+++ b/src/mem/ruby/tester/RaceyPseudoThread.cc
@@ -60,9 +60,7 @@ void RaceyPseudoThread::checkForDeadlock() {
 void RaceyPseudoThread::performCallback(int proc, Address address,
                                         uint8_t * data ) {
   assert(proc == m_proc_id);
-  DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
-  DEBUG_EXPR(TESTER_COMP, LowPrio, address);
-
+  DPRINTF(RubyTester, "proc: %d, address: %s\n", proc, address);
 
   m_last_progress = m_driver.eventQueue->getTime();
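SparseMemory::exist() and lookup() above walk one table per level, using bitSelect() to carve out the slice of the line address that indexes that level; a missing entry at any level means the line is absent. A compact sketch of the same walk over an invented two-level layout (not gem5's SparseMemory types):

    // Sketch of a multi-level sparse lookup indexed by address bit slices.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <vector>

    struct Node {
        std::map<uint64_t, std::unique_ptr<Node>> children;
    };

    // Extract bits [low, high] of addr (inclusive), mirroring bitSelect().
    uint64_t bitSelect(uint64_t addr, int low, int high)
    {
        return (addr >> low) & ((uint64_t(1) << (high - low + 1)) - 1);
    }

    bool exist(const Node &root, uint64_t line_addr,
               const std::vector<int> &bits_per_level, int total_bits)
    {
        const Node *cur = &root;
        int high = total_bits;                  // exclusive upper bit
        for (int width : bits_per_level) {
            int low = high - width;
            uint64_t slice = bitSelect(line_addr, low, high - 1);
            auto it = cur->children.find(slice);
            if (it == cur->children.end())
                return false;                   // "Not found"
            cur = it->second.get();
            high = low;                         // descend to the next level
        }
        return true;                            // "Entry found"
    }

    int main()
    {
        // Two levels of 4 bits each over an 8-bit line-address space.
        Node root;
        root.children[0xA] = std::make_unique<Node>();
        root.children[0xA]->children[0x5] = std::make_unique<Node>();
        std::cout << exist(root, 0xA5, {4, 4}, 8) << ' '
                  << exist(root, 0xA6, {4, 4}, 8) << '\n';   // prints "1 0"
        return 0;
    }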
+
+            format = "%s" % (self.exprs[1].inline())
+            format_length = len(format)
+            str_list = []
+
+            for i in range(2, len(self.exprs)):
+                str_list.append("%s" % self.exprs[i].inline())
+
+            if len(str_list) == 0:
+                code('DPRINTF(RubySlicc, "$0: $1")',
+                     self.exprs[0].location, format[2:format_length-2])
+            else:
+                code('DPRINTF(RubySlicc, "$0: $1", $2)',
+                     self.exprs[0].location, format[2:format_length-2],
+                     ', '.join(str_list))
 
             return self.symtab.find("void", Type)
diff --git a/src/mem/slicc/symbols/StateMachine.py b/src/mem/slicc/symbols/StateMachine.py
index c1926fbab..a7ac556e1 100644
--- a/src/mem/slicc/symbols/StateMachine.py
+++ b/src/mem/slicc/symbols/StateMachine.py
@@ -743,7 +743,7 @@ void $c_ident::clearStats() {
 void
 $c_ident::${{action.ident}}(const Address& addr)
 {
-    DEBUG_MSG(GENERATED_COMP, HighPrio, "executing");
+    DPRINTF(RubyGenerated, "executing\\n");
     ${{action["c_code"]}}
 }
 
@@ -814,7 +814,6 @@ ${ident}_Controller::wakeup()
         break;  // If we got this far, we have nothing left todo
     }
     // g_eventQueue_ptr->scheduleEvent(this, 1);
-    // DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
 }
 ''')
 
@@ -849,19 +848,19 @@ ${ident}_Controller::doTransition(${ident}_Event event,
 {
     ${ident}_State next_state = state;
 
-    DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
-    DEBUG_MSG(GENERATED_COMP, MedPrio, *this);
-    DEBUG_EXPR(GENERATED_COMP, MedPrio, g_eventQueue_ptr->getTime());
-    DEBUG_EXPR(GENERATED_COMP, MedPrio,state);
-    DEBUG_EXPR(GENERATED_COMP, MedPrio,event);
-    DEBUG_EXPR(GENERATED_COMP, MedPrio,addr);
+    DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %s\\n",
+            *this,
+            g_eventQueue_ptr->getTime(),
+            ${ident}_State_to_string(state),
+            ${ident}_Event_to_string(event),
+            addr);
 
     TransitionResult result =
         doTransitionWorker(event, state, next_state, addr);
 
     if (result == TransitionResult_Valid) {
-        DEBUG_EXPR(GENERATED_COMP, MedPrio, next_state);
-        DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
+        DPRINTF(RubyGenerated, "next_state: %s\\n",
+                ${ident}_State_to_string(next_state));
         m_profiler.countTransition(state, event);
         if (Debug::getProtocolTrace()) {
             g_system_ptr->getProfiler()->profileTransition("${ident}",
@@ -884,8 +883,7 @@ ${ident}_Controller::doTransition(${ident}_Event event,
                     "Resource Stall");
         }
     } else if (result == TransitionResult_ProtocolStall) {
-        DEBUG_MSG(GENERATED_COMP, HighPrio, "stalling");
-        DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
+        DPRINTF(RubyGenerated, "stalling\\n");
         if (Debug::getProtocolTrace()) {
             g_system_ptr->getProfiler()->profileTransition("${ident}",
                                                            m_version, addr,
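With the StateMachine.py change above, the generated doTransition() prints the controller, time, state, event and address (via the generated *_to_string helpers) before calling doTransitionWorker(), and the resulting next state afterwards. The sketch below mimics that trace shape with a two-state toy protocol; it is illustrative only and is not SLICC output:

    // Sketch of a doTransition-style trace around a toy state machine.
    #include <cstdint>
    #include <cstdio>
    #include <string>

    enum class State { I, M };
    enum class Event { Store };

    std::string to_string(State s) { return s == State::I ? "I" : "M"; }
    std::string to_string(Event e) { return e == Event::Store ? "Store" : "?"; }

    State doTransition(Event event, State state, uint64_t addr, long long time)
    {
        // Trace the inputs before the transition is attempted.
        std::printf("Time: %lld, state: %s, event: %s, addr: %#llx\n",
                    time, to_string(state).c_str(), to_string(event).c_str(),
                    (unsigned long long)addr);

        State next_state =
            (state == State::I && event == Event::Store) ? State::M : state;

        // Trace the outcome, as the generated controller now does.
        std::printf("next_state: %s\n", to_string(next_state).c_str());
        return next_state;
    }

    int main()
    {
        doTransition(Event::Store, State::I, 0x1000, 0);
        return 0;
    }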