From 5592798865ece858bab2b444bc782d19121e2566 Mon Sep 17 00:00:00 2001
From: Steve Reinhardt
Date: Sat, 6 Feb 2016 17:21:19 -0800
Subject: style: fix missing spaces in control statements

Result of running 'hg m5style --skip-all --fix-control -a'.
---
 src/mem/bridge.cc | 2 +-
 src/mem/cache/prefetch/stride.cc | 2 +-
 src/mem/dram_ctrl.cc | 12 ++++++------
 src/mem/physical.cc | 2 +-
 src/mem/port.cc | 4 ++--
 src/mem/ruby/filters/BulkBloomFilter.cc | 2 +-
 src/mem/ruby/filters/H3BloomFilter.cc | 4 ++--
 src/mem/ruby/filters/MultiBitSelBloomFilter.cc | 2 +-
 src/mem/ruby/filters/MultiGrainBloomFilter.cc | 4 ++--
 src/mem/ruby/filters/NonCountingBloomFilter.cc | 2 +-
 src/mem/ruby/network/MessageBuffer.cc | 2 +-
 .../network/garnet/fixed-pipeline/GarnetNetwork_d.cc | 4 ++--
 .../garnet/fixed-pipeline/NetworkInterface_d.cc | 2 +-
 src/mem/ruby/network/simple/PerfectSwitch.cc | 4 ++--
 src/mem/ruby/profiler/AccessTraceForAddress.cc | 4 ++--
 src/mem/ruby/slicc_interface/AbstractController.cc | 2 +-
 src/mem/ruby/structures/AbstractReplacementPolicy.cc | 4 ++--
 src/mem/ruby/structures/BankedArray.cc | 2 +-
 src/mem/ruby/structures/CacheMemory.cc | 10 +++++-----
 src/mem/ruby/structures/PseudoLRUPolicy.cc | 2 +-
 src/mem/ruby/structures/RubyMemoryControl.cc | 2 +-
 src/mem/ruby/structures/TBETable.hh | 2 +-
 src/mem/ruby/system/GPUCoalescer.cc | 20 ++++++++++----------
 src/mem/ruby/system/Sequencer.cc | 2 +-
 src/mem/ruby/system/VIPERCoalescer.cc | 6 +++---
 src/mem/ruby/system/WeightedLRUPolicy.cc | 4 ++--
 src/mem/serial_link.cc | 2 +-
 src/mem/stack_dist_calc.cc | 4 ++--
 28 files changed, 57 insertions(+), 57 deletions(-)

(limited to 'src/mem')

diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index a8ca56b6a..226647fdc 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -385,7 +385,7 @@ Bridge::BridgeMasterPort::checkFunctional(PacketPtr pkt)
     bool found = false;
     auto i = transmitList.begin();
 
-    while(i != transmitList.end() && !found) {
+    while (i != transmitList.end() && !found) {
         if (pkt->checkFunctional((*i).pkt)) {
             pkt->makeResponse();
             found = true;
diff --git a/src/mem/cache/prefetch/stride.cc b/src/mem/cache/prefetch/stride.cc
index bcd72f25a..4456cf237 100644
--- a/src/mem/cache/prefetch/stride.cc
+++ b/src/mem/cache/prefetch/stride.cc
@@ -114,7 +114,7 @@ StridePrefetcher::calculatePrefetch(const PacketPtr &pkt,
     // Lookup pc-based information
     StrideEntry *entry;
 
-    if(pcTableHit(pc, is_secure, master_id, entry)) {
+    if (pcTableHit(pc, is_secure, master_id, entry)) {
         // Hit in table
         int new_stride = pkt_addr - entry->lastAddr;
         bool stride_match = (new_stride == entry->stride);
diff --git a/src/mem/dram_ctrl.cc b/src/mem/dram_ctrl.cc
index e3c532455..c7ad3b448 100644
--- a/src/mem/dram_ctrl.cc
+++ b/src/mem/dram_ctrl.cc
@@ -723,7 +723,7 @@ DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
 
     if (memSchedPolicy == Enums::fcfs) {
         // check if there is a packet going to a free rank
-        for(auto i = queue.begin(); i != queue.end() ; ++i) {
+        for (auto i = queue.begin(); i != queue.end() ; ++i) {
             DRAMPacket* dram_pkt = *i;
             if (ranks[dram_pkt->rank]->isAvailable()) {
                 queue.erase(i);
@@ -911,7 +911,7 @@ DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
     bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);
 
     // start by enforcing tRRD
-    for(int i = 0; i < banksPerRank; i++) {
+    for (int i = 0; i < banksPerRank; i++) {
         // next activate to any bank in this rank must not happen
         // before tRRD
         if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
@@ -956,7 +956,7 @@ DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
             DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                     "no earlier than %llu\n", activationLimit,
                     rank_ref.actTicks.back() + tXAW);
-            for(int j = 0; j < banksPerRank; j++)
+            for (int j = 0; j < banksPerRank; j++)
                 // next activate must not happen before end of window
                 rank_ref.banks[j].actAllowedAt =
                     std::max(rank_ref.actTicks.back() + tXAW,
@@ -1073,8 +1073,8 @@ DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
     // update the time for the next read/write burst for each
     // bank (add a max with tCCD/tCCD_L here)
     Tick cmd_dly;
-    for(int j = 0; j < ranksPerChannel; j++) {
-        for(int i = 0; i < banksPerRank; i++) {
+    for (int j = 0; j < ranksPerChannel; j++) {
+        for (int i = 0; i < banksPerRank; i++) {
             // next burst to same bank group in this rank must not happen
             // before tCCD_L. Different bank group timing requirement is
             // tBURST; Add tCS for different ranks
@@ -1454,7 +1454,7 @@ DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
     // bank in question
     vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
     for (const auto& p : queue) {
-        if(p->rankRef.isAvailable())
+        if (p->rankRef.isAvailable())
             got_waiting[p->bankId] = true;
     }
 
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index dfea2e9e1..5fd459121 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -373,7 +373,7 @@ PhysicalMemory::unserialize(CheckpointIn &cp)
     vector<ContextID> lal_cid;
     UNSERIALIZE_CONTAINER(lal_addr);
     UNSERIALIZE_CONTAINER(lal_cid);
-    for(size_t i = 0; i < lal_addr.size(); ++i) {
+    for (size_t i = 0; i < lal_addr.size(); ++i) {
         const auto& m = addrMap.find(lal_addr[i]);
         m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
     }
diff --git a/src/mem/port.cc b/src/mem/port.cc
index 03b68814f..91b9408eb 100644
--- a/src/mem/port.cc
+++ b/src/mem/port.cc
@@ -72,7 +72,7 @@ BaseMasterPort::~BaseMasterPort()
 BaseSlavePort&
 BaseMasterPort::getSlavePort() const
 {
-    if(_baseSlavePort == NULL)
+    if (_baseSlavePort == NULL)
         panic("Cannot getSlavePort on master port %s that is not connected\n",
               name());
 
@@ -98,7 +98,7 @@ BaseSlavePort::~BaseSlavePort()
 BaseMasterPort&
 BaseSlavePort::getMasterPort() const
 {
-    if(_baseMasterPort == NULL)
+    if (_baseMasterPort == NULL)
         panic("Cannot getMasterPort on slave port %s that is not connected\n",
               name());
 
diff --git a/src/mem/ruby/filters/BulkBloomFilter.cc b/src/mem/ruby/filters/BulkBloomFilter.cc
index 478871a40..f634b13ad 100644
--- a/src/mem/ruby/filters/BulkBloomFilter.cc
+++ b/src/mem/ruby/filters/BulkBloomFilter.cc
@@ -146,7 +146,7 @@ BulkBloomFilter::isSet(Addr addr)
 
     // check second section
     zero = false;
-    for(int i = m_filter_size / 2; i < m_filter_size; ++i) {
+    for (int i = m_filter_size / 2; i < m_filter_size; ++i) {
         // get intersection of signatures
         m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
         zero = zero || m_temp_filter[i];
diff --git a/src/mem/ruby/filters/H3BloomFilter.cc b/src/mem/ruby/filters/H3BloomFilter.cc
index 5d6a9558f..a9a0cdc5b 100644
--- a/src/mem/ruby/filters/H3BloomFilter.cc
+++ b/src/mem/ruby/filters/H3BloomFilter.cc
@@ -419,7 +419,7 @@ H3BloomFilter::merge(AbstractBloomFilter *other_filter)
 {
     // assumes both filters are the same size!
     H3BloomFilter * temp = (H3BloomFilter*) other_filter;
-    for(int i = 0; i < m_filter_size; ++i){
+    for (int i = 0; i < m_filter_size; ++i){
         m_filter[i] |= (*temp)[i];
     }
 }
@@ -513,7 +513,7 @@ H3BloomFilter::hash_H3(uint64_t value, int index)
     int result = 0;
     for (int i = 0; i < 64; i++) {
-        if(val&mask) result ^= H3[i][index];
+        if (val&mask) result ^= H3[i][index];
         val = val >> 1;
     }
     return result;
 }
diff --git a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
index f326030e9..4cbf25f57 100644
--- a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
+++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
@@ -93,7 +93,7 @@ MultiBitSelBloomFilter::merge(AbstractBloomFilter *other_filter)
 {
     // assumes both filters are the same size!
     MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
-    for(int i = 0; i < m_filter_size; ++i){
+    for (int i = 0; i < m_filter_size; ++i){
         m_filter[i] |= (*temp)[i];
     }
 }
diff --git a/src/mem/ruby/filters/MultiGrainBloomFilter.cc b/src/mem/ruby/filters/MultiGrainBloomFilter.cc
index a6642ab7b..0226a2957 100644
--- a/src/mem/ruby/filters/MultiGrainBloomFilter.cc
+++ b/src/mem/ruby/filters/MultiGrainBloomFilter.cc
@@ -58,7 +58,7 @@ MultiGrainBloomFilter::clear()
     for (int i = 0; i < m_filter_size; i++) {
         m_filter[i] = 0;
     }
-    for(int i=0; i < m_page_filter_size; ++i){
+    for (int i=0; i < m_page_filter_size; ++i){
         m_page_filter[i] = 0;
     }
 }
@@ -125,7 +125,7 @@ MultiGrainBloomFilter::getTotalCount()
         count += m_filter[i];
     }
 
-    for(int i=0; i < m_page_filter_size; ++i) {
+    for (int i=0; i < m_page_filter_size; ++i) {
         count += m_page_filter[i] = 0;
     }
 
diff --git a/src/mem/ruby/filters/NonCountingBloomFilter.cc b/src/mem/ruby/filters/NonCountingBloomFilter.cc
index d949d02c4..fe76f7c90 100644
--- a/src/mem/ruby/filters/NonCountingBloomFilter.cc
+++ b/src/mem/ruby/filters/NonCountingBloomFilter.cc
@@ -73,7 +73,7 @@ NonCountingBloomFilter::merge(AbstractBloomFilter *other_filter)
 {
     // assumes both filters are the same size!
     NonCountingBloomFilter * temp = (NonCountingBloomFilter*) other_filter;
-    for(int i = 0; i < m_filter_size; ++i){
+    for (int i = 0; i < m_filter_size; ++i){
         m_filter[i] |= (*temp)[i];
     }
 }
diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc
index 3f1586916..b78e9aad0 100644
--- a/src/mem/ruby/network/MessageBuffer.cc
+++ b/src/mem/ruby/network/MessageBuffer.cc
@@ -263,7 +263,7 @@ MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
 void
 MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
 {
-    while(!lt.empty()) {
+    while (!lt.empty()) {
         m_msg_counter++;
         MsgPtr m = lt.front();
         m->setLastEnqueueTime(schdTick);
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/GarnetNetwork_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/GarnetNetwork_d.cc
index f6fe6f586..ce919335e 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/GarnetNetwork_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/GarnetNetwork_d.cc
@@ -52,7 +52,7 @@ GarnetNetwork_d::GarnetNetwork_d(const Params *p)
 
     m_vnet_type.resize(m_virtual_networks);
 
-    for(int i = 0 ; i < m_virtual_networks ; i++)
+    for (int i = 0 ; i < m_virtual_networks ; i++)
     {
         if (m_vnet_type_names[i] == "response")
             m_vnet_type[i] = DATA_VNET_; // carries data (and ctrl) packets
@@ -94,7 +94,7 @@ GarnetNetwork_d::init()
     m_topology_ptr->createLinks(this);
 
     // FaultModel: declare each router to the fault model
-    if(isFaultModelEnabled()){
+    if (isFaultModelEnabled()){
         for (vector::const_iterator i= m_routers.begin();
              i != m_routers.end(); ++i) {
             Router_d* router = safe_cast<Router_d*>(*i);
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
index a02ac83f0..b1ce027dc 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
@@ -185,7 +185,7 @@ NetworkInterface_d::calculateVC(int vnet)
     for (int i = 0; i < m_vc_per_vnet; i++) {
         int delta = m_vc_allocator[vnet];
         m_vc_allocator[vnet]++;
-        if(m_vc_allocator[vnet] == m_vc_per_vnet)
+        if (m_vc_allocator[vnet] == m_vc_per_vnet)
             m_vc_allocator[vnet] = 0;
 
         if (m_out_vc_state[(vnet*m_vc_per_vnet) + delta]->isInState(
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 301d453c5..027c8baee 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -61,7 +61,7 @@ PerfectSwitch::init(SimpleNetwork *network_ptr)
 {
     m_network_ptr = network_ptr;
 
-    for(int i = 0;i < m_virtual_networks;++i) {
+    for (int i = 0;i < m_virtual_networks;++i) {
         m_pending_message_count.push_back(0);
     }
 }
@@ -110,7 +110,7 @@ PerfectSwitch::operateVnet(int vnet)
         m_round_robin_start = 0;
     }
 
-    if(m_pending_message_count[vnet] > 0) {
+    if (m_pending_message_count[vnet] > 0) {
         // for all input ports, use round robin scheduling
         for (int counter = 0; counter < m_in.size(); counter++) {
             // Round robin scheduling
diff --git a/src/mem/ruby/profiler/AccessTraceForAddress.cc b/src/mem/ruby/profiler/AccessTraceForAddress.cc
index a61c7329f..dc5f1ac8d 100644
--- a/src/mem/ruby/profiler/AccessTraceForAddress.cc
+++ b/src/mem/ruby/profiler/AccessTraceForAddress.cc
@@ -64,9 +64,9 @@ AccessTraceForAddress::update(RubyRequestType type,
 {
     m_touched_by.add(cpu);
     m_total++;
-    if(type == RubyRequestType_ATOMIC) {
+    if (type == RubyRequestType_ATOMIC) {
         m_atomics++;
-    } else if(type == RubyRequestType_LD){
+    } else if (type == RubyRequestType_LD){
         m_loads++;
     } else if (type == RubyRequestType_ST){
         m_stores++;
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index 669fb30fb..2a53e53be 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -156,7 +156,7 @@ AbstractController::wakeUpAllBuffers()
     std::vector<MsgVecType*> wokeUpMsgVecs;
     MsgBufType wokeUpMsgBufs;
 
-    if(m_waiting_buffers.size() > 0) {
+    if (m_waiting_buffers.size() > 0) {
         for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
              buf_iter != m_waiting_buffers.end();
              ++buf_iter) {
diff --git a/src/mem/ruby/structures/AbstractReplacementPolicy.cc b/src/mem/ruby/structures/AbstractReplacementPolicy.cc
index d802ecd31..5abd55083 100644
--- a/src/mem/ruby/structures/AbstractReplacementPolicy.cc
+++ b/src/mem/ruby/structures/AbstractReplacementPolicy.cc
@@ -36,9 +36,9 @@ AbstractReplacementPolicy::AbstractReplacementPolicy(const Params * p)
     m_num_sets = p->size/p->block_size/p->assoc;
     m_assoc = p->assoc;
     m_last_ref_ptr = new Tick*[m_num_sets];
-    for(unsigned i = 0; i < m_num_sets; i++){
+    for (unsigned i = 0; i < m_num_sets; i++){
         m_last_ref_ptr[i] = new Tick[m_assoc];
-        for(unsigned j = 0; j < m_assoc; j++){
+        for (unsigned j = 0; j < m_assoc; j++){
             m_last_ref_ptr[i][j] = 0;
         }
     }
diff --git a/src/mem/ruby/structures/BankedArray.cc b/src/mem/ruby/structures/BankedArray.cc
index 550693ca2..5054eae8a 100644
--- a/src/mem/ruby/structures/BankedArray.cc
+++ b/src/mem/ruby/structures/BankedArray.cc
@@ -73,7 +73,7 @@ BankedArray::reserve(int64_t idx)
     unsigned int bank = mapIndexToBank(idx);
     assert(bank < banks);
 
-    if(busyBanks[bank].endAccess >= curTick()) {
+    if (busyBanks[bank].endAccess >= curTick()) {
         if (busyBanks[bank].startAccess == curTick() &&
             busyBanks[bank].idx == idx) {
             // this is the same reservation (can happen when
diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc
index 45fb85d05..f7c196119 100644
--- a/src/mem/ruby/structures/CacheMemory.cc
+++ b/src/mem/ruby/structures/CacheMemory.cc
@@ -323,7 +323,7 @@ CacheMemory::lookup(Addr address)
     assert(address == makeLineAddress(address));
     int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
-    if(loc == -1) return NULL;
+    if (loc == -1) return NULL;
     return m_cache[cacheSet][loc];
 }
 
@@ -334,7 +334,7 @@ CacheMemory::lookup(Addr address) const
     assert(address == makeLineAddress(address));
     int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
-    if(loc == -1) return NULL;
+    if (loc == -1) return NULL;
     return m_cache[cacheSet][loc];
 }
 
@@ -345,7 +345,7 @@ CacheMemory::setMRU(Addr address)
     int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
 
-    if(loc != -1)
+    if (loc != -1)
         m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
 }
 
@@ -363,7 +363,7 @@ CacheMemory::setMRU(Addr address, int occupancy)
     int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
 
-    if(loc != -1) {
+    if (loc != -1) {
         if (m_replacementPolicy_ptr->useOccupancy()) {
             (static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr))->
                 touch(cacheSet, loc, curTick(), occupancy);
@@ -380,7 +380,7 @@ CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
     assert(set < m_cache_num_sets);
     assert(loc < m_cache_assoc);
     int ret = 0;
-    if(m_cache[set][loc] != NULL) {
+    if (m_cache[set][loc] != NULL) {
         ret = m_cache[set][loc]->getNumValidBlocks();
         assert(ret >= 0);
     }
diff --git a/src/mem/ruby/structures/PseudoLRUPolicy.cc b/src/mem/ruby/structures/PseudoLRUPolicy.cc
index a2b21a625..954e7444a 100644
--- a/src/mem/ruby/structures/PseudoLRUPolicy.cc
+++ b/src/mem/ruby/structures/PseudoLRUPolicy.cc
@@ -51,7 +51,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
     int tmp_assoc = m_effective_assoc;
     while (true) {
         tmp_assoc /= 2;
-        if(!tmp_assoc) break;
+        if (!tmp_assoc) break;
         m_num_levels++;
     }
     assert(m_num_levels < sizeof(unsigned int)*4);
diff --git a/src/mem/ruby/structures/RubyMemoryControl.cc b/src/mem/ruby/structures/RubyMemoryControl.cc
index 77f1c239f..095b2e306 100644
--- a/src/mem/ruby/structures/RubyMemoryControl.cc
+++ b/src/mem/ruby/structures/RubyMemoryControl.cc
@@ -639,7 +639,7 @@ DrainState
 RubyMemoryControl::drain()
 {
     DPRINTF(RubyMemory, "MemoryController drain\n");
-    if(m_event.scheduled()) {
+    if (m_event.scheduled()) {
         deschedule(m_event);
     }
     return DrainState::Drained;
diff --git a/src/mem/ruby/structures/TBETable.hh b/src/mem/ruby/structures/TBETable.hh
index a39c5af2e..b6ee5d7c9 100644
--- a/src/mem/ruby/structures/TBETable.hh
+++ b/src/mem/ruby/structures/TBETable.hh
@@ -110,7 +110,7 @@ template<class ENTRY>
 inline ENTRY*
 TBETable<ENTRY>::lookup(Addr address)
 {
-  if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
+  if (m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
   return NULL;
 }
 
diff --git a/src/mem/ruby/system/GPUCoalescer.cc b/src/mem/ruby/system/GPUCoalescer.cc
index db279bd3a..d4629a0b7 100644
--- a/src/mem/ruby/system/GPUCoalescer.cc
+++ b/src/mem/ruby/system/GPUCoalescer.cc
@@ -239,7 +239,7 @@ GPUCoalescer::getRequestStatus(PacketPtr pkt, RubyRequestType request_type)
         return RequestStatus_BufferFull;
     }
 
-    if(m_controller->isBlocked(line_addr) &&
+    if (m_controller->isBlocked(line_addr) &&
        request_type != RubyRequestType_Locked_RMW_Write) {
         return RequestStatus_Aliased;
     }
@@ -519,7 +519,7 @@ GPUCoalescer::writeCallback(Addr address,
     // Not valid for Network_test protocl
     //
     bool success = true;
-    if(!m_usingNetworkTester)
+    if (!m_usingNetworkTester)
         success = handleLlsc(address, request);
 
     if (request->m_type == RubyRequestType_Locked_RMW_Read) {
@@ -704,7 +704,7 @@ GPUCoalescer::makeRequest(PacketPtr pkt)
         // This is a Kernel Begin leave handling to
         // virtual xCoalescer::makeRequest
         return RequestStatus_Issued;
-    }else if(pkt->req->isRelease()) {
+    }else if (pkt->req->isRelease()) {
         // This is a Kernel End leave handling to
         // virtual xCoalescer::makeRequest
         // If we are here then we didn't call
@@ -917,7 +917,7 @@ GPUCoalescer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
                 std::pair<int, AtomicOpFunctor*> tmpAtomicOp(tmpOffset,
                                                         tmpPkt->getAtomicOp());
                 atomicOps.push_back(tmpAtomicOp);
-            } else if(tmpPkt->isWrite()) {
+            } else if (tmpPkt->isWrite()) {
                 dataBlock.setData(tmpPkt->getPtr<uint8_t>(),
                                   tmpOffset, tmpSize);
             }
@@ -1151,11 +1151,11 @@ GPUCoalescer::atomicCallback(Addr address,
 void
 GPUCoalescer::recordCPReadCallBack(MachineID myMachID, MachineID senderMachID)
 {
-    if(myMachID == senderMachID) {
+    if (myMachID == senderMachID) {
         CP_TCPLdHits++;
-    } else if(machineIDToMachineType(senderMachID) == MachineType_TCP) {
+    } else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
         CP_TCPLdTransfers++;
-    } else if(machineIDToMachineType(senderMachID) == MachineType_TCC) {
+    } else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
         CP_TCCLdHits++;
     } else {
         CP_LdMiss++;
@@ -1165,11 +1165,11 @@ GPUCoalescer::recordCPReadCallBack(MachineID myMachID, MachineID senderMachID)
 void
 GPUCoalescer::recordCPWriteCallBack(MachineID myMachID, MachineID senderMachID)
 {
-    if(myMachID == senderMachID) {
+    if (myMachID == senderMachID) {
         CP_TCPStHits++;
-    } else if(machineIDToMachineType(senderMachID) == MachineType_TCP) {
+    } else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
         CP_TCPStTransfers++;
-    } else if(machineIDToMachineType(senderMachID) == MachineType_TCC) {
+    } else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
         CP_TCCStHits++;
     } else {
         CP_StMiss++;
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index c2727b41d..dedade3cf 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -379,7 +379,7 @@ Sequencer::writeCallback(Addr address, DataBlock& data,
     // Not valid for Network_test protocl
     //
     bool success = true;
-    if(!m_usingNetworkTester)
+    if (!m_usingNetworkTester)
         success = handleLlsc(address, request);
 
     if (request->m_type == RubyRequestType_Locked_RMW_Read) {
diff --git a/src/mem/ruby/system/VIPERCoalescer.cc b/src/mem/ruby/system/VIPERCoalescer.cc
index ca91f2723..6956469d5 100644
--- a/src/mem/ruby/system/VIPERCoalescer.cc
+++ b/src/mem/ruby/system/VIPERCoalescer.cc
@@ -117,7 +117,7 @@ VIPERCoalescer::makeRequest(PacketPtr pkt)
         // isKernel + isRelease
         insertKernel(pkt->req->contextId(), pkt);
         wbL1();
-        if(m_outstanding_wb == 0) {
+        if (m_outstanding_wb == 0) {
            for (auto it = kernelEndList.begin(); it != kernelEndList.end(); it++) {
                newKernelEnds.push_back(it->first);
            }
@@ -261,7 +261,7 @@ VIPERCoalescer::invwbL1()
 {
     int size = m_dataCache_ptr->getNumBlocks();
     // Walk the cache
-    for(int i = 0; i < size; i++) {
+    for (int i = 0; i < size; i++) {
         Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
         // Evict Read-only data
         std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
@@ -273,7 +273,7 @@ VIPERCoalescer::invwbL1()
         m_outstanding_inv++;
     }
     // Walk the cache
-    for(int i = 0; i< size; i++) {
+    for (int i = 0; i< size; i++) {
         Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
         // Write dirty data back
         std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
diff --git a/src/mem/ruby/system/WeightedLRUPolicy.cc b/src/mem/ruby/system/WeightedLRUPolicy.cc
index 5baa4d9a5..2256b2f24 100644
--- a/src/mem/ruby/system/WeightedLRUPolicy.cc
+++ b/src/mem/ruby/system/WeightedLRUPolicy.cc
@@ -39,9 +39,9 @@ WeightedLRUPolicy::WeightedLRUPolicy(const Params* p)
     : AbstractReplacementPolicy(p), m_cache(p->cache)
 {
     m_last_occ_ptr = new int*[m_num_sets];
-    for(unsigned i = 0; i < m_num_sets; i++){
+    for (unsigned i = 0; i < m_num_sets; i++){
         m_last_occ_ptr[i] = new int[m_assoc];
-        for(unsigned j = 0; j < m_assoc; j++){
+        for (unsigned j = 0; j < m_assoc; j++){
             m_last_occ_ptr[i][j] = 0;
         }
     }
diff --git a/src/mem/serial_link.cc b/src/mem/serial_link.cc
index ce90d1fc4..b6cb097b7 100644
--- a/src/mem/serial_link.cc
+++ b/src/mem/serial_link.cc
@@ -413,7 +413,7 @@ SerialLink::SerialLinkMasterPort::checkFunctional(PacketPtr pkt)
     bool found = false;
     auto i = transmitList.begin();
 
-    while(i != transmitList.end() && !found) {
+    while (i != transmitList.end() && !found) {
         if (pkt->checkFunctional((*i).pkt)) {
             pkt->makeResponse();
             found = true;
diff --git a/src/mem/stack_dist_calc.cc b/src/mem/stack_dist_calc.cc
index 2b880f118..886e53977 100644
--- a/src/mem/stack_dist_calc.cc
+++ b/src/mem/stack_dist_calc.cc
@@ -216,13 +216,13 @@ StackDistCalc::getSum(Node* node, bool from_left, uint64_t sum_from_below,
     ++level;
     // Variable stack_dist is updated only
     // when arriving from Left.
-    if(from_left) {
+    if (from_left) {
         stack_dist += node->sumRight;
     }
 
     // Recursively call the getSum operation till the
     // root node is reached
-    if(node->parent) {
+    if (node->parent) {
         stack_dist = getSum(node->parent, node->isLeftNode,
                             node->sumLeft + node->sumRight,
                             stack_dist, level);
--
cgit v1.2.3
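
For readers unfamiliar with the rule being enforced above: the m5style --fix-control pass inserts a single space between a control keyword (if, while, for) and its opening parenthesis. A minimal, hypothetical C++ sketch of the before/after shape follows; the function and variable names are made up for illustration and do not come from gem5.

    #include <cstdio>

    // Hypothetical helper, used only to illustrate the spacing rule.
    static void drainPending(int pending)
    {
        // Old style flagged by the checker:  if(pending > 0) / while(pending)
        if (pending > 0) {                        // space after 'if'
            for (int i = 0; i < pending; i++) {   // space after 'for'
                std::printf("draining %d\n", i);
            }
        }
        while (pending > 0) {                     // space after 'while'
            --pending;
        }
    }

    int main() { drainPending(3); return 0; }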