From 0be64ffe2f4ff8824b3084362706ffbf456ea490 Mon Sep 17 00:00:00 2001 From: Steve Reinhardt Date: Sat, 31 May 2014 18:00:23 -0700 Subject: style: eliminate equality tests with true and false Using '== true' in a boolean expression is totally redundant, and using '== false' is pretty verbose (and arguably less readable in most cases) compared to '!'. It's somewhat of a pet peeve, perhaps, but I had some time waiting for some tests to run and decided to clean these up. Unfortunately, SLICC appears not to have the '!' operator, so I had to leave the '== false' tests in the SLICC code. --- src/arch/mips/tlb.cc | 2 +- src/arch/power/tlb.cc | 2 +- src/arch/sparc/tlb.cc | 4 ++-- src/arch/x86/pagetable_walker.cc | 4 ++-- src/base/loader/hex_file.cc | 4 ++-- src/base/match.cc | 2 +- src/cpu/exetrace.cc | 2 +- src/cpu/inorder/cpu.cc | 2 +- src/cpu/inorder/pipeline_stage.cc | 6 +++--- src/cpu/inorder/resources/use_def.cc | 2 +- src/cpu/o3/commit_impl.hh | 6 +++--- src/cpu/o3/fetch_impl.hh | 4 ++-- src/cpu/o3/iew_impl.hh | 10 +++++----- src/cpu/o3/inst_queue_impl.hh | 2 +- src/cpu/o3/lsq_unit_impl.hh | 8 ++++---- src/cpu/o3/rob_impl.hh | 2 +- src/cpu/ozone/inst_queue_impl.hh | 2 +- src/mem/protocol/MOESI_CMP_token-L1cache.sm | 6 +++--- src/mem/protocol/MOESI_CMP_token-L2cache.sm | 8 ++++---- src/mem/protocol/MOESI_CMP_token-dir.sm | 2 +- src/mem/ruby/buffers/MessageBuffer.cc | 2 +- src/mem/ruby/slicc_interface/NetworkMessage.hh | 4 ++-- src/mem/ruby/system/DMASequencer.cc | 6 +++--- src/mem/slicc/ast/PeekStatementAST.py | 7 +++---- 24 files changed, 49 insertions(+), 50 deletions(-) (limited to 'src') diff --git a/src/arch/mips/tlb.cc b/src/arch/mips/tlb.cc index 8b106d437..420707bd0 100644 --- a/src/arch/mips/tlb.cc +++ b/src/arch/mips/tlb.cc @@ -184,7 +184,7 @@ TLB::insertAt(PTE &pte, unsigned Index, int _smallPages) (pte.D0 << 2) | (pte.V0 <<1) | pte.G), ((pte.PFN1 <<6) | (pte.C1 << 3) | (pte.D1 << 2) | (pte.V1 <<1) | pte.G)); - if (table[Index].V0 == true || table[Index].V1 == true) { + if (table[Index].V0 || table[Index].V1) { // Previous entry is valid PageTable::iterator i = lookupTable.find(table[Index].VPN); lookupTable.erase(i); diff --git a/src/arch/power/tlb.cc b/src/arch/power/tlb.cc index 9c1745cc8..1396d726c 100644 --- a/src/arch/power/tlb.cc +++ b/src/arch/power/tlb.cc @@ -165,7 +165,7 @@ TLB::insertAt(PowerISA::PTE &pte, unsigned Index, int _smallPages) } else { // Update TLB - if (table[Index].V0 == true || table[Index].V1 == true) { + if (table[Index].V0 || table[Index].V1) { // Previous entry is valid PageTable::iterator i = lookupTable.find(table[Index].VPN); diff --git a/src/arch/sparc/tlb.cc b/src/arch/sparc/tlb.cc index 72126a928..b0267718d 100644 --- a/src/arch/sparc/tlb.cc +++ b/src/arch/sparc/tlb.cc @@ -290,7 +290,7 @@ TLB::demapContext(int partition_id, int context_id) for (int x = 0; x < size; x++) { if (tlb[x].range.contextId == context_id && tlb[x].range.partitionId == partition_id) { - if (tlb[x].valid == true) { + if (tlb[x].valid) { freeList.push_front(&tlb[x]); } tlb[x].valid = false; @@ -329,7 +329,7 @@ TLB::flushAll() lookupTable.clear(); for (int x = 0; x < size; x++) { - if (tlb[x].valid == true) + if (tlb[x].valid) freeList.push_back(&tlb[x]); tlb[x].valid = false; tlb[x].used = false; diff --git a/src/arch/x86/pagetable_walker.cc b/src/arch/x86/pagetable_walker.cc index 63397a607..0c8dcd442 100644 --- a/src/arch/x86/pagetable_walker.cc +++ b/src/arch/x86/pagetable_walker.cc @@ -233,7 +233,7 @@ Fault Walker::WalkerState::startWalk() { Fault fault = NoFault; - 
assert(started == false); + assert(!started); started = true; setupWalk(req->getVaddr()); if (timing) { @@ -262,7 +262,7 @@ Fault Walker::WalkerState::startFunctional(Addr &addr, unsigned &logBytes) { Fault fault = NoFault; - assert(started == false); + assert(!started); started = true; setupWalk(addr); diff --git a/src/base/loader/hex_file.cc b/src/base/loader/hex_file.cc index e26ac31e6..dac013d40 100755 --- a/src/base/loader/hex_file.cc +++ b/src/base/loader/hex_file.cc @@ -104,7 +104,7 @@ HexFile::parseLine(char *Str, Addr *A, uint32_t *D) } else if (Str[i] == ' ' || Str[i] == '\n') { if (Number == 0) return; - if (Flag == false) { + if (!Flag) { *A = Number; Number = 0; Flag = true; @@ -125,7 +125,7 @@ HexFile::parseLine(char *Str, Addr *A, uint32_t *D) i++; } - if (Flag != true) { + if (!Flag) { *A = 0; *D = 0; } else { diff --git a/src/base/match.cc b/src/base/match.cc index 08ba5f20e..65b740076 100644 --- a/src/base/match.cc +++ b/src/base/match.cc @@ -89,7 +89,7 @@ ObjectMatch::domatch(const string &name) const } } - if (match == true) + if (match) return true; } diff --git a/src/cpu/exetrace.cc b/src/cpu/exetrace.cc index 2877db187..b3b6e6a60 100644 --- a/src/cpu/exetrace.cc +++ b/src/cpu/exetrace.cc @@ -115,7 +115,7 @@ Trace::ExeTracerRecord::traceInst(StaticInstPtr inst, bool ran) outs << Enums::OpClassStrings[inst->opClass()] << " : "; } - if (Debug::ExecResult && predicate == false) { + if (Debug::ExecResult && !predicate) { outs << "Predicated False"; } diff --git a/src/cpu/inorder/cpu.cc b/src/cpu/inorder/cpu.cc index eecbd033e..e966e8e83 100644 --- a/src/cpu/inorder/cpu.cc +++ b/src/cpu/inorder/cpu.cc @@ -1763,7 +1763,7 @@ InOrderCPU::cleanUpRemovedInsts() // Clear if Non-Speculative if (inst->staticInst && inst->seqNum == nonSpecSeqNum[tid] && - nonSpecInstActive[tid] == true) { + nonSpecInstActive[tid]) { nonSpecInstActive[tid] = false; } diff --git a/src/cpu/inorder/pipeline_stage.cc b/src/cpu/inorder/pipeline_stage.cc index d98fbb744..5e94c665f 100644 --- a/src/cpu/inorder/pipeline_stage.cc +++ b/src/cpu/inorder/pipeline_stage.cc @@ -248,19 +248,19 @@ void PipelineStage::removeStalls(ThreadID tid) { for (int st_num = 0; st_num < NumStages; st_num++) { - if (stalls[tid].stage[st_num] == true) { + if (stalls[tid].stage[st_num]) { DPRINTF(InOrderStage, "Removing stall from stage %i.\n", st_num); stalls[tid].stage[st_num] = false; } - if (toPrevStages->stageBlock[st_num][tid] == true) { + if (toPrevStages->stageBlock[st_num][tid]) { DPRINTF(InOrderStage, "Removing pending block from stage %i.\n", st_num); toPrevStages->stageBlock[st_num][tid] = false; } - if (fromNextStages->stageBlock[st_num][tid] == true) { + if (fromNextStages->stageBlock[st_num][tid]) { DPRINTF(InOrderStage, "Removing pending block from stage %i.\n", st_num); fromNextStages->stageBlock[st_num][tid] = false; diff --git a/src/cpu/inorder/resources/use_def.cc b/src/cpu/inorder/resources/use_def.cc index d25925b9b..5a871d0d4 100644 --- a/src/cpu/inorder/resources/use_def.cc +++ b/src/cpu/inorder/resources/use_def.cc @@ -191,7 +191,7 @@ UseDefUnit::execute(int slot_idx) // If there is a non-speculative instruction // in the pipeline then stall instructions here // --- - if (*nonSpecInstActive[tid] == true && seq_num > *nonSpecSeqNum[tid]) { + if (*nonSpecInstActive[tid] && seq_num > *nonSpecSeqNum[tid]) { DPRINTF(InOrderUseDef, "[tid:%i]: [sn:%i] cannot execute because" "there is non-speculative instruction [sn:%i] has not " "graduated.\n", tid, seq_num, *nonSpecSeqNum[tid]); diff --git 
a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh index 333687c84..b6fdc40bb 100644 --- a/src/cpu/o3/commit_impl.hh +++ b/src/cpu/o3/commit_impl.hh @@ -843,10 +843,10 @@ DefaultCommit::commit() // Not sure which one takes priority. I think if we have // both, that's a bad sign. - if (trapSquash[tid] == true) { + if (trapSquash[tid]) { assert(!tcSquash[tid]); squashFromTrap(tid); - } else if (tcSquash[tid] == true) { + } else if (tcSquash[tid]) { assert(commitStatus[tid] != TrapPending); squashFromTC(tid); } else if (commitStatus[tid] == SquashAfterPending) { @@ -885,7 +885,7 @@ DefaultCommit::commit() // then use one older sequence number. InstSeqNum squashed_inst = fromIEW->squashedSeqNum[tid]; - if (fromIEW->includeSquashInst[tid] == true) { + if (fromIEW->includeSquashInst[tid]) { squashed_inst--; } diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh index a81125da6..93dc2e250 100644 --- a/src/cpu/o3/fetch_impl.hh +++ b/src/cpu/o3/fetch_impl.hh @@ -430,8 +430,8 @@ DefaultFetch::drainSanityCheck() const assert(isDrained()); assert(retryPkt == NULL); assert(retryTid == InvalidThreadID); - assert(cacheBlocked == false); - assert(interruptPending == false); + assert(!cacheBlocked); + assert(!interruptPending); for (ThreadID i = 0; i < numThreads; ++i) { assert(!memReq[i]); diff --git a/src/cpu/o3/iew_impl.hh b/src/cpu/o3/iew_impl.hh index 3c133ff0c..644366dfc 100644 --- a/src/cpu/o3/iew_impl.hh +++ b/src/cpu/o3/iew_impl.hh @@ -487,7 +487,7 @@ DefaultIEW::squashDueToBranch(DynInstPtr &inst, ThreadID tid) DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, PC: %s " "[sn:%i].\n", tid, inst->pcState(), inst->seqNum); - if (toCommit->squash[tid] == false || + if (!toCommit->squash[tid] || inst->seqNum < toCommit->squashedSeqNum[tid]) { toCommit->squash[tid] = true; toCommit->squashedSeqNum[tid] = inst->seqNum; @@ -517,7 +517,7 @@ DefaultIEW::squashDueToMemOrder(DynInstPtr &inst, ThreadID tid) // case the memory violator should take precedence over the branch // misprediction because it requires the violator itself to be included in // the squash. - if (toCommit->squash[tid] == false || + if (!toCommit->squash[tid] || inst->seqNum <= toCommit->squashedSeqNum[tid]) { toCommit->squash[tid] = true; @@ -538,7 +538,7 @@ DefaultIEW::squashDueToMemBlocked(DynInstPtr &inst, ThreadID tid) { DPRINTF(IEW, "[tid:%i]: Memory blocked, squashing load and younger insts, " "PC: %s [sn:%i].\n", tid, inst->pcState(), inst->seqNum); - if (toCommit->squash[tid] == false || + if (!toCommit->squash[tid] || inst->seqNum < toCommit->squashedSeqNum[tid]) { toCommit->squash[tid] = true; @@ -1314,7 +1314,7 @@ DefaultIEW::executeInsts() } // If the store had a fault then it may not have a mem req - if (fault != NoFault || inst->readPredicate() == false || + if (fault != NoFault || !inst->readPredicate() || !inst->isStoreConditional()) { // If the instruction faulted, then we need to send it along // to commit without the instruction completing. @@ -1339,7 +1339,7 @@ DefaultIEW::executeInsts() // will be replaced and we will lose it. if (inst->getFault() == NoFault) { inst->execute(); - if (inst->readPredicate() == false) + if (!inst->readPredicate()) inst->forwardOldRegs(); } diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh index 8eba028d6..ab3861add 100644 --- a/src/cpu/o3/inst_queue_impl.hh +++ b/src/cpu/o3/inst_queue_impl.hh @@ -1262,7 +1262,7 @@ InstructionQueue::addToDependents(DynInstPtr &new_inst) // it be added to the dependency graph. 
if (src_reg >= numPhysRegs) { continue; - } else if (regScoreboard[src_reg] == false) { + } else if (!regScoreboard[src_reg]) { DPRINTF(IQ, "Instruction PC %s has src reg %i that " "is being added to the dependency chain.\n", new_inst->pcState(), src_reg); diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh index 416f3e7e7..547800b4c 100644 --- a/src/cpu/o3/lsq_unit_impl.hh +++ b/src/cpu/o3/lsq_unit_impl.hh @@ -612,12 +612,12 @@ LSQUnit::executeLoad(DynInstPtr &inst) // If the instruction faulted or predicated false, then we need to send it // along to commit without the instruction completing. - if (load_fault != NoFault || inst->readPredicate() == false) { + if (load_fault != NoFault || !inst->readPredicate()) { // Send this instruction to commit, also make sure iew stage // realizes there is activity. // Mark it as executed unless it is an uncached load that // needs to hit the head of commit. - if (inst->readPredicate() == false) + if (!inst->readPredicate()) inst->forwardOldRegs(); DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", inst->seqNum, @@ -665,7 +665,7 @@ LSQUnit::executeStore(DynInstPtr &store_inst) store_fault == NoFault) return store_fault; - if (store_inst->readPredicate() == false) + if (!store_inst->readPredicate()) store_inst->forwardOldRegs(); if (storeQueue[store_idx].size == 0) { @@ -673,7 +673,7 @@ LSQUnit::executeStore(DynInstPtr &store_inst) store_inst->pcState(), store_inst->seqNum); return store_fault; - } else if (store_inst->readPredicate() == false) { + } else if (!store_inst->readPredicate()) { DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", store_inst->seqNum); return store_fault; diff --git a/src/cpu/o3/rob_impl.hh b/src/cpu/o3/rob_impl.hh index 61d6bd11b..5a9dc90f9 100644 --- a/src/cpu/o3/rob_impl.hh +++ b/src/cpu/o3/rob_impl.hh @@ -519,7 +519,7 @@ ROB::readHeadInst(ThreadID tid) if (threadEntries[tid] != 0) { InstIt head_thread = instList[tid].begin(); - assert((*head_thread)->isInROB()==true); + assert((*head_thread)->isInROB()); return *head_thread; } else { diff --git a/src/cpu/ozone/inst_queue_impl.hh b/src/cpu/ozone/inst_queue_impl.hh index babee399d..6e85464e2 100644 --- a/src/cpu/ozone/inst_queue_impl.hh +++ b/src/cpu/ozone/inst_queue_impl.hh @@ -1101,7 +1101,7 @@ InstQueue::addToDependents(DynInstPtr &new_inst) // it be added to the dependency graph. 
if (src_reg >= numPhysRegs) { continue; - } else if (regScoreboard[src_reg] == false) { + } else if (!regScoreboard[src_reg]) { DPRINTF(IQ, "Instruction PC %#x has src reg %i that " "is being added to the dependency chain.\n", new_inst->readPC(), src_reg); diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm index 7400ba12e..2b15dc8bf 100644 --- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm +++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm @@ -261,7 +261,7 @@ machine(L1Cache, "Token protocol") } else if (is_valid(cache_entry)) { return cache_entry.CacheState; } else { - if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) { + if (persistentTable.isLocked(addr) && (persistentTable.findSmallest(addr) != machineID)) { // Not in cache, in persistent table, but this processor isn't highest priority return State:I_L; } else { @@ -1401,7 +1401,7 @@ machine(L1Cache, "Token protocol") assert(is_valid(tbe)); if (tbe.WentPersistent) { - // assert(starving == true); + // assert(starving); outstandingRequests := outstandingRequests - 1; enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) { out_msg.Addr := address; @@ -1428,7 +1428,7 @@ machine(L1Cache, "Token protocol") // Update average latency if (tbe.IssueCount <= 1) { - if (tbe.ExternalResponse == true) { + if (tbe.ExternalResponse) { updateAverageLatencyEstimate(curCycle() - tbe.IssueTime); } } diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm index b429a68aa..f0fa8227d 100644 --- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm +++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm @@ -165,7 +165,7 @@ machine(L2Cache, "Token protocol") State getState(Entry cache_entry, Address addr) { if (is_valid(cache_entry)) { return cache_entry.CacheState; - } else if (persistentTable.isLocked(addr) == true) { + } else if (persistentTable.isLocked(addr)) { return State:I_L; } else { return State:NP; @@ -250,7 +250,7 @@ machine(L2Cache, "Token protocol") bool exclusiveExists(Address addr) { if (localDirectory.isTagPresent(addr)) { - if (localDirectory[addr].exclusive == true) { + if (localDirectory[addr].exclusive) { return true; } else { @@ -285,7 +285,7 @@ machine(L2Cache, "Token protocol") } void clearExclusiveBitIfExists(Address addr) { - if (localDirectory.isTagPresent(addr) == true) { + if (localDirectory.isTagPresent(addr)) { localDirectory[addr].exclusive := false; } } @@ -761,7 +761,7 @@ machine(L2Cache, "Token protocol") action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") { peek(requestNetwork_in, RequestMsg) { - if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) { + if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) { //profile_filter_action(1); DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n", in_msg.RetryNum); diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm index f7d3f1fa2..4354d7c4c 100644 --- a/src/mem/protocol/MOESI_CMP_token-dir.sm +++ b/src/mem/protocol/MOESI_CMP_token-dir.sm @@ -708,7 +708,7 @@ machine(Directory, "Token protocol") action(s_deallocateTBE, "s", desc="Deallocate TBE") { if (tbe.WentPersistent) { - assert(starving == true); + assert(starving); enqueue(persistentNetwork_out, PersistentMsg, 1) { out_msg.Addr := address; diff --git a/src/mem/ruby/buffers/MessageBuffer.cc 
b/src/mem/ruby/buffers/MessageBuffer.cc index 298fdb3c3..b63b07976 100644 --- a/src/mem/ruby/buffers/MessageBuffer.cc +++ b/src/mem/ruby/buffers/MessageBuffer.cc @@ -160,7 +160,7 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta) Tick current_time = m_sender->clockEdge(); Tick arrival_time = 0; - if (!RubySystem::getRandomization() || (m_randomization == false)) { + if (!RubySystem::getRandomization() || !m_randomization) { // No randomization arrival_time = current_time + delta * m_sender->clockPeriod(); } else { diff --git a/src/mem/ruby/slicc_interface/NetworkMessage.hh b/src/mem/ruby/slicc_interface/NetworkMessage.hh index 03d05d15d..10d78251a 100644 --- a/src/mem/ruby/slicc_interface/NetworkMessage.hh +++ b/src/mem/ruby/slicc_interface/NetworkMessage.hh @@ -60,7 +60,7 @@ class NetworkMessage : public Message const NetDest& getInternalDestination() const { - if (m_internal_dest_valid == false) + if (!m_internal_dest_valid) return getDestination(); return m_internal_dest; @@ -69,7 +69,7 @@ class NetworkMessage : public Message NetDest& getInternalDestination() { - if (m_internal_dest_valid == false) { + if (!m_internal_dest_valid) { m_internal_dest = getDestination(); m_internal_dest_valid = true; } diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc index 469d19be6..9b0157b45 100644 --- a/src/mem/ruby/system/DMASequencer.cc +++ b/src/mem/ruby/system/DMASequencer.cc @@ -94,7 +94,7 @@ DMASequencer::makeRequest(PacketPtr pkt) void DMASequencer::issueNext() { - assert(m_is_busy == true); + assert(m_is_busy); active_request.bytes_completed = active_request.bytes_issued; if (active_request.len == active_request.bytes_completed) { // @@ -144,12 +144,12 @@ DMASequencer::issueNext() void DMASequencer::dataCallback(const DataBlock & dblk) { - assert(m_is_busy == true); + assert(m_is_busy); int len = active_request.bytes_issued - active_request.bytes_completed; int offset = 0; if (active_request.bytes_completed == 0) offset = active_request.start_paddr & m_data_block_mask; - assert(active_request.write == false); + assert(!active_request.write); if (active_request.data != NULL) { memcpy(&active_request.data[active_request.bytes_completed], dblk.getData(offset, len), len); diff --git a/src/mem/slicc/ast/PeekStatementAST.py b/src/mem/slicc/ast/PeekStatementAST.py index a9816bd3d..d267df26e 100644 --- a/src/mem/slicc/ast/PeekStatementAST.py +++ b/src/mem/slicc/ast/PeekStatementAST.py @@ -68,12 +68,11 @@ class PeekStatementAST(StatementAST): if self.pairs.has_key("block_on"): address_field = self.pairs['block_on'] code(''' - if ( (m_is_blocking == true) && - (m_block_map.count(in_msg_ptr->m_$address_field) == 1) ) { - if (m_block_map[in_msg_ptr->m_$address_field] != &$qcode) { + if (m_is_blocking && + (m_block_map.count(in_msg_ptr->m_$address_field) == 1) && + (m_block_map[in_msg_ptr->m_$address_field] != &$qcode)) { $qcode.delayHead(); continue; - } } ''') -- cgit v1.2.3
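
As a quick illustration of the style rule this patch applies (a minimal standalone sketch with hypothetical names, not code taken from gem5 itself): comparing a bool against a literal adds nothing, so the preferred spelling is the bare variable, or '!' for the negated test.

    #include <cassert>

    // Hypothetical flags standing in for the kinds of booleans touched above.
    bool started = false;
    bool cacheBlocked = false;

    void startWalkSketch()
    {
        // Before: assert(started == false);  if (cacheBlocked == true) { ... }
        // After:
        assert(!started);       // '!' reads as "assert not started"
        if (cacheBlocked)       // the bare bool reads as "if the cache is blocked"
            return;
        started = true;
    }

    int main()
    {
        startWalkSketch();
        return 0;
    }

This is the same transformation the diff performs mechanically across the C++ sources; only the SLICC files keep some '== false' comparisons because, as noted in the commit message, SLICC lacks a '!' operator.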