From de62aedabc96e7492c40bbc4468ba42b3274bfd6 Mon Sep 17 00:00:00 2001
From: Andreas Hansson
Date: Sat, 27 Sep 2014 09:08:29 -0400
Subject: misc: Fix a bunch of minor issues identified by static analysis

Add some missing initialisation, and fix a handful of benign resource
leaks (including some false positives).
---
 src/base/output.cc                     |  2 ++
 src/base/socket.cc                     |  7 +++++--
 src/cpu/testers/rubytest/RubyTester.cc |  3 +++
 src/mem/cache/cache_impl.hh            |  3 +++
 src/mem/comm_monitor.cc                |  6 ++++++
 src/mem/comm_monitor.hh                |  2 +-
 src/mem/packet.hh                      |  2 +-
 src/mem/physical.cc                    | 22 ++++------------------
 src/mem/ruby/system/RubyPort.cc        |  6 +++++-
 src/sim/eventq.cc                      | 11 +++++------
 src/sim/eventq.hh                      |  3 ++-
 11 files changed, 37 insertions(+), 30 deletions(-)

diff --git a/src/base/output.cc b/src/base/output.cc
index 912ec20e9..da9a551fa 100644
--- a/src/base/output.cc
+++ b/src/base/output.cc
@@ -255,6 +255,8 @@ OutputDirectory::remove(const string &name, bool recursive)
 
                 de = readdir(subdir);
             }
+
+            closedir(subdir);
         }
 
         // try to force recognition that we deleted the files in the directory
diff --git a/src/base/socket.cc b/src/base/socket.cc
index c39accd7e..01fb519b4 100644
--- a/src/base/socket.cc
+++ b/src/base/socket.cc
@@ -94,8 +94,9 @@
     struct sockaddr_in sockaddr;
     sockaddr.sin_family = PF_INET;
     sockaddr.sin_addr.s_addr = INADDR_ANY;
-
     sockaddr.sin_port = htons(port);
+    // finally clear sin_zero
+    memset(&sockaddr.sin_zero, 0, sizeof(sockaddr.sin_zero));
     int ret = ::bind(fd, (struct sockaddr *)&sockaddr, sizeof (sockaddr));
     if (ret != 0) {
         if (ret == -1 && errno != EADDRINUSE)
@@ -126,7 +127,9 @@
     int sfd = ::accept(fd, (struct sockaddr *)&sockaddr, &slen);
     if (sfd != -1 && nodelay) {
         int i = 1;
-        ::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (char *)&i, sizeof(i));
+        if (::setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (char *)&i,
+                         sizeof(i)) < 0)
+            warn("ListenSocket(accept): setsockopt() TCP_NODELAY failed!");
     }
 
     return sfd;
diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc
index 229b3e0a9..6ac252612 100644
--- a/src/cpu/testers/rubytest/RubyTester.cc
+++ b/src/cpu/testers/rubytest/RubyTester.cc
@@ -52,9 +52,12 @@
 RubyTester::RubyTester(const Params *p)
   : MemObject(p), checkStartEvent(this),
     _masterId(p->system->getMasterId(name())),
+    m_checkTable_ptr(nullptr),
     m_num_cpus(p->num_cpus),
     m_checks_to_complete(p->checks_to_complete),
     m_deadlock_threshold(p->deadlock_threshold),
+    m_num_writers(0),
+    m_num_readers(0),
     m_wakeup_frequency(p->wakeup_frequency),
     m_check_flush(p->check_flush),
     m_num_inst_ports(p->port_cpuInstPort_connection_count)
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index e4a6f3c24..5cfe7c0cf 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -917,6 +917,9 @@ Cache::recvAtomic(PacketPtr pkt)
     if (pkt->cmd == MemCmd::WriteInvalidateReq) {
         memSidePort->sendAtomic(pkt); // complete writeback
         if (isTopLevel) {
+            // @todo Static analysis suggests this can actually happen
+            assert(blk);
+
             // top level caches allocate and write the data
             assert(blk->isDirty());
             assert(!blk->isWritable());
diff --git a/src/mem/comm_monitor.cc b/src/mem/comm_monitor.cc
index 3c3af76ea..c9cbcb143 100644
--- a/src/mem/comm_monitor.cc
+++ b/src/mem/comm_monitor.cc
@@ -105,6 +105,12 @@ CommMonitor::CommMonitor(Params* params)
             name(), samplePeriodTicks, samplePeriod.msec());
 }
 
+CommMonitor::~CommMonitor()
+{
+    // if not already done, close the stream
+    closeStreams();
+}
+
 void
 CommMonitor::closeStreams()
 {
diff --git a/src/mem/comm_monitor.hh b/src/mem/comm_monitor.hh
index c3a970940..69122cc60 100644
--- a/src/mem/comm_monitor.hh
+++ b/src/mem/comm_monitor.hh
@@ -77,7 +77,7 @@ class CommMonitor : public MemObject
     CommMonitor(Params* params);
 
     /** Destructor */
-    ~CommMonitor() {}
+    ~CommMonitor();
 
     /**
      * Callback to flush and close all open output streams on exit. If
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 4ed307f66..c7b47c0a7 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -601,7 +601,7 @@ class Packet : public Printable
      */
     Packet(Request *_req, MemCmd _cmd)
         :  cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
-           src(InvalidPortID), dest(InvalidPortID),
+           size(0), src(InvalidPortID), dest(InvalidPortID),
           bytesValidStart(0), bytesValidEnd(0),
           firstWordDelay(0), lastWordDelay(0),
           senderState(NULL)
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 08ec83410..398d0530f 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -307,16 +307,9 @@ PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
 
     // write memory file
     string filepath = Checkpoint::dir() + "/" + filename.c_str();
-    int fd = creat(filepath.c_str(), 0664);
-    if (fd < 0) {
-        perror("creat");
-        fatal("Can't open physical memory checkpoint file '%s'\n",
-              filename);
-    }
-
-    gzFile compressed_mem = gzdopen(fd, "wb");
+    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
     if (compressed_mem == NULL)
-        fatal("Insufficient memory to allocate compression state for %s\n",
+        fatal("Can't open physical memory checkpoint file '%s'\n",
               filename);
 
     uint64_t pass_size = 0;
@@ -380,16 +373,9 @@ PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
     string filepath = cp->cptDir + "/" + filename;
 
     // mmap memoryfile
-    int fd = open(filepath.c_str(), O_RDONLY);
-    if (fd < 0) {
-        perror("open");
-        fatal("Can't open physical memory checkpoint file '%s'", filename);
-    }
-
-    gzFile compressed_mem = gzdopen(fd, "rb");
+    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
     if (compressed_mem == NULL)
-        fatal("Insufficient memory to allocate compression state for %s\n",
-              filename);
+        fatal("Can't open physical memory checkpoint file '%s'", filename);
 
     // we've already got the actual backing store mapped
     uint8_t* pmem = backingStore[store_id].second;
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index 110b6924d..71b08ebbb 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -205,7 +205,11 @@ RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
         AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
         for (auto it = l.begin(); it != l.end(); ++it) {
             if (it->contains(pkt->getAddr())) {
-                ruby_port->master_ports[i]->sendTimingReq(pkt);
+                // generally it is not safe to assume success here as
+                // the port could be blocked
+                bool M5_VAR_USED success =
+                    ruby_port->master_ports[i]->sendTimingReq(pkt);
+                assert(success);
                 return true;
             }
         }
diff --git a/src/sim/eventq.cc b/src/sim/eventq.cc
index b8e45a13e..4fde79656 100644
--- a/src/sim/eventq.cc
+++ b/src/sim/eventq.cc
@@ -460,29 +460,28 @@ Event::dump() const
 }
 
 EventQueue::EventQueue(const string &n)
-    : objName(n), head(NULL), _curTick(0),
-      async_queue_mutex(new std::mutex())
+    : objName(n), head(NULL), _curTick(0)
 {
 }
 
 void
 EventQueue::asyncInsert(Event *event)
 {
-    async_queue_mutex->lock();
+    async_queue_mutex.lock();
     async_queue.push_back(event);
-    async_queue_mutex->unlock();
+    async_queue_mutex.unlock();
 }
 
 void
 EventQueue::handleAsyncInsertions()
 {
     assert(this == curEventQueue());
-    async_queue_mutex->lock();
+    async_queue_mutex.lock();
 
     while (!async_queue.empty()) {
         insert(async_queue.front());
         async_queue.pop_front();
     }
 
-    async_queue_mutex->unlock();
+    async_queue_mutex.unlock();
 }
diff --git a/src/sim/eventq.hh b/src/sim/eventq.hh
index e238785f6..c390d2155 100644
--- a/src/sim/eventq.hh
+++ b/src/sim/eventq.hh
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include <mutex>
 #include
 #include
 
@@ -448,7 +449,7 @@ class EventQueue : public Serializable
     Tick _curTick;
 
     //! Mutex to protect async queue.
-    std::mutex *async_queue_mutex;
+    std::mutex async_queue_mutex;
 
     //! List of events added by other threads to this event queue.
     std::list<Event*> async_queue;
--
cgit v1.2.3
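
The fixes above fall into a few recurring patterns; the short standalone sketches below illustrate them outside gem5 and are not part of the patch. The output.cc hunk closes a DIR* handle that was previously leaked once OutputDirectory::remove() finished iterating a subdirectory. A minimal sketch of the same opendir()/readdir()/closedir() pattern, with an illustrative listing function of my own:

#include <dirent.h>

#include <cstdio>

// Walk a directory and print each entry name. The important part is that
// the DIR handle is released with closedir() before returning; forgetting
// that call is exactly the kind of leak static analysis flags.
static bool
listDirectory(const char *path)
{
    DIR *dir = opendir(path);
    if (!dir) {
        std::perror("opendir");
        return false;
    }

    for (struct dirent *de = readdir(dir); de != nullptr; de = readdir(dir))
        std::printf("%s\n", de->d_name);

    closedir(dir);
    return true;
}

int
main()
{
    return listDirectory(".") ? 0 : 1;
}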
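
The socket.cc changes do two things: fully initialise the sockaddr_in handed to bind() by clearing sin_zero, and check the return value of setsockopt() instead of discarding it. A small sketch of both habits, assuming a plain blocking TCP socket rather than gem5's ListenSocket (the port choice and error handling here are illustrative):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#include <cstdio>
#include <cstring>

int
main()
{
    int fd = socket(PF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        std::perror("socket");
        return 1;
    }

    struct sockaddr_in addr;
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = INADDR_ANY;
    addr.sin_port = htons(0);   // let the kernel pick a free port
    // clear the padding so bind() never sees uninitialised bytes
    std::memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
        std::perror("bind");
        return 1;
    }

    // check the result rather than silently ignoring a failure
    int one = 1;
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
        std::perror("setsockopt(TCP_NODELAY)");

    return 0;
}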
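
The RubyTester.cc and packet.hh hunks address uninitialised members by giving every member a value in the constructor's initialisation list. A trimmed-down illustration of the idea using made-up names (gem5's real classes have many more members):

#include <cstddef>
#include <cstdint>

// Before the fix, members such as Packet::size or RubyTester's
// m_num_writers/m_num_readers could be read before anything assigned them.
// Initialising everything in the member-initialiser list keeps static
// analysis (and readers) happy, even when the real value arrives later.
class PacketLike
{
  public:
    explicit PacketLike(uint64_t addr)
        : addr(addr),
          size(0),          // the kind of field that was missing before
          data(nullptr),
          numWriters(0),
          numReaders(0)
    {}

    uint64_t addr;
    std::size_t size;
    uint8_t *data;
    int numWriters;
    int numReaders;
};

int
main()
{
    PacketLike pkt(0x1000);
    return pkt.size == 0 ? 0 : 1;
}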
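
The physical.cc hunks replace the creat()/gzdopen() and open()/gzdopen() pairs with a single gzopen() call, so there is no raw file descriptor left dangling if a later step bails out. A standalone sketch of writing and reading a gzip-compressed file with zlib (the file name and payload are made up; link with -lz):

#include <zlib.h>

#include <cstdio>
#include <cstring>

int
main()
{
    const char data[] = "pretend this is a memory image";
    char buffer[sizeof(data)] = {};

    // gzopen() creates the file and the compression state in one step,
    // unlike creat() followed by gzdopen(), which leaves an fd to manage.
    gzFile out = gzopen("store.pmem", "wb");
    if (out == NULL) {
        std::fprintf(stderr, "can't open compressed file for writing\n");
        return 1;
    }
    if (gzwrite(out, data, sizeof(data)) != (int)sizeof(data)) {
        std::fprintf(stderr, "write failed\n");
        gzclose(out);
        return 1;
    }
    gzclose(out);

    gzFile in = gzopen("store.pmem", "rb");
    if (in == NULL) {
        std::fprintf(stderr, "can't open compressed file for reading\n");
        return 1;
    }
    if (gzread(in, buffer, sizeof(buffer)) != (int)sizeof(buffer)) {
        std::fprintf(stderr, "read failed\n");
        gzclose(in);
        return 1;
    }
    gzclose(in);

    return std::strcmp(buffer, data) == 0 ? 0 : 1;
}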
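
Finally, the eventq.cc/eventq.hh change turns the heap-allocated std::mutex* into a plain std::mutex member, removing an allocation that was never freed. The sketch below shows the same ownership pattern on a made-up queue class; it uses std::lock_guard for brevity, whereas the patch keeps gem5's explicit lock()/unlock() calls:

#include <iostream>
#include <list>
#include <mutex>

class AsyncQueue
{
  public:
    // Called from other threads: push work under the lock.
    void asyncInsert(int value)
    {
        std::lock_guard<std::mutex> guard(asyncQueueMutex);
        asyncQueue.push_back(value);
    }

    // Called from the owning thread: drain everything under the lock.
    void handleAsyncInsertions()
    {
        std::lock_guard<std::mutex> guard(asyncQueueMutex);
        while (!asyncQueue.empty()) {
            std::cout << "handling " << asyncQueue.front() << "\n";
            asyncQueue.pop_front();
        }
    }

  private:
    // Owned by value: nothing to new, nothing to delete, nothing to leak.
    std::mutex asyncQueueMutex;
    std::list<int> asyncQueue;
};

int
main()
{
    AsyncQueue q;
    q.asyncInsert(1);
    q.asyncInsert(2);
    q.handleAsyncInsertions();
    return 0;
}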