author     Ron Dreslinski <rdreslin@umich.edu>    2006-10-20 13:01:21 -0400
committer  Ron Dreslinski <rdreslin@umich.edu>    2006-10-20 13:01:21 -0400
commit     28e9641c2cf063d8ee1eba9f440dfcda9c82d965 (patch)
tree       27d82f9251fe5b6eb5d4daa94c6b2af00f324229 /src
parent     780aa0a0ebb765781a31d0fb58257b1efb1f324a (diff)
Use the fixPacket function everywhere.
Fix the fixPacket assert.
Stop the timing port from forwarding the request if a response was found in
its queue on a read.

src/cpu/memtest/memtest.cc:
src/cpu/memtest/memtest.hh:
src/python/m5/objects/MemTest.py:
    Add a parameter to configure what percentage of memory accesses are functional.
src/mem/cache/base_cache.cc:
src/mem/cache/cache_impl.hh:
    Use the fixPacket function.
src/mem/packet.cc:
    Fix an assert that was checking the wrong thing.
src/mem/tport.cc:
    Properly detect whether we need to do the access to the functional device.

--HG--
extra : convert_revision : 447cc1a9a65ddd2a41e937fb09dc0e7c74e9c75e
Diffstat (limited to 'src')
-rw-r--r--  src/cpu/memtest/memtest.cc        | 12
-rw-r--r--  src/cpu/memtest/memtest.hh        |  4
-rw-r--r--  src/mem/cache/base_cache.cc       | 27
-rw-r--r--  src/mem/cache/cache_impl.hh       | 63
-rw-r--r--  src/mem/packet.cc                 |  2
-rw-r--r--  src/mem/tport.cc                  | 14
-rw-r--r--  src/python/m5/objects/MemTest.py  |  1
7 files changed, 22 insertions, 101 deletions
diff --git a/src/cpu/memtest/memtest.cc b/src/cpu/memtest/memtest.cc
index cb643e5d9..a99fe7e07 100644
--- a/src/cpu/memtest/memtest.cc
+++ b/src/cpu/memtest/memtest.cc
@@ -113,7 +113,7 @@ MemTest::MemTest(const string &name,
// PhysicalMemory *check_mem,
unsigned _memorySize,
unsigned _percentReads,
-// unsigned _percentCopies,
+ unsigned _percentFunctional,
unsigned _percentUncacheable,
unsigned _progressInterval,
unsigned _percentSourceUnaligned,
@@ -130,7 +130,7 @@ MemTest::MemTest(const string &name,
// checkMem(check_mem),
size(_memorySize),
percentReads(_percentReads),
-// percentCopies(_percentCopies),
+ percentFunctional(_percentFunctional),
percentUncacheable(_percentUncacheable),
progressInterval(_progressInterval),
nextProgressMessage(_progressInterval),
@@ -345,7 +345,7 @@ MemTest::tick()
} else {
paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
}
- bool probe = (random() % 2 == 1) && !(flags & UNCACHEABLE);
+ bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
//bool probe = false;
paddr &= ~((1 << access_size) - 1);
@@ -501,7 +501,7 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(MemTest)
// SimObjectParam<PhysicalMemory *> check_mem;
Param<unsigned> memory_size;
Param<unsigned> percent_reads;
-// Param<unsigned> percent_copies;
+ Param<unsigned> percent_functional;
Param<unsigned> percent_uncacheable;
Param<unsigned> progress_interval;
Param<unsigned> percent_source_unaligned;
@@ -520,7 +520,7 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(MemTest)
// INIT_PARAM(check_mem, "check memory"),
INIT_PARAM(memory_size, "memory size"),
INIT_PARAM(percent_reads, "target read percentage"),
-// INIT_PARAM(percent_copies, "target copy percentage"),
+ INIT_PARAM(percent_functional, "percentage of access that are functional"),
INIT_PARAM(percent_uncacheable, "target uncacheable percentage"),
INIT_PARAM(progress_interval, "progress report interval (in accesses)"),
INIT_PARAM(percent_source_unaligned,
@@ -537,7 +537,7 @@ END_INIT_SIM_OBJECT_PARAMS(MemTest)
CREATE_SIM_OBJECT(MemTest)
{
return new MemTest(getInstanceName(), /*cache->getInterface(),*/ /*main_mem,*/
- /*check_mem,*/ memory_size, percent_reads, /*percent_copies,*/
+ /*check_mem,*/ memory_size, percent_reads, percent_functional,
percent_uncacheable, progress_interval,
percent_source_unaligned, percent_dest_unaligned,
trace_addr, max_loads, atomic);
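
The behavioral change in the memtest.cc hunks above is the probe selection: instead of turning roughly every other cacheable access into a functional probe (random() % 2), the tester now draws against the new percent_functional knob. A minimal standalone sketch of that selection logic, using an illustrative helper name rather than the actual MemTest code:

    #include <cstdlib>

    // Decide whether the next tester access should be issued as a functional
    // probe.  percentFunctional is 0-100; uncacheable accesses are never
    // probed, mirroring the '!(flags & UNCACHEABLE)' term in MemTest::tick().
    static bool
    chooseFunctionalProbe(unsigned percentFunctional, bool uncacheable)
    {
        if (uncacheable)
            return false;
        return static_cast<unsigned>(std::rand() % 100) < percentFunctional;
    }
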
diff --git a/src/cpu/memtest/memtest.hh b/src/cpu/memtest/memtest.hh
index 5de41f0d8..f4ac16cd5 100644
--- a/src/cpu/memtest/memtest.hh
+++ b/src/cpu/memtest/memtest.hh
@@ -55,7 +55,7 @@ class MemTest : public MemObject
// PhysicalMemory *check_mem,
unsigned _memorySize,
unsigned _percentReads,
-// unsigned _percentCopies,
+ unsigned _percentFunctional,
unsigned _percentUncacheable,
unsigned _progressInterval,
unsigned _percentSourceUnaligned,
@@ -144,7 +144,7 @@ class MemTest : public MemObject
unsigned size; // size of testing memory region
unsigned percentReads; // target percentage of read accesses
-// unsigned percentCopies; // target percentage of copy accesses
+ unsigned percentFunctional; // target percentage of functional accesses
unsigned percentUncacheable;
int id;
diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index e0301a757..c51f1b6ac 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
@@ -116,32 +116,7 @@ BaseCache::CachePort::recvFunctional(Packet *pkt)
// If the target contains data, and it overlaps the
// probed request, need to update data
if (target->intersect(pkt)) {
- uint8_t* pkt_data;
- uint8_t* write_data;
- int data_size;
- if (target->getAddr() < pkt->getAddr()) {
- int offset = pkt->getAddr() - target->getAddr();
- pkt_data = pkt->getPtr<uint8_t>();
- write_data = target->getPtr<uint8_t>() + offset;
- data_size = target->getSize() - offset;
- assert(data_size > 0);
- if (data_size > pkt->getSize())
- data_size = pkt->getSize();
- } else {
- int offset = target->getAddr() - pkt->getAddr();
- pkt_data = pkt->getPtr<uint8_t>() + offset;
- write_data = target->getPtr<uint8_t>();
- data_size = pkt->getSize() - offset;
- assert(data_size >= pkt->getSize());
- if (data_size > target->getSize())
- data_size = target->getSize();
- }
-
- if (pkt->isWrite()) {
- memcpy(pkt_data, write_data, data_size);
- } else {
- memcpy(write_data, pkt_data, data_size);
- }
+ fixPacket(pkt, target);
}
}
cache->doFunctionalAccess(pkt, isCpuSide);
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index ea30dbba6..a88aeb573 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -560,7 +560,6 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update,
if (!update) {
// Check for data in MSHR and writebuffer.
if (mshr) {
- warn("Found outstanding miss on an non-update probe");
MSHR::TargetList *targets = mshr->getTargetList();
MSHR::TargetList::iterator i = targets->begin();
MSHR::TargetList::iterator end = targets->end();
@@ -568,71 +567,15 @@ Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update,
Packet * target = *i;
// If the target contains data, and it overlaps the
// probed request, need to update data
- if (target->isWrite() && target->intersect(pkt)) {
- uint8_t* pkt_data;
- uint8_t* write_data;
- int data_size;
- if (target->getAddr() < pkt->getAddr()) {
- int offset = pkt->getAddr() - target->getAddr();
- pkt_data = pkt->getPtr<uint8_t>();
- write_data = target->getPtr<uint8_t>() + offset;
- data_size = target->getSize() - offset;
- assert(data_size > 0);
- if (data_size > pkt->getSize())
- data_size = pkt->getSize();
- } else {
- int offset = target->getAddr() - pkt->getAddr();
- pkt_data = pkt->getPtr<uint8_t>() + offset;
- write_data = target->getPtr<uint8_t>();
- data_size = pkt->getSize() - offset;
- assert(data_size >= pkt->getSize());
- if (data_size > target->getSize())
- data_size = target->getSize();
- }
-
- if (pkt->isWrite()) {
- memcpy(pkt_data, write_data, data_size);
- } else {
- pkt->flags |= SATISFIED;
- pkt->result = Packet::Success;
- memcpy(write_data, pkt_data, data_size);
- }
+ if (target->intersect(pkt)) {
+ fixPacket(pkt, target);
}
}
}
for (int i = 0; i < writes.size(); ++i) {
Packet * write = writes[i]->pkt;
if (write->intersect(pkt)) {
- warn("Found outstanding write on an non-update probe");
- uint8_t* pkt_data;
- uint8_t* write_data;
- int data_size;
- if (write->getAddr() < pkt->getAddr()) {
- int offset = pkt->getAddr() - write->getAddr();
- pkt_data = pkt->getPtr<uint8_t>();
- write_data = write->getPtr<uint8_t>() + offset;
- data_size = write->getSize() - offset;
- assert(data_size > 0);
- if (data_size > pkt->getSize())
- data_size = pkt->getSize();
- } else {
- int offset = write->getAddr() - pkt->getAddr();
- pkt_data = pkt->getPtr<uint8_t>() + offset;
- write_data = write->getPtr<uint8_t>();
- data_size = pkt->getSize() - offset;
- assert(data_size >= pkt->getSize());
- if (data_size > write->getSize())
- data_size = write->getSize();
- }
-
- if (pkt->isWrite()) {
- memcpy(pkt_data, write_data, data_size);
- } else {
- pkt->flags |= SATISFIED;
- pkt->result = Packet::Success;
- memcpy(write_data, pkt_data, data_size);
- }
-
+ fixPacket(pkt, write);
}
}
if (pkt->isRead()
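
The two blocks deleted above were near-identical copies of the same overlap fix-up (a third lived in base_cache.cc), which is exactly what this commit folds into fixPacket(pkt, target). Below is a rough sketch of what that fix-up amounts to, reconstructed from the deleted code and its comments; the authoritative implementation is fixPacket() in src/mem/packet.cc, which this diff only touches at its assert, so details may differ:

    #include <algorithm>
    #include <cstring>

    #include "mem/packet.hh"  // Packet, as used throughout this diff

    // Sketch only: 'func' is the functional probe, 'timing' a queued packet
    // that intersects it.
    static void
    fixupOverlapSketch(Packet *func, Packet *timing)
    {
        uint8_t *func_data;
        uint8_t *timing_data;
        int size;

        // Line the two data buffers up on the overlapping byte range.
        if (timing->getAddr() < func->getAddr()) {
            int offset = func->getAddr() - timing->getAddr();
            func_data = func->getPtr<uint8_t>();
            timing_data = timing->getPtr<uint8_t>() + offset;
            size = std::min<int>(timing->getSize() - offset, func->getSize());
        } else {
            int offset = timing->getAddr() - func->getAddr();
            func_data = func->getPtr<uint8_t>() + offset;
            timing_data = timing->getPtr<uint8_t>();
            size = std::min<int>(func->getSize() - offset, timing->getSize());
        }

        if (func->isWrite()) {
            // A functional write patches the (now stale) queued data.
            std::memcpy(timing_data, func_data, size);
        } else {
            // A functional read picks up the newer data sitting in the queue
            // and is marked successful so callers can skip the real access.
            // (The deleted cache code also set pkt->flags |= SATISFIED here.)
            std::memcpy(func_data, timing_data, size);
            func->result = Packet::Success;
        }
    }
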
diff --git a/src/mem/packet.cc b/src/mem/packet.cc
index a16e590e3..46c771c48 100644
--- a/src/mem/packet.cc
+++ b/src/mem/packet.cc
@@ -150,7 +150,7 @@ fixPacket(Packet *func, Packet *timing)
Addr timingStart = timing->getAddr();
Addr timingEnd = timing->getAddr() + timing->getSize() - 1;
- assert(!(funcStart > timingEnd || timingStart < funcEnd));
+ assert(!(funcStart > timingEnd || timingStart > funcEnd));
if (DTRACE(FunctionalAccess)) {
DebugOut() << func;
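
The packet.cc hunk is a one-character change, but it turns a check that rejected ordinary overlaps into a correct interval-overlap assert. A small standalone illustration of the arithmetic (plain C++, not gem5 code):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t Addr;  // stand-in for gem5's Addr

    // Two closed byte ranges [aStart, aEnd] and [bStart, bEnd] overlap iff
    // neither range ends before the other one begins.
    static bool
    overlaps(Addr aStart, Addr aEnd, Addr bStart, Addr bEnd)
    {
        return !(aStart > bEnd || bStart > aEnd);
    }

    int main()
    {
        // func covers [0x100, 0x13f], timing covers [0x120, 0x15f]: an
        // ordinary partial overlap that the probe code has to handle.
        Addr funcStart = 0x100, funcEnd = 0x13f;
        Addr timingStart = 0x120, timingEnd = 0x15f;

        // Corrected condition from the hunk above: holds, as it should.
        assert(overlaps(funcStart, funcEnd, timingStart, timingEnd));

        // Old condition: the 'timingStart < funcEnd' term meant the assert
        // only passed when the timing packet started at or beyond the
        // functional packet's last byte, so it fired on overlaps like this.
        bool oldCondition = !(funcStart > timingEnd || timingStart < funcEnd);
        assert(!oldCondition);
        return 0;
    }
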
diff --git a/src/mem/tport.cc b/src/mem/tport.cc
index 2d8e7dba4..479dca1ad 100644
--- a/src/mem/tport.cc
+++ b/src/mem/tport.cc
@@ -33,12 +33,13 @@
void
SimpleTimingPort::recvFunctional(Packet *pkt)
{
- //First check queued events
- std::list<Packet *>::iterator i = transmitList.begin();
- std::list<Packet *>::iterator end = transmitList.end();
- bool cont = true;
+ std::list<Packet *>::iterator i;
+ std::list<Packet *>::iterator end;
- while (i != end && cont) {
+ //First check queued events
+ i = transmitList.begin();
+ end = transmitList.end();
+ while (i != end) {
Packet * target = *i;
// If the target contains data, and it overlaps the
// probed request, need to update data
@@ -46,8 +47,9 @@ SimpleTimingPort::recvFunctional(Packet *pkt)
fixPacket(pkt, target);
}
+
//Then just do an atomic access and throw away the returned latency
- if (cont)
+ if (pkt->result != Packet::Success)
recvAtomic(pkt);
}
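
Putting the two tport.cc hunks together, the post-patch flow looks roughly like the sketch below; the lines hidden between the hunks are reconstructed from the surrounding pattern, so minor details may differ from the actual src/mem/tport.cc. The point of the change is that recvAtomic() is skipped whenever fixPacket() already satisfied the probe from a queued response, rather than relying on the old cont flag:

    // Sketch of SimpleTimingPort::recvFunctional() after this patch.
    void
    SimpleTimingPort::recvFunctional(Packet *pkt)
    {
        std::list<Packet *>::iterator i = transmitList.begin();
        std::list<Packet *>::iterator end = transmitList.end();

        // First check queued packets: any one that overlaps the probe gets
        // folded into it.  A read satisfied here comes back with
        // result == Packet::Success.
        while (i != end) {
            Packet *target = *i;
            if (target->intersect(pkt))
                fixPacket(pkt, target);
            ++i;
        }

        // Only do the atomic access to the functional device if the queue did
        // not already satisfy the request; otherwise recvAtomic() would
        // overwrite the newer data just copied out of the queue.
        if (pkt->result != Packet::Success)
            recvAtomic(pkt);
    }
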
diff --git a/src/python/m5/objects/MemTest.py b/src/python/m5/objects/MemTest.py
index 83399be80..1219ddd4d 100644
--- a/src/python/m5/objects/MemTest.py
+++ b/src/python/m5/objects/MemTest.py
@@ -13,6 +13,7 @@ class MemTest(SimObject):
percent_reads = Param.Percent(65, "target read percentage")
percent_source_unaligned = Param.Percent(50,
"percent of copy source address that are unaligned")
+ percent_functional = Param.Percent(50, "percent of access that are functional")
percent_uncacheable = Param.Percent(10,
"target uncacheable percentage")
progress_interval = Param.Counter(1000000,