diff options
author:    Joel Hestness <jthestness@gmail.com>  2015-08-14 00:19:39 -0500
committer: Joel Hestness <jthestness@gmail.com>  2015-08-14 00:19:39 -0500
commit:    bf06911b3f6d992dc78489d66410f4580a17db7b (patch)
tree:      74799214cc889c3531d263543af4d144d5a3bf9c
parent:    9567c839fecfdb29a59f9da50cf706fcb22a2bb1 (diff)
download:  gem5-bf06911b3f6d992dc78489d66410f4580a17db7b.tar.xz
ruby: Change PerfectCacheMemory::lookup to return pointer
CacheMemory and DirectoryMemory lookup functions return pointers to entries
stored in the memory. Bring PerfectCacheMemory in line with this convention,
and clean up SLICC code generation that was in place solely to handle
references like that which was returned by PerfectCacheMemory::lookup.
-rw-r--r-- | src/mem/protocol/MOESI_CMP_directory-L2cache.sm | 89
-rw-r--r-- | src/mem/protocol/MOESI_CMP_token-L2cache.sm     | 35
-rw-r--r-- | src/mem/ruby/structures/PerfectCacheMemory.hh   | 12
-rw-r--r-- | src/mem/slicc/ast/MethodCallExprAST.py          |  3
4 files changed, 85 insertions, 54 deletions
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm index c01b9765d..77f498e31 100644 --- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm +++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm @@ -183,7 +183,7 @@ machine(L2Cache, "Token protocol") } - structure(DirEntry, desc="...") { + structure(DirEntry, desc="...", interface="AbstractEntry") { NetDest Sharers, desc="Set of the internal processors that want the block in shared state"; MachineID Owner, desc="ID of the L1 cache to forward the block to once we get a response"; bool OwnerValid, default="false", desc="true if Owner means something"; @@ -239,6 +239,10 @@ machine(L2Cache, "Token protocol") return (localDirectory.isTagPresent(addr) ); } + DirEntry getDirEntry(Address address), return_by_pointer="yes" { + return localDirectory.lookup(address); + } + bool isOnlySharer(Entry cache_entry, Address addr, MachineID shar_id) { if (is_valid(cache_entry)) { assert (localDirectory.isTagPresent(addr) == false); @@ -259,11 +263,12 @@ machine(L2Cache, "Token protocol") } } else if (localDirectory.isTagPresent(addr)){ - if (localDirectory[addr].Sharers.count() > 1) { + DirEntry dir_entry := getDirEntry(addr); + if (dir_entry.Sharers.count() > 1) { return false; } - else if (localDirectory[addr].Sharers.count() == 1) { - if (localDirectory[addr].Sharers.isElement(shar_id)) { + else if (dir_entry.Sharers.count() == 1) { + if (dir_entry.Sharers.isElement(shar_id)) { return true; } else { @@ -284,18 +289,20 @@ machine(L2Cache, "Token protocol") assert(localDirectory.isTagPresent(addr) == false); assert(is_valid(cache_entry)); localDirectory.allocate(addr); - localDirectory[addr].DirState := cache_entry.CacheState; - localDirectory[addr].Sharers := cache_entry.Sharers; - localDirectory[addr].Owner := cache_entry.Owner; - localDirectory[addr].OwnerValid := cache_entry.OwnerValid; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.DirState := 
cache_entry.CacheState; + dir_entry.Sharers := cache_entry.Sharers; + dir_entry.Owner := cache_entry.Owner; + dir_entry.OwnerValid := cache_entry.OwnerValid; } void copyDirToCache(Entry cache_entry, Address addr) { assert(is_valid(cache_entry)); - cache_entry.Sharers := localDirectory[addr].Sharers; - cache_entry.Owner := localDirectory[addr].Owner; - cache_entry.OwnerValid := localDirectory[addr].OwnerValid; + DirEntry dir_entry := getDirEntry(addr); + cache_entry.Sharers := dir_entry.Sharers; + cache_entry.Owner := dir_entry.Owner; + cache_entry.OwnerValid := dir_entry.OwnerValid; } @@ -307,10 +314,12 @@ machine(L2Cache, "Token protocol") else { if (localDirectory.isTagPresent(addr) == false) { localDirectory.allocate(addr); - localDirectory[addr].Sharers.clear(); - localDirectory[addr].OwnerValid := false; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.clear(); + dir_entry.OwnerValid := false; } - localDirectory[addr].Sharers.add(shar_id); + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.add(shar_id); } } @@ -326,9 +335,10 @@ machine(L2Cache, "Token protocol") if (localDirectory.isTagPresent(addr) == false) { localDirectory.allocate(addr); } - localDirectory[addr].Sharers.clear(); - localDirectory[addr].OwnerValid := true; - localDirectory[addr].Owner := exc_id; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.clear(); + dir_entry.OwnerValid := true; + dir_entry.Owner := exc_id; } } @@ -339,8 +349,9 @@ machine(L2Cache, "Token protocol") cache_entry.OwnerValid := false; } else { - localDirectory[addr].Sharers.clear(); - localDirectory[addr].OwnerValid := false; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.clear(); + dir_entry.OwnerValid := false; } } @@ -350,7 +361,8 @@ machine(L2Cache, "Token protocol") cache_entry.Sharers.remove(sender); } else { - localDirectory[addr].Sharers.remove(sender); + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.remove(sender); } } @@ -360,7 +372,8 @@ 
machine(L2Cache, "Token protocol") cache_entry.OwnerValid := false; } else { - localDirectory[addr].OwnerValid := false; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.OwnerValid := false; } } @@ -370,7 +383,8 @@ machine(L2Cache, "Token protocol") return cache_entry.Sharers.isElement(shar_id); } else { - return localDirectory[addr].Sharers.isElement(shar_id); + DirEntry dir_entry := getDirEntry(addr); + return dir_entry.Sharers.isElement(shar_id); } } @@ -380,7 +394,8 @@ machine(L2Cache, "Token protocol") return cache_entry.Sharers; } else { - return localDirectory[addr].Sharers; + DirEntry dir_entry := getDirEntry(addr); + return dir_entry.Sharers; } } @@ -390,7 +405,8 @@ machine(L2Cache, "Token protocol") return cache_entry.Owner; } else { - return localDirectory[addr].Owner; + DirEntry dir_entry := getDirEntry(addr); + return dir_entry.Owner; } } @@ -400,7 +416,8 @@ machine(L2Cache, "Token protocol") return cache_entry.Sharers.count(); } else { - return localDirectory[addr].Sharers.count(); + DirEntry dir_entry := getDirEntry(addr); + return dir_entry.Sharers.count(); } } @@ -410,7 +427,8 @@ machine(L2Cache, "Token protocol") return cache_entry.OwnerValid; } else { - return localDirectory[addr].OwnerValid; + DirEntry dir_entry := getDirEntry(addr); + return dir_entry.OwnerValid; } } @@ -425,11 +443,12 @@ machine(L2Cache, "Token protocol") } } else { - if (localDirectory[addr].Sharers.isElement(requestor)) { - return ( localDirectory[addr].Sharers.count() - 1 ); + DirEntry dir_entry := getDirEntry(addr); + if (dir_entry.Sharers.isElement(requestor)) { + return ( dir_entry.Sharers.count() - 1 ); } else { - return localDirectory[addr].Sharers.count(); + return dir_entry.Sharers.count(); } } } @@ -441,7 +460,8 @@ machine(L2Cache, "Token protocol") } else if (is_valid(cache_entry)) { return cache_entry.CacheState; } else if (isDirTagPresent(addr)) { - return localDirectory[addr].DirState; + DirEntry dir_entry := getDirEntry(addr); + return dir_entry.DirState; 
} else { return State:NP; } @@ -493,7 +513,8 @@ machine(L2Cache, "Token protocol") } } else if (localDirectory.isTagPresent(addr)) { - localDirectory[addr].DirState := state; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.DirState := state; } } @@ -1254,7 +1275,8 @@ machine(L2Cache, "Token protocol") out_msg.Requestor := in_msg.Requestor; out_msg.RequestorMachine := MachineType:L1Cache; // should randomize this so one node doesn't get abused more than others - out_msg.Destination.add(localDirectory[in_msg.Addr].Sharers.smallestElement(MachineType:L1Cache)); + DirEntry dir_entry := getDirEntry(in_msg.Addr); + out_msg.Destination.add(dir_entry.Sharers.smallestElement(MachineType:L1Cache)); out_msg.MessageSize := MessageSizeType:Forwarded_Control; } } @@ -1267,7 +1289,8 @@ machine(L2Cache, "Token protocol") out_msg.Type := CoherenceRequestType:GETX; out_msg.Requestor := tbe.L1_GetX_ID; out_msg.RequestorMachine := MachineType:L1Cache; - out_msg.Destination.add(localDirectory[address].Owner); + DirEntry dir_entry := getDirEntry(address); + out_msg.Destination.add(dir_entry.Owner); out_msg.MessageSize := MessageSizeType:Forwarded_Control; out_msg.Acks := 1 + tbe.Local_GETX_IntAcks; } diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm index 6542ede49..ad746a275 100644 --- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm +++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm @@ -123,7 +123,7 @@ machine(L2Cache, "Token protocol") DataBlock DataBlk, desc="data for the block"; } - structure(DirEntry, desc="...") { + structure(DirEntry, desc="...", interface="AbstractEntry") { Set Sharers, desc="Set of the internal processors that want the block in shared state"; bool exclusive, default="false", desc="if local exclusive is likely"; } @@ -157,6 +157,10 @@ machine(L2Cache, "Token protocol") return cache_entry; } + DirEntry getDirEntry(Address address), return_by_pointer="yes" { + return localDirectory.lookup(address); + } + 
void functionalRead(Address addr, Packet *pkt) { testAndRead(addr, getCacheEntry(addr).DataBlk, pkt); } @@ -241,8 +245,9 @@ machine(L2Cache, "Token protocol") void removeSharer(Address addr, NodeID id) { if (localDirectory.isTagPresent(addr)) { - localDirectory[addr].Sharers.remove(id); - if (localDirectory[addr].Sharers.count() == 0) { + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.remove(id); + if (dir_entry.Sharers.count() == 0) { localDirectory.deallocate(addr); } } @@ -250,7 +255,8 @@ machine(L2Cache, "Token protocol") bool sharersExist(Address addr) { if (localDirectory.isTagPresent(addr)) { - if (localDirectory[addr].Sharers.count() > 0) { + DirEntry dir_entry := getDirEntry(addr); + if (dir_entry.Sharers.count() > 0) { return true; } else { @@ -264,7 +270,8 @@ machine(L2Cache, "Token protocol") bool exclusiveExists(Address addr) { if (localDirectory.isTagPresent(addr)) { - if (localDirectory[addr].exclusive) { + DirEntry dir_entry := getDirEntry(addr); + if (dir_entry.exclusive) { return true; } else { @@ -278,29 +285,33 @@ machine(L2Cache, "Token protocol") // assumes that caller will check to make sure tag is present Set getSharers(Address addr) { - return localDirectory[addr].Sharers; + DirEntry dir_entry := getDirEntry(addr); + return dir_entry.Sharers; } void setNewWriter(Address addr, NodeID id) { if (localDirectory.isTagPresent(addr) == false) { localDirectory.allocate(addr); } - localDirectory[addr].Sharers.clear(); - localDirectory[addr].Sharers.add(id); - localDirectory[addr].exclusive := true; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.clear(); + dir_entry.Sharers.add(id); + dir_entry.exclusive := true; } void addNewSharer(Address addr, NodeID id) { if (localDirectory.isTagPresent(addr) == false) { localDirectory.allocate(addr); } - localDirectory[addr].Sharers.add(id); - // localDirectory[addr].exclusive := false; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.Sharers.add(id); + // dir_entry.exclusive 
:= false; } void clearExclusiveBitIfExists(Address addr) { if (localDirectory.isTagPresent(addr)) { - localDirectory[addr].exclusive := false; + DirEntry dir_entry := getDirEntry(addr); + dir_entry.exclusive := false; } } diff --git a/src/mem/ruby/structures/PerfectCacheMemory.hh b/src/mem/ruby/structures/PerfectCacheMemory.hh index 555f24b71..413a0f471 100644 --- a/src/mem/ruby/structures/PerfectCacheMemory.hh +++ b/src/mem/ruby/structures/PerfectCacheMemory.hh @@ -71,8 +71,8 @@ class PerfectCacheMemory Address cacheProbe(const Address& newAddress) const; // looks an address up in the cache - ENTRY& lookup(const Address& address); - const ENTRY& lookup(const Address& address) const; + ENTRY* lookup(const Address& address); + const ENTRY* lookup(const Address& address) const; // Get/Set permission of cache block AccessPermission getPermission(const Address& address) const; @@ -151,18 +151,18 @@ PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const // looks an address up in the cache template<class ENTRY> -inline ENTRY& +inline ENTRY* PerfectCacheMemory<ENTRY>::lookup(const Address& address) { - return m_map[line_address(address)].m_entry; + return &m_map[line_address(address)].m_entry; } // looks an address up in the cache template<class ENTRY> -inline const ENTRY& +inline const ENTRY* PerfectCacheMemory<ENTRY>::lookup(const Address& address) const { - return m_map[line_address(address)].m_entry; + return &m_map[line_address(address)].m_entry; } template<class ENTRY> diff --git a/src/mem/slicc/ast/MethodCallExprAST.py b/src/mem/slicc/ast/MethodCallExprAST.py index bf133b374..8be319a40 100644 --- a/src/mem/slicc/ast/MethodCallExprAST.py +++ b/src/mem/slicc/ast/MethodCallExprAST.py @@ -156,9 +156,6 @@ class MemberMethodCallExprAST(MethodCallExprAST): methodId = implementedMethodId return_type = obj_type.methods[methodId].return_type - if return_type.isInterface: - prefix = "static_cast<%s &>" % return_type.c_ident - if str(obj_type) == 
"AbstractCacheEntry" or \ str(obj_type) == "AbstractEntry" or \ ("interface" in obj_type and ( |